Preface
The previous article, Glide源码解析之MemoryCache, covered Glide's second-level cache, MemoryCache. Now let's look at the third-level cache, DiskCache.
Obtaining the DiskCache
In the earlier article Glide源码解析之DecodeHelper we saw that DecodeHelper mainly acts as a data provider. The DiskCache is likewise obtained through DecodeHelper, via its diskCacheProvider. The diskCacheProvider implementation is LazyDiskCacheProvider, which Engine creates in its constructor and passes to DecodeHelper. LazyDiskCacheProvider's constructor takes a DiskCache.Factory, whose implementation is InternalCacheDiskCacheFactory, created in GlideBuilder's build() and handed to Engine.
//DecodeHelper
DiskCache getDiskCache() {
return diskCacheProvider.getDiskCache();
}
//Engine
Engine(MemoryCache cache,
DiskCache.Factory diskCacheFactory,
xxx) {
this.diskCacheProvider = new LazyDiskCacheProvider(diskCacheFactory);
}
//GlideBuilder
Glide build(Context context) {
if (diskCacheFactory == null) {
diskCacheFactory = new InternalCacheDiskCacheFactory(context);
}
if (engine == null) {
engine =
new Engine(
memoryCache,
diskCacheFactory,
diskCacheExecutor,
sourceExecutor,
GlideExecutor.newUnlimitedSourceExecutor(),
animationExecutor,
isActiveResourceRetentionAllowed);
}
}
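If the default internal cache does not fit, the factory can be swapped before Engine is built. Below is a minimal sketch, assuming a Glide 4 style AppGlideModule; the module name, directory name and 100 MB size are illustrative, not Glide defaults.
import android.content.Context;
import com.bumptech.glide.GlideBuilder;
import com.bumptech.glide.annotation.GlideModule;
import com.bumptech.glide.load.engine.cache.InternalCacheDiskCacheFactory;
import com.bumptech.glide.module.AppGlideModule;

//Hypothetical module: replaces the InternalCacheDiskCacheFactory that build() would create by default.
@GlideModule
public final class CustomCacheGlideModule extends AppGlideModule {
  @Override
  public void applyOptions(Context context, GlideBuilder builder) {
    long diskCacheSizeBytes = 100L * 1024 * 1024; //100 MB, illustrative
    builder.setDiskCache(
        new InternalCacheDiskCacheFactory(context, "my_disk_cache", diskCacheSizeBytes));
  }
}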
LazyDiskCacheProvider
LazyDiskCacheProvider keeps a single DiskCache instance, creating it lazily with double-checked locking; the actual construction is delegated to the DiskCache.Factory, i.e. the InternalCacheDiskCacheFactory mentioned above. InternalCacheDiskCacheFactory extends DiskLruCacheFactory, and DiskLruCacheFactory implements the DiskCache.Factory interface; we will look at both classes shortly.
private static class LazyDiskCacheProvider implements DecodeJob.DiskCacheProvider {
private final DiskCache.Factory factory;
private volatile DiskCache diskCache;
LazyDiskCacheProvider(DiskCache.Factory factory) {
this.factory = factory;
}
@VisibleForTesting
synchronized void clearDiskCacheIfCreated() {
if (diskCache == null) {
return;
}
diskCache.clear();
}
@Override
public DiskCache getDiskCache() {
if (diskCache == null) {
synchronized (this) {
if (diskCache == null) {
diskCache = factory.build(); //The DiskCache is instantiated by DiskLruCacheFactory
}
if (diskCache == null) {
diskCache = new DiskCacheAdapter();
}
}
}
return diskCache;
}
}
DiskCache.Factory
InternalCacheDiskCacheFactory only provides constructors that ultimately call DiskLruCacheFactory's constructor, supplying it with diskCacheSize and a cacheDirectoryGetter; build() itself is implemented by DiskLruCacheFactory.
build() first checks whether the cache directory is null; it normally isn't, and the path is data/data/<your app package>/cache/image_manager_disk_cache. The DiskCache implementation ultimately returned is DiskLruCacheWrapper.
//DiskLruCacheFactory
public DiskCache build() {
File cacheDir = cacheDirectoryGetter.getCacheDirectory();
if (cacheDir == null) {
return null;
}
if (!cacheDir.mkdirs() && (!cacheDir.exists() || !cacheDir.isDirectory())) {
return null;
}
return DiskLruCacheWrapper.create(cacheDir, diskCacheSize);
}
//DiskLruCacheWrapper
public static DiskCache create(File directory, long maxSize) {
return new DiskLruCacheWrapper(directory, maxSize);
}
//InternalCacheDiskCacheFactory
public final class InternalCacheDiskCacheFactory extends DiskLruCacheFactory {
public InternalCacheDiskCacheFactory(Context context) {
this(context, DiskCache.Factory.DEFAULT_DISK_CACHE_DIR/*image_manager_disk_cache*/,
DiskCache.Factory.DEFAULT_DISK_CACHE_SIZE/*250M*/);
}
public InternalCacheDiskCacheFactory(final Context context, final String diskCacheName,
long diskCacheSize) {
super(new CacheDirectoryGetter() {
@Override
public File getCacheDirectory() {
File cacheDirectory = context.getCacheDir();
if (cacheDirectory == null) {
return null;
}
if (diskCacheName != null) {
//The cache directory is cache/image_manager_disk_cache
return new File(cacheDirectory, diskCacheName);
}
return cacheDirectory;
}
}, diskCacheSize);
}
}
DiskCache
The DiskCache implementation is DiskLruCacheWrapper, but it does not perform the actual disk caching itself; the concrete work is done by DiskLruCache. As the name suggests, a decorator-style wrapper is used here to add some functionality on top of DiskLruCache.
public interface DiskCache {
/**
* Returns the cached file for the given key.
*/
@Nullable
File get(Key key);
/**
* Writes a cache file via the given Writer.
*/
void put(Key key, Writer writer);
/**
* Deletes the cache file for the given key.
*/
void delete(Key key);
/**
* Clears all cache files (deletes everything).
*/
void clear();
}
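put() hands the target File to a DiskCache.Writer callback and only commits the entry when write() returns true. Below is a minimal sketch of such a Writer, assuming a hypothetical ByteArrayWriter that dumps an in-memory byte[] into the file (Glide's real Writer implementations stream data from the decode pipeline instead).
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import com.bumptech.glide.load.engine.cache.DiskCache;

//Hypothetical Writer: copies an in-memory byte[] into the cache file handed over by put().
class ByteArrayWriter implements DiskCache.Writer {
  private final byte[] data;

  ByteArrayWriter(byte[] data) {
    this.data = data;
  }

  @Override
  public boolean write(File file) {
    try (FileOutputStream os = new FileOutputStream(file)) {
      os.write(data);
      return true;  //true lets put() call editor.commit(), turning DIRTY into CLEAN
    } catch (IOException e) {
      return false; //false aborts the edit, so no CLEAN record is written
    }
  }
}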
public class DiskLruCacheWrapper implements DiskCache {
private static final int APP_VERSION = 1;
private static final int VALUE_COUNT = 1;
private final SafeKeyGenerator safeKeyGenerator; //Builds String-typed disk cache keys
private final File directory;
private final long maxSize;
private final DiskCacheWriteLocker writeLocker = new DiskCacheWriteLocker();
private DiskLruCache diskLruCache;
private synchronized DiskLruCache getDiskCache() throws IOException {
if (diskLruCache == null) {
diskLruCache = DiskLruCache.open(directory, APP_VERSION, VALUE_COUNT, maxSize);
}
return diskLruCache;
}
}
DiskLruCache
Like MemoryCache, DiskLruCache uses a LinkedHashMap to implement the LRU algorithm. A LinkedHashMap only lives in memory, though, so if the app were killed and reopened the recent-usage order would be lost. DiskLruCache therefore writes every operation on the data into a journal file: on initialization it first restores the historical cache entries and their access order from the journal, and every subsequent operation is appended to the journal as well.
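As background, the in-memory half of this is just an access-ordered LinkedHashMap: every get() moves the entry to the tail, so the head is always the least recently used. Below is a standalone sketch of that idea (not Glide code); note that DiskLruCache itself trims by total byte size rather than by entry count.
import java.util.LinkedHashMap;
import java.util.Map;

//Standalone LRU illustration: accessOrder = true makes get() reorder entries,
//and removeEldestEntry() evicts from the head once the map grows too large.
class SimpleLruMap<K, V> extends LinkedHashMap<K, V> {
  private final int maxEntries;

  SimpleLruMap(int maxEntries) {
    super(16, 0.75f, true /* accessOrder */);
    this.maxEntries = maxEntries;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > maxEntries;
  }
}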
public final class DiskLruCache implements Closeable {
//Journal file constants
static final String JOURNAL_FILE = "journal";
static final String JOURNAL_FILE_TEMP = "journal.tmp";
static final String JOURNAL_FILE_BACKUP = "journal.bkp";
static final String MAGIC = "libcore.io.DiskLruCache";
static final String VERSION_1 = "1";
static final long ANY_SEQUENCE_NUMBER = -1;
private static final String CLEAN = "CLEAN";
private static final String DIRTY = "DIRTY";
private static final String REMOVE = "REMOVE";
private static final String READ = "READ";
private final File directory; //image cache directory
private final File journalFile; //journal file
private final File journalFileTmp; //temporary journal file
private final File journalFileBackup; //journal backup file
private final int appVersion;
private long maxSize; //maximum cache size
private long size = 0; //current cache size
private final int valueCount; //always 1 here
private Writer journalWriter; //Writer used to append records to the journal file
private final LinkedHashMap<String, Entry> lruEntries =
new LinkedHashMap<String, Entry>(0, 0.75f, true);
}
//Journal file format
libcore.io.DiskLruCache //MAGIC
1 //VERSION_1
1 //appVersion
1 //valueCount
DIRTY 4244bd8b60e86cb88a8d24052c5a3d52da7091a289a9d3c09b98531260ce0171 //An entry newly put into the LinkedHashMap is recorded as DIRTY; the string after it is the key
CLEAN 4244bd8b60e86cb88a8d24052c5a3d52da7091a289a9d3c09b98531260ce0171 8441 //Once the data has been written to disk the state becomes CLEAN; the trailing number is the image size
READ 4244bd8b60e86cb88a8d24052c5a3d52da7091a289a9d3c09b98531260ce0171 //A read is recorded as READ
REMOVE 64af945d99537d3f777a76dc62012d9c2368f33f145bbf592fa11f28489f8142 //A deletion is recorded as REMOVE
//The journal records operation states: a new operation does not erase earlier lines, it simply appends another record.
open()
The DiskLruCache is created here.
public static DiskLruCache open(File directory, int appVersion, int valueCount, long maxSize)
throws IOException {
if (maxSize <= 0) {
throw new IllegalArgumentException("maxSize <= 0");
}
if (valueCount <= 0) {
throw new IllegalArgumentException("valueCount <= 0");
}
File backupFile = new File(directory, JOURNAL_FILE_BACKUP);
if (backupFile.exists()) {
//If a journal backup file exists
File journalFile = new File(directory, JOURNAL_FILE);
if (journalFile.exists()) {
// If the journal file exists, delete the backup file
backupFile.delete();
} else {
//If the journal file does not exist, rename the backup file to the journal file
renameTo(backupFile, journalFile, false);
}
}
DiskLruCache cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
if (cache.journalFile.exists()) {
try {
cache.readJournal(); //Read the journal file
cache.processJournal(); //Process the journal file
return cache; //Return directly if no exception occurred
} catch (IOException journalIsCorrupt) {
System.out
.println("DiskLruCache "
+ directory
+ " is corrupt: "
+ journalIsCorrupt.getMessage()
+ ", removing");
cache.delete(); //Delete all cache files
}
}
//After the exception above all cache files have been deleted; recreate the DiskLruCache and rebuild the journal file here
directory.mkdirs();
cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
cache.rebuildJournal();
return cache;
}
readJournal()
This reads the journal file and loads the entries it records into the LinkedHashMap.
private void readJournal() throws IOException {
StrictLineReader reader = new StrictLineReader(new FileInputStream(journalFile), Util.US_ASCII);
try {
String magic = reader.readLine();
String version = reader.readLine();
String appVersionString = reader.readLine();
String valueCountString = reader.readLine();
String blank = reader.readLine();
//Check that the header format is correct
if (!MAGIC.equals(magic)
|| !VERSION_1.equals(version)
|| !Integer.toString(appVersion).equals(appVersionString)
|| !Integer.toString(valueCount).equals(valueCountString)
|| !"".equals(blank)) {
throw new IOException("unexpected journal header: [" + magic + ", " + version + ", "
+ valueCountString + ", " + blank + "]");
}
int lineCount = 0;
while (true) {
try {
//Read the journal file line by line
readJournalLine(reader.readLine());
lineCount++;
} catch (EOFException endOfJournal) {
break;
}
}
redundantOpCount = lineCount - lruEntries.size();
if (reader.hasUnterminatedLine()) {
rebuildJournal();
} else {
journalWriter = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(journalFile, true), Util.US_ASCII));
}
} finally {
Util.closeQuietly(reader);
}
}
private void readJournalLine(String line) throws IOException {
int firstSpace = line.indexOf(' '); //Position of the first space: the state is before it, the key after it
if (firstSpace == -1) {
throw new IOException("unexpected journal line: " + line);
}
int keyBegin = firstSpace + 1;
int secondSpace = line.indexOf(' ', keyBegin); //A CLEAN line has a second space: the key is before it, the image size after it
final String key;
if (secondSpace == -1) {
key = line.substring(keyBegin);
//If the state is REMOVE, remove the entry from the LinkedHashMap
if (firstSpace == REMOVE.length() && line.startsWith(REMOVE)) {
lruEntries.remove(key);
return;
}
} else {
key = line.substring(keyBegin, secondSpace);
}
Entry entry = lruEntries.get(key);
if (entry == null) {
//The Entry resolves its cache files from the key
entry = new Entry(key);
//Add the entry to the LinkedHashMap
lruEntries.put(key, entry);
}
if (secondSpace != -1 && firstSpace == CLEAN.length() && line.startsWith(CLEAN)) {
//CLEAN means the image has already been written to the cache file
String[] parts = line.substring(secondSpace + 1).split(" ");
entry.readable = true;
entry.currentEditor = null;
entry.setLengths(parts);
} else if (secondSpace == -1 && firstSpace == DIRTY.length() && line.startsWith(DIRTY)) {
//DIRTY means the cache file will later be written through an Editor; whether an entry is still DIRTY is also determined by checking whether currentEditor is non-null
entry.currentEditor = new Editor(entry);
} else if (secondSpace == -1 && firstSpace == READ.length() && line.startsWith(READ)) {
// lruEntries.get(key) was already called above and the access-ordered LinkedHashMap adjusts the ordering automatically, so nothing more needs to be done here
} else {
throw new IOException("unexpected journal line: " + line);
}
}
put()
First, SafeKeyGenerator turns the key into a hex SHA-256 string via sha256BytesToHex(); then it checks whether cached data already exists. If not, the image is first written into a DirtyFile and finally renamed to a CleanFile.
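As a side note on the safe key: it is simply a hex-encoded SHA-256 digest of the request key, so arbitrary key contents become a fixed-length, file-system-safe name. Below is a minimal sketch of the idea with a plain String input (SafeKeyGenerator's real implementation digests the Key object itself and caches the results).
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

//Illustration only: hash an arbitrary key string into a hex name usable as a cache file name.
final class SafeKeys {
  static String toSafeKey(String key) throws NoSuchAlgorithmException {
    byte[] hash = MessageDigest.getInstance("SHA-256")
        .digest(key.getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder(hash.length * 2);
    for (byte b : hash) {
      hex.append(Character.forDigit((b >> 4) & 0xf, 16));
      hex.append(Character.forDigit(b & 0xf, 16));
    }
    return hex.toString();
  }
}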
@Override
public void put(Key key, Writer writer) {
String safeKey = safeKeyGenerator.getSafeKey(key);
//Synchronize with the disk cache write lock
writeLocker.acquire(safeKey);
try {
try {
// If cached data already exists, don't overwrite it; just return. Since the key computation is unique, one key corresponds to exactly one value.
DiskLruCache diskCache = getDiskCache();
Value current = diskCache.get(safeKey);
if (current != null) {
return;
}
DiskLruCache.Editor editor = diskCache.edit(safeKey);
if (editor == null) {
throw new IllegalStateException("Had two simultaneous puts for: " + safeKey);
}
try {
//This returns the DirtyFile
File file = editor.getFile(0);
//Write the image into the DirtyFile
if (writer.write(file)) {
editor.commit(); //Rename the DirtyFile to the CleanFile
}
} finally {
editor.abortUnlessCommitted();
}
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Unable to put to disk cache", e);
}
}
} finally {
writeLocker.release(safeKey);
}
}
//DiskLruCache
public Editor edit(String key) throws IOException {
return edit(key, ANY_SEQUENCE_NUMBER);
}
private synchronized Editor edit(String key, long expectedSequenceNumber) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER && (entry == null
|| entry.sequenceNumber != expectedSequenceNumber)) {
return null;
}
if (entry == null) {
entry = new Entry(key);
lruEntries.put(key, entry); //Store the new entry in the LinkedHashMap
} else if (entry.currentEditor != null) {
return null;
}
Editor editor = new Editor(entry);
entry.currentEditor = editor;
// Append a DIRTY record to the journal file
journalWriter.append(DIRTY);
journalWriter.append(' ');
journalWriter.append(key);
journalWriter.append('\n');
journalWriter.flush();
return editor;
}
Turning the temporary file into the final cache file: getFile() hands out the DirtyFile to write into, and commit()/completeEdit() renames it to the CleanFile.
public File getFile(int index) throws IOException {
synchronized (DiskLruCache.this) {
if (entry.currentEditor != this) {
throw new IllegalStateException();
}
//readable defaults to false
if (!entry.readable) {
written[index] = true; //Set to true here
}
File dirtyFile = entry.getDirtyFile(index);
if (!directory.exists()) {
directory.mkdirs();
}
return dirtyFile;
}
}
public void commit() throws IOException {
completeEdit(this, true);
committed = true;
}
private synchronized void completeEdit(Editor editor, boolean success) throws IOException {
Entry entry = editor.entry;
if (entry.currentEditor != editor) {
throw new IllegalStateException();
}
if (success && !entry.readable) {
for (int i = 0; i < valueCount; i++) {
//written[i] was set to true in getFile() above, so this branch is not entered.
if (!editor.written[i]) {
editor.abort();
throw new IllegalStateException("Newly created entry didn't create value for index " + i);
}
//writer.write(file) was called right after getFile(), so the DirtyFile exists and this branch is not entered either.
if (!entry.getDirtyFile(i).exists()) {
editor.abort();
return;
}
}
}
//valueCount is 1
for (int i = 0; i < valueCount; i++) {
File dirty = entry.getDirtyFile(i);
if (success) {
if (dirty.exists()) {
//Rename the DirtyFile to the CleanFile (named key.0) and update the current cache size
File clean = entry.getCleanFile(i);
dirty.renameTo(clean);
long oldLength = entry.lengths[i];
long newLength = clean.length();
entry.lengths[i] = newLength;
size = size - oldLength + newLength;
}
} else {
deleteIfExists(dirty);
}
}
redundantOpCount++;
entry.currentEditor = null;
if (entry.readable | success) {
//Append a CLEAN record to the journal file
entry.readable = true; //Set to true
journalWriter.append(CLEAN);
journalWriter.append(' ');
journalWriter.append(entry.key);
journalWriter.append(entry.getLengths());
journalWriter.append('\n');
if (success) {
entry.sequenceNumber = nextSequenceNumber++;
}
} else {
lruEntries.remove(entry.key);
journalWriter.append(REMOVE);
journalWriter.append(' ');
journalWriter.append(entry.key);
journalWriter.append('\n');
}
journalWriter.flush();
if (size > maxSize || journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
}
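The DirtyFile/CleanFile dance above is the classic write-to-temp-then-rename pattern: content only becomes visible under the final name once it has been fully written, so a reader never sees a half-written image. Below is a standalone sketch of the same pattern outside Glide (names are illustrative).
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

//Standalone illustration of the dirty -> clean protocol used by completeEdit():
//write into "<name>.tmp" first, then rename it onto the final name.
final class SafeFileWriter {
  static void writeThenRename(File target, byte[] data) throws IOException {
    File dirty = new File(target.getPath() + ".tmp"); //plays the role of Entry.getDirtyFile()
    try (FileOutputStream os = new FileOutputStream(dirty)) {
      os.write(data);
    }
    //Only now does the content appear under the final name (the CLEAN state).
    if (!dirty.renameTo(target)) {
      throw new IOException("failed to rename " + dirty + " to " + target);
    }
  }
}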
get()
Fetching a cache file: first look up the Entry for the key; if it exists, check whether it is readable (i.e. the image cache exists on disk). If the cache files exist, append a READ record to the journal file, and finally wrap the data in a Value and return it.
public File get(Key key) {
String safeKey = safeKeyGenerator.getSafeKey(key);
File result = null;
try {
//Value is just an inner class of DiskLruCache that merely wraps the data; it is not the actual cached value.
final DiskLruCache.Value value = getDiskCache().get(safeKey);
if (value != null) {
//Return the cached file, i.e. the CleanFile
result = value.getFile(0);
}
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Unable to get from disk cache", e);
}
}
return result;
}
//DiskLruCache
public synchronized Value get(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (entry == null) {
return null;
}
//readable was set to true in completeEdit() above
if (!entry.readable) {
return null;
}
for (File file : entry.cleanFiles) {
// Check whether the cache file exists
if (!file.exists()) {
return null;
}
}
redundantOpCount++;
//Append a READ record to the journal file
journalWriter.append(READ);
journalWriter.append(' ');
journalWriter.append(key);
journalWriter.append('\n');
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
//Wrap the data in a Value and return it
return new Value(key, entry.sequenceNumber, entry.cleanFiles, entry.lengths);
}
delete()
Deleting a cache entry: the cache file is deleted first, then a REMOVE record is appended to the journal file, and finally the entry is removed from the LinkedHashMap as well.
@Override
public void delete(Key key) {
String safeKey = safeKeyGenerator.getSafeKey(key);
try {
getDiskCache().remove(safeKey);
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Unable to delete from disk cache", e);
}
}
}
//DiskLruCache
public synchronized boolean remove(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
//completeEdit() sets currentEditor to null
if (entry == null || entry.currentEditor != null) {
return false;
}
for (int i = 0; i < valueCount; i++) {
File file = entry.getCleanFile(i);
//Delete the CleanFile
if (file.exists() && !file.delete()) {
throw new IOException("failed to delete " + file);
}
size -= entry.lengths[i];
entry.lengths[i] = 0;
}
redundantOpCount++;
//Append a REMOVE record to the journal file
journalWriter.append(REMOVE);
journalWriter.append(' ');
journalWriter.append(key);
journalWriter.append('\n');
//Remove the entry from the LinkedHashMap as well
lruEntries.remove(key);
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
return true;
}
clear()
Deletes all disk cache data.
@Override
public synchronized void clear() {
try {
getDiskCache().delete();
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Unable to clear disk cache or disk cache cleared externally", e);
}
} finally {
resetDiskCache(); //diskLruCache = null;
}
}
//DiskLruCache
public void delete() throws IOException {
close();
Util.deleteContents(directory);
}
public synchronized void close() throws IOException {
if (journalWriter == null) {
//If close() has already been called, return directly
return;
}
for (Entry entry : new ArrayList<Entry>(lruEntries.values())) {
if (entry.currentEditor != null) {
//abort() deletes the DirtyFile, then removes the corresponding entry from the LinkedHashMap, and finally appends a REMOVE record to the journal file
entry.currentEditor.abort();
}
}
trimToSize();
journalWriter.close();
journalWriter = null;
}
public void abort() throws IOException {
//Analyzed above; only the second argument differs
completeEdit(this, false);
}
//Delete the file if it exists
private static void deleteIfExists(File file) throws IOException {
if (file.exists() && !file.delete()) {
throw new IOException();
}
}
//Recursively delete a directory and the files it contains
static void deleteContents(File dir) throws IOException {
File[] files = dir.listFiles();
if (files == null) {
throw new IOException("not a readable directory: " + dir);
}
for (File file : files) {
if (file.isDirectory()) {
deleteContents(file);
}
if (!file.delete()) {
throw new IOException("failed to delete file: " + file);
}
}
}
Summary
DiskLruCacheWrapper implements the DiskCache interface to expose disk cache operations, but the concrete work is done by DiskLruCache. Internally DiskLruCache likewise uses a LinkedHashMap to implement the least-recently-used policy, and it writes every operation as a record into the journal file, so that on initialization the LinkedHashMap can restore the entries and their usage order from the journal.
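To tie the pieces together, here is a minimal usage sketch of the DiskCache interface from the caller's side. The StringKey class and the demo() method are hypothetical stand-ins for illustration; in Glide the Key comes from the request and the Writer from the decode pipeline, and the directory name and size below are illustrative.
import android.content.Context;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.MessageDigest;
import com.bumptech.glide.load.Key;
import com.bumptech.glide.load.engine.cache.DiskCache;
import com.bumptech.glide.load.engine.cache.DiskLruCacheWrapper;

//Hypothetical key: identifies a cache entry by a plain string (e.g. a URL).
final class StringKey implements Key {
  private final String value;

  StringKey(String value) { this.value = value; }

  @Override public void updateDiskCacheKey(MessageDigest messageDigest) {
    messageDigest.update(value.getBytes(CHARSET)); //CHARSET is declared on Key
  }
  @Override public boolean equals(Object o) {
    return o instanceof StringKey && ((StringKey) o).value.equals(value);
  }
  @Override public int hashCode() { return value.hashCode(); }
}

//Hypothetical walk-through of put, get and delete against DiskLruCacheWrapper.
final class DiskCacheDemo {
  static File demo(Context context, final byte[] payloadBytes) {
    DiskCache cache = DiskLruCacheWrapper.create(
        new File(context.getCacheDir(), "demo_cache"), 10L * 1024 * 1024);
    Key key = new StringKey("https://example.com/image.png");
    cache.put(key, new DiskCache.Writer() {
      @Override public boolean write(File file) {
        try (FileOutputStream os = new FileOutputStream(file)) {
          os.write(payloadBytes);
          return true;  //commit: the DIRTY record becomes CLEAN
        } catch (IOException e) {
          return false; //abort: the entry is discarded
        }
      }
    });
    File cached = cache.get(key); //the CleanFile on disk, or null on a miss
    cache.delete(key);            //deletes the file and appends a REMOVE record
    return cached;                //note: after delete(), this file no longer exists
  }
}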