Bug 913819 - HTTP cache v2: preload several chunks in advance in input stream to speed up reading, r=honzab

Michal Novotny committed 2014-05-02 20:15:14 +02:00
Parent 3e08a28dc2
Commit 9a6e8306aa
7 changed files with 219 additions and 84 deletions
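What the patch does, in brief: when a reader requests a chunk of a disk-backed entry, CacheFile now also starts reads of the next few chunks (PreloadChunks), and it keeps recently preloaded chunks alive as long as some input stream is trailing close behind them (ShouldKeepChunk). A minimal sketch of the read-ahead window, assuming the 256 kB chunk size used elsewhere in cache v2; PreloadWindow is a hypothetical helper written for illustration, not code from the patch:

#include <cstdint>
#include <vector>

// Illustrative only: which chunk indices PreloadChunks(aIndex) will try to
// load, given the entry size and the preload chunk count pref value.
std::vector<uint32_t> PreloadWindow(uint32_t aIndex, int64_t aDataSize,
                                    uint32_t aPreloadChunkCount,
                                    int64_t aChunkSize = 256 * 1024)
{
  std::vector<uint32_t> indices;
  for (uint32_t i = aIndex; i < aIndex + aPreloadChunkCount; ++i) {
    if (static_cast<int64_t>(i) * aChunkSize >= aDataSize) {
      break;  // chunk would start beyond EOF; same early return as the patch
    }
    indices.push_back(i);
  }
  return indices;
}

// A reader that just got chunk 3 of a 2 MB entry with the default pref value
// (4) triggers PreloadChunks(4), i.e. loads of chunks 4, 5, 6 and 7:
//   PreloadWindow(4, 2 * 1024 * 1024, 4) == {4, 5, 6, 7}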

View file: netwerk/cache2/CacheFile.cpp

@@ -20,6 +20,7 @@
// unused chunks only when:
// - CacheFile is memory-only
// - CacheFile is still waiting for the handle
// - the chunk is preloaded
//#define CACHE_CHUNKS
@@ -181,6 +182,7 @@ CacheFile::CacheFile()
, mDataAccessed(false)
, mDataIsDirty(false)
, mWritingMetadata(false)
, mPreloadWithoutInputStreams(true)
, mStatus(NS_OK)
, mDataSize(-1)
, mOutput(nullptr)
@@ -316,7 +318,7 @@ CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
uint32_t index = aChunk->Index();
LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08x, chunk=%p, idx=%d]",
LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08x, chunk=%p, idx=%u]",
this, aResult, aChunk, index));
if (NS_FAILED(aResult)) {
@@ -339,7 +341,7 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
nsresult rv;
LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08x, chunk=%p, idx=%d]",
LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08x, chunk=%p, idx=%u]",
this, aResult, aChunk, aChunk->Index()));
MOZ_ASSERT(!mMemoryOnly);
@@ -373,25 +375,17 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
return NS_OK;
}
#ifdef CACHE_CHUNKS
bool keepChunk = false;
if (NS_SUCCEEDED(aResult)) {
LOG(("CacheFile::OnChunkWritten() - Caching unused chunk [this=%p, "
"chunk=%p]", this, aChunk));
keepChunk = ShouldKeepChunk(aChunk->Index());
LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
keepChunk ? "Caching" : "Releasing", this, aChunk));
} else {
LOG(("CacheFile::OnChunkWritten() - Removing failed chunk [this=%p, "
LOG(("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
"chunk=%p]", this, aChunk));
}
#else
LOG(("CacheFile::OnChunkWritten() - Releasing %s chunk [this=%p, chunk=%p]",
NS_SUCCEEDED(aResult) ? "unused" : "failed", this, aChunk));
#endif
RemoveChunkInternal(aChunk,
#ifdef CACHE_CHUNKS
NS_SUCCEEDED(aResult));
#else
false);
#endif
RemoveChunkInternal(aChunk, keepChunk);
WriteMetadataIfNeededLocked();
@@ -580,6 +574,9 @@ CacheFile::OnMetadataRead(nsresult aResult)
if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
isNew = true;
mMetadata->MarkDirty();
} else {
CacheFileAutoLock lock(this);
PreloadChunks(0);
}
InitIndexEntry();
@@ -671,6 +668,11 @@ CacheFile::OpenInputStream(nsIInputStream **_retval)
return NS_ERROR_NOT_AVAILABLE;
}
// Once we open an input stream we no longer allow preloading of chunks
// without an input stream, i.e. we will no longer keep the first few chunks
// preloaded when the last input stream is closed.
mPreloadWithoutInputStreams = false;
CacheFileInputStream *input = new CacheFileInputStream(this);
LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
@@ -802,12 +804,7 @@ CacheFile::ThrowMemoryCachedData()
return NS_ERROR_ABORT;
}
#ifdef CACHE_CHUNKS
mCachedChunks.Clear();
#else
// If we don't cache all chunks, mCachedChunks must be empty.
MOZ_ASSERT(mCachedChunks.Count() == 0);
#endif
return NS_OK;
}
@@ -986,26 +983,32 @@ CacheFile::ReleaseOutsideLock(nsISupports *aObject)
}
nsresult
CacheFile::GetChunk(uint32_t aIndex, bool aWriter,
CacheFile::GetChunk(uint32_t aIndex, ECallerType aCaller,
CacheFileChunkListener *aCallback, CacheFileChunk **_retval)
{
CacheFileAutoLock lock(this);
return GetChunkLocked(aIndex, aWriter, aCallback, _retval);
return GetChunkLocked(aIndex, aCaller, aCallback, _retval);
}
nsresult
CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
CacheFileChunkListener *aCallback,
CacheFileChunk **_retval)
{
AssertOwnsLock();
LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%d, writer=%d, listener=%p]",
this, aIndex, aWriter, aCallback));
LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
this, aIndex, aCaller, aCallback));
MOZ_ASSERT(mReady);
MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
MOZ_ASSERT((aWriter && !aCallback) || (!aWriter && aCallback));
MOZ_ASSERT((aCaller == READER && aCallback) ||
(aCaller == WRITER && !aCallback) ||
(aCaller == PRELOADER && !aCallback));
// Preload chunks from disk when this is a disk-backed entry and the caller
// is a reader.
bool preload = !mMemoryOnly && (aCaller == READER);
nsresult rv;
@@ -1014,6 +1017,9 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
chunk.get(), this));
// Preloader calls this method to preload only non-loaded chunks.
MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
// We might get failed chunk between releasing the lock in
// CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
rv = chunk->GetStatus();
@@ -1024,26 +1030,27 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
return rv;
}
if (chunk->IsReady() || aWriter) {
if (chunk->IsReady() || aCaller == WRITER) {
chunk.swap(*_retval);
}
else {
} else {
rv = QueueChunkListener(aIndex, aCallback);
NS_ENSURE_SUCCESS(rv, rv);
}
if (preload) {
PreloadChunks(aIndex + 1);
}
return NS_OK;
}
if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
#ifndef CACHE_CHUNKS
// We don't cache all chunks, so we must not have a handle and we must be
// either waiting for the handle, or this is a memory-only entry.
MOZ_ASSERT(!mHandle && (mMemoryOnly || mOpeningFile));
#endif
LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
chunk.get(), this));
// Preloader calls this method to preload only non-loaded chunks.
MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
mChunks.Put(aIndex, chunk);
mCachedChunks.Remove(aIndex);
chunk->mFile = this;
@@ -1052,6 +1059,11 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
MOZ_ASSERT(chunk->IsReady());
chunk.swap(*_retval);
if (preload) {
PreloadChunks(aIndex + 1);
}
return NS_OK;
}
@@ -1085,18 +1097,20 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
return rv;
}
if (aWriter) {
if (aCaller == WRITER) {
chunk.swap(*_retval);
}
else {
} else if (aCaller != PRELOADER) {
rv = QueueChunkListener(aIndex, aCallback);
NS_ENSURE_SUCCESS(rv, rv);
}
if (preload) {
PreloadChunks(aIndex + 1);
}
return NS_OK;
}
else if (off == mDataSize) {
if (aWriter) {
} else if (off == mDataSize) {
if (aCaller == WRITER) {
// this listener is going to write to the chunk
chunk = new CacheFileChunk(this, aIndex);
mChunks.Put(aIndex, chunk);
@@ -1115,9 +1129,8 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
chunk.swap(*_retval);
return NS_OK;
}
}
else {
if (aWriter) {
} else {
if (aCaller == WRITER) {
// this chunk was requested by writer, but we need to fill the gap first
// Fill with zero the last chunk if it is incomplete
@@ -1137,8 +1150,7 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
rv = PadChunkWithZeroes(i);
NS_ENSURE_SUCCESS(rv, rv);
}
}
else {
} else {
// We don't need to create CacheFileChunk for other empty chunks unless
// there is some input stream waiting for this chunk.
@@ -1155,8 +1167,7 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
if (HaveChunkListeners(i)) {
rv = PadChunkWithZeroes(i);
NS_ENSURE_SUCCESS(rv, rv);
}
else {
} else {
mMetadata->SetHash(i, kEmptyChunkHash);
mDataSize = (i + 1) * kChunkSize;
}
@@ -1164,7 +1175,7 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
}
MOZ_ASSERT(mDataSize == off);
rv = GetChunkLocked(aIndex, true, nullptr, getter_AddRefs(chunk));
rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
NS_ENSURE_SUCCESS(rv, rv);
chunk.swap(*_retval);
@@ -1172,18 +1183,106 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
}
}
// We can get here only if the caller is a reader, since a writer always
// creates a new chunk above, and the preloader calls this method only for
// chunks that are not loaded but do exist.
MOZ_ASSERT(aCaller == READER, "Unexpected!");
if (mOutput) {
// the chunk doesn't exist but mOutput may create it
rv = QueueChunkListener(aIndex, aCallback);
NS_ENSURE_SUCCESS(rv, rv);
}
else {
} else {
return NS_ERROR_NOT_AVAILABLE;
}
return NS_OK;
}
void
CacheFile::PreloadChunks(uint32_t aIndex)
{
AssertOwnsLock();
uint32_t limit = aIndex + CacheObserver::PreloadChunkCount();
for (uint32_t i = aIndex; i < limit; ++i) {
int64_t off = i * kChunkSize;
if (off >= mDataSize) {
// This chunk is beyond EOF.
return;
}
if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
// This chunk is already in memory or is being read right now.
continue;
}
LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
this, i));
nsRefPtr<CacheFileChunk> chunk;
GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
// We've checked that we don't have this chunk, so no chunk should be
// returned.
MOZ_ASSERT(!chunk);
}
}
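The preloader deliberately reuses the GetChunkLocked() path with aCaller == PRELOADER: no listener is queued and no chunk is handed back, so the disk read just proceeds in the background and the chunk shows up in mChunks for the reader that follows. A toy simulation of the effect on a sequential reader (hypothetical code, not from the patch):

#include <cstdint>
#include <cstdio>
#include <set>

// Toy model of a sequential read; "loaded" stands in for chunks already in
// mChunks/mCachedChunks. Reading chunk i preloads i+1 .. i+N, so only the
// very first chunk has to be waited for.
static void SimulateSequentialRead(uint32_t aChunkCount,
                                   uint32_t aPreloadChunkCount)
{
  std::set<uint32_t> loaded;
  for (uint32_t readIdx = 0; readIdx < aChunkCount; ++readIdx) {
    const bool hit = loaded.count(readIdx) != 0;
    loaded.insert(readIdx);
    for (uint32_t i = readIdx + 1;
         i <= readIdx + aPreloadChunkCount && i < aChunkCount; ++i) {
      loaded.insert(i);  // the real code skips chunks already in memory
    }
    std::printf("chunk %u: %s\n", readIdx, hit ? "hit" : "miss");
  }
}

// SimulateSequentialRead(8, 4) prints a miss for chunk 0 and hits for 1..7.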
bool
CacheFile::ShouldKeepChunk(uint32_t aIndex)
{
AssertOwnsLock();
#ifdef CACHE_CHUNKS
// We cache all chunks.
return true;
#else
// Cache the chunk when this is a memory-only entry or we don't have a handle yet.
if (mMemoryOnly || mOpeningFile) {
return true;
}
uint32_t preloadChunkCount = CacheObserver::PreloadChunkCount();
if (preloadChunkCount == 0) {
// Preloading of chunks is disabled
return false;
}
if (mPreloadWithoutInputStreams && aIndex < preloadChunkCount) {
// We don't have any input stream yet, but it is likely that some will be
// opened soon. Keep the first preloadChunkCount chunks in memory.
return true;
}
// Check whether this chunk should be considered a preloaded chunk for any
// existing input stream.
// maxPos is the position of the last byte in the given chunk
int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
// minPos is the position of the first byte of the chunk that precedes the
// given chunk by PreloadChunkCount chunks.
int64_t minPos;
if (preloadChunkCount >= aIndex) {
minPos = 0;
} else {
minPos = static_cast<int64_t>(aIndex - preloadChunkCount) * kChunkSize;
}
for (uint32_t i = 0; i < mInputs.Length(); ++i) {
int64_t inputPos = mInputs[i]->GetPosition();
if (inputPos >= minPos && inputPos <= maxPos) {
return true;
}
}
return false;
#endif
}
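The window test is easiest to see with concrete numbers: a cached chunk is kept exactly when some input stream's position falls between the first byte of the chunk PreloadChunkCount places earlier and the chunk's own last byte. A worked restatement, again assuming 256 kB chunks; InKeepWindow is illustrative only:

#include <cstdint>
#include <vector>

// Illustrative restatement of the test in ShouldKeepChunk(): keep chunk
// aIndex iff some stream position lies in [minPos, maxPos].
static bool InKeepWindow(uint32_t aIndex, uint32_t aPreloadChunkCount,
                         const std::vector<int64_t>& aStreamPositions,
                         int64_t aChunkSize = 256 * 1024)
{
  // maxPos: last byte of the chunk itself.
  int64_t maxPos = static_cast<int64_t>(aIndex + 1) * aChunkSize - 1;
  // minPos: first byte of the chunk PreloadChunkCount places earlier.
  int64_t minPos = aPreloadChunkCount >= aIndex
      ? 0
      : static_cast<int64_t>(aIndex - aPreloadChunkCount) * aChunkSize;
  for (int64_t pos : aStreamPositions) {
    if (pos >= minPos && pos <= maxPos) {
      return true;
    }
  }
  return false;
}

// With the default pref value (4), a stream at offset 1,600,000 (inside
// chunk 6) keeps chunks 6 through 10; chunks 5 and 11 fall outside:
//   InKeepWindow(10, 4, {1600000}) == true   // minPos = 6 * 256 kB
//   InKeepWindow(11, 4, {1600000}) == false  // minPos = 7 * 256 kB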
nsresult
CacheFile::RemoveChunk(CacheFileChunk *aChunk)
{
@@ -1195,7 +1294,7 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
{
CacheFileAutoLock lock(this);
LOG(("CacheFile::RemoveChunk() [this=%p, chunk=%p, idx=%d]",
LOG(("CacheFile::RemoveChunk() [this=%p, chunk=%p, idx=%u]",
this, aChunk, aChunk->Index()));
MOZ_ASSERT(mReady);
@@ -1227,7 +1326,7 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
if (NS_FAILED(mStatus)) {
// Don't write any chunk to disk since this entry will be doomed
LOG(("CacheFile::RemoveChunk() - Removing chunk because of status "
LOG(("CacheFile::RemoveChunk() - Releasing chunk because of status "
"[this=%p, chunk=%p, mStatus=0x%08x]", this, chunk.get(), mStatus));
RemoveChunkInternal(chunk, false);
@@ -1262,27 +1361,11 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
}
}
#ifdef CACHE_CHUNKS
LOG(("CacheFile::RemoveChunk() - Caching unused chunk [this=%p, chunk=%p]",
this, chunk.get()));
#else
if (mMemoryOnly || mOpeningFile) {
LOG(("CacheFile::RemoveChunk() - Caching unused chunk [this=%p, chunk=%p,"
" reason=%s]", this, chunk.get(),
mMemoryOnly ? "memory-only" : "opening-file"));
} else {
LOG(("CacheFile::RemoveChunk() - Releasing unused chunk [this=%p, "
"chunk=%p]", this, chunk.get()));
}
#endif
bool keepChunk = ShouldKeepChunk(aChunk->Index());
LOG(("CacheFile::RemoveChunk() - %s unused chunk [this=%p, chunk=%p]",
keepChunk ? "Caching" : "Releasing", this, chunk.get()));
RemoveChunkInternal(chunk,
#ifdef CACHE_CHUNKS
true);
#else
// Cache the chunk only when we have a reason to do so
mMemoryOnly || mOpeningFile);
#endif
RemoveChunkInternal(chunk, keepChunk);
if (!mMemoryOnly)
WriteMetadataIfNeededLocked();
@@ -1321,6 +1404,10 @@ CacheFile::RemoveInput(CacheFileInputStream *aInput)
if (!mMemoryOnly)
WriteMetadataIfNeededLocked();
// If the input didn't read all the data, there might be some preloaded
// chunks left that won't be used anymore.
mCachedChunks.Enumerate(&CacheFile::CleanUpPreloadedChunks, this);
return NS_OK;
}
@@ -1359,7 +1446,7 @@ CacheFile::NotifyChunkListener(CacheFileChunkListener *aCallback,
CacheFileChunk *aChunk)
{
LOG(("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
"rv=0x%08x, idx=%d, chunk=%p]", this, aCallback, aTarget, aResult,
"rv=0x%08x, idx=%u, chunk=%p]", this, aCallback, aTarget, aResult,
aChunkIdx, aChunk));
nsresult rv;
@@ -1378,7 +1465,7 @@ nsresult
CacheFile::QueueChunkListener(uint32_t aIndex,
CacheFileChunkListener *aCallback)
{
LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%d, listener=%p]",
LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]",
this, aIndex, aCallback));
AssertOwnsLock();
@@ -1403,7 +1490,7 @@ nsresult
CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
CacheFileChunk *aChunk)
{
LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%d, rv=0x%08x, "
LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08x, "
"chunk=%p]", this, aIndex, aResult, aChunk));
AssertOwnsLock();
@@ -1568,7 +1655,7 @@ CacheFile::WriteAllCachedChunks(const uint32_t& aIdx,
{
CacheFile *file = static_cast<CacheFile*>(aClosure);
LOG(("CacheFile::WriteAllCachedChunks() [this=%p, idx=%d, chunk=%p]",
LOG(("CacheFile::WriteAllCachedChunks() [this=%p, idx=%u, chunk=%p]",
file, aIdx, aChunk.get()));
file->mChunks.Put(aIdx, aChunk);
@@ -1591,7 +1678,7 @@ CacheFile::FailListenersIfNonExistentChunk(
{
CacheFile *file = static_cast<CacheFile*>(aClosure);
LOG(("CacheFile::FailListenersIfNonExistentChunk() [this=%p, idx=%d]",
LOG(("CacheFile::FailListenersIfNonExistentChunk() [this=%p, idx=%u]",
file, aIdx));
nsRefPtr<CacheFileChunk> chunk;
@@ -1621,7 +1708,7 @@ CacheFile::FailUpdateListeners(
CacheFile *file = static_cast<CacheFile*>(aClosure);
#endif
LOG(("CacheFile::FailUpdateListeners() [this=%p, idx=%d]",
LOG(("CacheFile::FailUpdateListeners() [this=%p, idx=%u]",
file, aIdx));
if (aChunk->IsReady()) {
@@ -1631,6 +1718,25 @@ CacheFile::FailUpdateListeners(
return PL_DHASH_NEXT;
}
PLDHashOperator
CacheFile::CleanUpPreloadedChunks(const uint32_t& aIdx,
nsRefPtr<CacheFileChunk>& aChunk,
void* aClosure)
{
CacheFile *file = static_cast<CacheFile*>(aClosure);
LOG(("CacheFile::CleanUpPreloadedChunks() [this=%p, idx=%u, chunk=%p]", file,
aIdx, aChunk.get()));
if (file->ShouldKeepChunk(aIdx)) {
LOG(("CacheFile::CleanUpPreloadedChunks() - Keeping chunk"));
return PL_DHASH_NEXT;
}
LOG(("CacheFile::CleanUpPreloadedChunks() - Removing chunk"));
return PL_DHASH_REMOVE;
}
nsresult
CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
{
@@ -1642,7 +1748,7 @@ CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
nsresult rv;
nsRefPtr<CacheFileChunk> chunk;
rv = GetChunkLocked(aChunkIdx, true, nullptr, getter_AddRefs(chunk));
rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
NS_ENSURE_SUCCESS(rv, rv);
LOG(("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"

View file: netwerk/cache2/CacheFile.h

@@ -121,12 +121,22 @@ private:
void AssertOwnsLock() const;
void ReleaseOutsideLock(nsISupports *aObject);
nsresult GetChunk(uint32_t aIndex, bool aWriter,
enum ECallerType {
READER = 0,
WRITER = 1,
PRELOADER = 2
};
nsresult GetChunk(uint32_t aIndex, ECallerType aCaller,
CacheFileChunkListener *aCallback,
CacheFileChunk **_retval);
nsresult GetChunkLocked(uint32_t aIndex, bool aWriter,
nsresult GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
CacheFileChunkListener *aCallback,
CacheFileChunk **_retval);
void PreloadChunks(uint32_t aIndex);
bool ShouldKeepChunk(uint32_t aIndex);
nsresult RemoveChunk(CacheFileChunk *aChunk);
void RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk);
@@ -162,6 +172,11 @@ private:
nsRefPtr<CacheFileChunk>& aChunk,
void* aClosure);
static PLDHashOperator CleanUpPreloadedChunks(
const uint32_t& aIdx,
nsRefPtr<CacheFileChunk>& aChunk,
void* aClosure);
nsresult PadChunkWithZeroes(uint32_t aChunkIdx);
void SetError(nsresult aStatus);
@@ -176,6 +191,7 @@ private:
bool mDataAccessed;
bool mDataIsDirty;
bool mWritingMetadata;
bool mPreloadWithoutInputStreams;
nsresult mStatus;
int64_t mDataSize;
nsCString mKey;

View file: netwerk/cache2/CacheFileInputStream.cpp

@@ -543,7 +543,8 @@ CacheFileInputStream::EnsureCorrectChunk(bool aReleaseOnly)
return;
}
rv = mFile->GetChunkLocked(chunkIdx, false, this, getter_AddRefs(mChunk));
rv = mFile->GetChunkLocked(chunkIdx, CacheFile::READER, this,
getter_AddRefs(mChunk));
if (NS_FAILED(rv)) {
LOG(("CacheFileInputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
"[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));

View file: netwerk/cache2/CacheFileInputStream.h

@@ -38,6 +38,8 @@ public:
// Memory reporting
size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
uint32_t GetPosition() const { return mPos; };
private:
virtual ~CacheFileInputStream();

View file: netwerk/cache2/CacheFileOutputStream.cpp

@@ -345,7 +345,8 @@ CacheFileOutputStream::EnsureCorrectChunk(bool aReleaseOnly)
return;
nsresult rv;
rv = mFile->GetChunkLocked(chunkIdx, true, nullptr, getter_AddRefs(mChunk));
rv = mFile->GetChunkLocked(chunkIdx, CacheFile::WRITER, nullptr,
getter_AddRefs(mChunk));
if (NS_FAILED(rv)) {
LOG(("CacheFileOutputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
"[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));

View file: netwerk/cache2/CacheObserver.cpp

@@ -56,6 +56,9 @@ uint32_t CacheObserver::sDiskCacheCapacity = kDefaultDiskCacheCapacity;
static bool const kDefaultSmartCacheSizeEnabled = false;
bool CacheObserver::sSmartCacheSizeEnabled = kDefaultSmartCacheSizeEnabled;
static uint32_t const kDefaultPreloadChunkCount = 4;
uint32_t CacheObserver::sPreloadChunkCount = kDefaultPreloadChunkCount;
static uint32_t const kDefaultMaxMemoryEntrySize = 4 * 1024; // 4 MB
uint32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
@@ -141,6 +144,9 @@ CacheObserver::AttachToPreferences()
mozilla::Preferences::AddIntVarCache(
&sMemoryCacheCapacity, "browser.cache.memory.capacity", kDefaultMemoryCacheCapacity);
mozilla::Preferences::AddUintVarCache(
&sPreloadChunkCount, "browser.cache.disk.preload_chunk_count", kDefaultPreloadChunkCount);
mozilla::Preferences::AddUintVarCache(
&sMaxDiskEntrySize, "browser.cache.disk.max_entry_size", kDefaultMaxDiskEntrySize);
mozilla::Preferences::AddUintVarCache(
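The new pref is the only tuning knob: with the assumed 256 kB chunk size, the default of 4 keeps up to 1 MiB in flight ahead of each reader, and setting browser.cache.disk.preload_chunk_count to 0 disables the feature (the loop in PreloadChunks() never runs and ShouldKeepChunk() bails out early). A back-of-the-envelope sketch:

#include <cstdint>

// Hypothetical constants for illustration; kChunkSize mirrors the cache v2
// chunk size, kDefaultPreloadChunkCount the default added by this patch.
constexpr int64_t kChunkSize = 256 * 1024;
constexpr uint32_t kDefaultPreloadChunkCount = 4;

// Upper bound on data read ahead of (and kept around) a single reader:
constexpr int64_t kMaxReadAhead =
    kChunkSize * kDefaultPreloadChunkCount;  // 1 MiB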

View file: netwerk/cache2/CacheObserver.h

@@ -40,6 +40,8 @@ class CacheObserver : public nsIObserver
static void SetDiskCacheCapacity(uint32_t); // parameter in bytes.
static bool const SmartCacheSizeEnabled()
{ return sSmartCacheSizeEnabled; }
static uint32_t const PreloadChunkCount()
{ return sPreloadChunkCount; }
static uint32_t const MaxMemoryEntrySize() // result in bytes.
{ return sMaxMemoryEntrySize << 10; }
static uint32_t const MaxDiskEntrySize() // result in bytes.
@@ -71,6 +73,7 @@ private:
static int32_t sAutoMemoryCacheCapacity;
static uint32_t sDiskCacheCapacity;
static bool sSmartCacheSizeEnabled;
static uint32_t sPreloadChunkCount;
static uint32_t sMaxMemoryEntrySize;
static uint32_t sMaxDiskEntrySize;
static uint32_t sCompressionLevel;