Bug 1254766 - Stop caching Safe Browsing completions to disk. r=gcp

MozReview-Commit-ID: 8Qnc8yymgDL
This commit is contained in:
dimi 2016-08-11 08:17:39 +08:00
Родитель 7d8d990106
Коммит 365c242c26
16 изменённых файлов: 783 добавлений и 278 удалений

Просмотреть файл

@ -14,24 +14,18 @@ class TestSafeBrowsingInitialDownload(FirefoxTestCase):
'platforms': ['linux', 'windows_nt', 'darwin'],
'files': [
# Phishing
"goog-badbinurl-shavar.cache",
"goog-badbinurl-shavar.pset",
"goog-badbinurl-shavar.sbstore",
"goog-malware-shavar.cache",
"goog-malware-shavar.pset",
"goog-malware-shavar.sbstore",
"goog-phish-shavar.cache",
"goog-phish-shavar.pset",
"goog-phish-shavar.sbstore",
"goog-unwanted-shavar.cache",
"goog-unwanted-shavar.pset",
"goog-unwanted-shavar.sbstore",
# Tracking Protections
"base-track-digest256.cache",
"base-track-digest256.pset",
"base-track-digest256.sbstore",
"mozstd-trackwhite-digest256.cache",
"mozstd-trackwhite-digest256.pset",
"mozstd-trackwhite-digest256.sbstore"
]
@ -39,7 +33,6 @@ class TestSafeBrowsingInitialDownload(FirefoxTestCase):
{
'platforms': ['windows_nt'],
'files': [
"goog-downloadwhite-digest256.cache",
"goog-downloadwhite-digest256.pset",
"goog-downloadwhite-digest256.sbstore"
]

Просмотреть файл

@ -304,11 +304,11 @@ Classifier::ApplyUpdates(nsTArray<TableUpdate*>* aUpdates)
LOG(("Applying %d table updates.", aUpdates->Length()));
for (uint32_t i = 0; i < aUpdates->Length(); i++) {
// Previous ApplyTableUpdates() may have consumed this update..
// Previous UpdateHashStore() may have consumed this update..
if ((*aUpdates)[i]) {
// Run all updates for one table
nsCString updateTable(aUpdates->ElementAt(i)->TableName());
rv = ApplyTableUpdates(aUpdates, updateTable);
rv = UpdateHashStore(aUpdates, updateTable);
if (NS_FAILED(rv)) {
if (rv != NS_ERROR_OUT_OF_MEMORY) {
Reset();
@ -344,6 +344,25 @@ Classifier::ApplyUpdates(nsTArray<TableUpdate*>* aUpdates)
return NS_OK;
}
// Apply the results of gethash (full-hash completion) requests to the
// per-table LookupCache gethash caches. Unlike ApplyUpdates(), nothing
// is written to disk here. Consumes and deletes every TableUpdate in
// aUpdates and clears the array.
nsresult
Classifier::ApplyFullHashes(nsTArray<TableUpdate*>* aUpdates)
{
LOG(("Applying %d table gethashes.", aUpdates->Length()));
for (uint32_t i = 0; i < aUpdates->Length(); i++) {
TableUpdate *update = aUpdates->ElementAt(i);
// Forward the update's AddCompletes into the table's gethash cache.
nsresult rv = UpdateCache(update);
NS_ENSURE_SUCCESS(rv, rv);
// The update is consumed: null the slot first, then free it.
aUpdates->ElementAt(i) = nullptr;
delete update;
}
aUpdates->Clear();
return NS_OK;
}
nsresult
Classifier::MarkSpoiled(nsTArray<nsCString>& aTables)
{
@ -354,12 +373,20 @@ Classifier::MarkSpoiled(nsTArray<nsCString>& aTables)
// Remove any cached Completes for this table
LookupCache *cache = GetLookupCache(aTables[i]);
if (cache) {
cache->ClearCompleteCache();
cache->ClearCache();
}
}
return NS_OK;
}
// Return the last successful update time of aTableName in milliseconds,
// or 0 if no freshness entry is recorded for the table.
// mTableFreshness stores seconds (PR_Now() / PR_USEC_PER_SEC), hence the
// PR_MSEC_PER_SEC conversion.
int64_t
Classifier::GetLastUpdateTime(const nsACString& aTableName)
{
int64_t age;
bool found = mTableFreshness.Get(aTableName, &age);
return found ? (age * PR_MSEC_PER_SEC) : 0;
}
void
Classifier::SetLastUpdateTime(const nsACString &aTable,
uint64_t updateTime)
@ -550,24 +577,17 @@ Classifier::RecoverBackups()
return NS_OK;
}
/*
* This will consume+delete updates from the passed nsTArray.
*/
nsresult
Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
bool
Classifier::CheckValidUpdate(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable)
{
LOG(("Classifier::ApplyTableUpdates(%s)", PromiseFlatCString(aTable).get()));
HashStore store(aTable, mStoreDirectory);
// take the quick exit if there is no valid update for us
// (common case)
uint32_t validupdates = 0;
for (uint32_t i = 0; i < aUpdates->Length(); i++) {
TableUpdate *update = aUpdates->ElementAt(i);
if (!update || !update->TableName().Equals(store.TableName()))
if (!update || !update->TableName().Equals(aTable))
continue;
if (update->Empty()) {
aUpdates->ElementAt(i) = nullptr;
@ -579,6 +599,24 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
if (!validupdates) {
// This can happen if the update was only valid for one table.
return false;
}
return true;
}
/*
* This will consume+delete updates from the passed nsTArray.
*/
nsresult
Classifier::UpdateHashStore(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable)
{
LOG(("Classifier::UpdateHashStore(%s)", PromiseFlatCString(aTable).get()));
HashStore store(aTable, mStoreDirectory);
if (!CheckValidUpdate(aUpdates, store.TableName())) {
return NS_OK;
}
@ -588,20 +626,22 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
NS_ENSURE_SUCCESS(rv, rv);
// Read the part of the store that is (only) in the cache
LookupCache *prefixSet = GetLookupCache(store.TableName());
if (!prefixSet) {
LookupCache *lookupCache = GetLookupCache(store.TableName());
if (!lookupCache) {
return NS_ERROR_FAILURE;
}
// Clear the gethash cache whenever an update is applied
lookupCache->ClearCache();
FallibleTArray<uint32_t> AddPrefixHashes;
rv = prefixSet->GetPrefixes(AddPrefixHashes);
rv = lookupCache->GetPrefixes(AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
rv = store.AugmentAdds(AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
AddPrefixHashes.Clear();
uint32_t applied = 0;
bool updateFreshness = false;
bool hasCompletes = false;
for (uint32_t i = 0; i < aUpdates->Length(); i++) {
TableUpdate *update = aUpdates->ElementAt(i);
@ -623,17 +663,6 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
LOG((" %d add expirations", update->AddExpirations().Length()));
LOG((" %d sub expirations", update->SubExpirations().Length()));
if (!update->IsLocalUpdate()) {
updateFreshness = true;
LOG(("Remote update, updating freshness"));
}
if (update->AddCompletes().Length() > 0
|| update->SubCompletes().Length() > 0) {
hasCompletes = true;
LOG(("Contains Completes, keeping cache."));
}
aUpdates->ElementAt(i) = nullptr;
delete update;
}
@ -643,11 +672,6 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
rv = store.Rebuild();
NS_ENSURE_SUCCESS(rv, rv);
// Not an update with Completes, clear all completes data.
if (!hasCompletes) {
store.ClearCompletes();
}
LOG(("Table %s now has:", store.TableName().get()));
LOG((" %d add chunks", store.AddChunks().Length()));
LOG((" %d add prefixes", store.AddPrefixes().Length()));
@ -661,21 +685,41 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
// At this point the store is updated and written out to disk, but
// the data is still in memory. Build our quick-lookup table here.
rv = prefixSet->Build(store.AddPrefixes(), store.AddCompletes());
rv = lookupCache->Build(store.AddPrefixes(), store.AddCompletes());
NS_ENSURE_SUCCESS(rv, rv);
#if defined(DEBUG)
prefixSet->Dump();
lookupCache->Dump();
#endif
rv = prefixSet->WriteFile();
rv = lookupCache->WriteFile();
NS_ENSURE_SUCCESS(rv, rv);
if (updateFreshness) {
int64_t now = (PR_Now() / PR_USEC_PER_SEC);
LOG(("Successfully updated %s", store.TableName().get()));
mTableFreshness.Put(store.TableName(), now);
return NS_OK;
}
// Cache the full-hash completions carried by aUpdate in the owning
// table's LookupCache (gethash cache). A null aUpdate is a no-op;
// a table without a LookupCache is an error.
nsresult
Classifier::UpdateCache(TableUpdate* aUpdate)
{
if (!aUpdate) {
return NS_OK;
}
nsAutoCString table(aUpdate->TableName());
LOG(("Classifier::UpdateCache(%s)", table.get()));
// Fail if no LookupCache exists for this table name.
LookupCache *lookupCache = GetLookupCache(table);
NS_ENSURE_TRUE(lookupCache, NS_ERROR_FAILURE);
lookupCache->AddCompletionsToCache(aUpdate->AddCompletes());
#if defined(DEBUG)
lookupCache->DumpCache();
#endif
return NS_OK;
}

Просмотреть файл

@ -55,12 +55,19 @@ public:
* the updates in the array and clears it. Wacky!
*/
nsresult ApplyUpdates(nsTArray<TableUpdate*>* aUpdates);
/**
* Apply full hashes retrieved from gethash requests to the cache.
*/
nsresult ApplyFullHashes(nsTArray<TableUpdate*>* aUpdates);
/**
* Failed update. Spoil the entries so we don't block hosts
* unnecessarily
*/
nsresult MarkSpoiled(nsTArray<nsCString>& aTables);
void SetLastUpdateTime(const nsACString& aTableName, uint64_t updateTime);
int64_t GetLastUpdateTime(const nsACString& aTableName);
nsresult CacheCompletions(const CacheResultArray& aResults);
uint32_t GetHashKey(void) { return mHashKey; }
/*
@ -84,11 +91,16 @@ private:
nsresult RegenActiveTables();
nsresult ScanStoreDir(nsTArray<nsCString>& aTables);
nsresult ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
nsresult UpdateHashStore(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable);
nsresult UpdateCache(TableUpdate* aUpdates);
LookupCache *GetLookupCache(const nsACString& aTable);
bool CheckValidUpdate(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable);
// Root dir of the Local profile.
nsCOMPtr<nsIFile> mCacheDirectory;
// Main directory where to store the databases.

Просмотреть файл

@ -165,6 +165,7 @@ HashStore::HashStore(const nsACString& aTableName, nsIFile* aStoreDir)
: mTableName(aTableName)
, mStoreDirectory(aStoreDir)
, mInUpdate(false)
, mFileSize(0)
{
}
@ -187,13 +188,18 @@ HashStore::Reset()
rv = storeFile->Remove(false);
NS_ENSURE_SUCCESS(rv, rv);
mFileSize = 0;
return NS_OK;
}
nsresult
HashStore::CheckChecksum(nsIFile* aStoreFile,
uint32_t aFileSize)
HashStore::CheckChecksum(uint32_t aFileSize)
{
if (!mInputStream) {
return NS_OK;
}
// Check for file corruption by
// comparing the stored checksum to actual checksum of data
nsAutoCString hash;
@ -255,11 +261,8 @@ HashStore::Open()
return NS_ERROR_FAILURE;
}
uint32_t fileSize32 = static_cast<uint32_t>(fileSize);
mInputStream = NS_BufferInputStream(origStream, fileSize32);
rv = CheckChecksum(storeFile, fileSize32);
SUCCESS_OR_RESET(rv);
mFileSize = static_cast<uint32_t>(fileSize);
mInputStream = NS_BufferInputStream(origStream, mFileSize);
rv = ReadHeader();
SUCCESS_OR_RESET(rv);
@ -267,9 +270,6 @@ HashStore::Open()
rv = SanityCheck();
SUCCESS_OR_RESET(rv);
rv = ReadChunkNumbers();
SUCCESS_OR_RESET(rv);
return NS_OK;
}
@ -363,7 +363,9 @@ HashStore::UpdateHeader()
nsresult
HashStore::ReadChunkNumbers()
{
NS_ENSURE_STATE(mInputStream);
if (!mInputStream || AlreadyReadChunkNumbers()) {
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET,
@ -403,6 +405,45 @@ HashStore::ReadHashes()
rv = ReadSubPrefixes();
NS_ENSURE_SUCCESS(rv, rv);
// If the completions were already read, then we are done here.
if (AlreadyReadCompletions()) {
return NS_OK;
}
rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes);
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::ReadCompletions()
{
if (!mInputStream || AlreadyReadCompletions()) {
return NS_OK;
}
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(STORE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
uint32_t offset = mFileSize -
sizeof(struct AddComplete) * mHeader.numAddCompletes -
sizeof(struct SubComplete) * mHeader.numSubCompletes -
nsCheckSummedOutputStream::CHECKSUM_SIZE;
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes);
NS_ENSURE_SUCCESS(rv, rv);
@ -412,12 +453,28 @@ HashStore::ReadHashes()
return NS_OK;
}
// Verify the on-disk store's checksum and pull the remaining pieces
// (chunk numbers, hashes) into memory so an update can be applied.
// NOTE(review): SUCCESS_OR_RESET is defined elsewhere in this file —
// presumably it resets the store and propagates the error on failure;
// confirm against the macro definition.
nsresult
HashStore::PrepareForUpdate()
{
nsresult rv = CheckChecksum(mFileSize);
SUCCESS_OR_RESET(rv);
rv = ReadChunkNumbers();
SUCCESS_OR_RESET(rv);
rv = ReadHashes();
SUCCESS_OR_RESET(rv);
return NS_OK;
}
nsresult
HashStore::BeginUpdate()
{
// Read the rest of the store in memory.
nsresult rv = ReadHashes();
SUCCESS_OR_RESET(rv);
// Check whether the file is corrupted and read the rest of the store
// into memory.
nsresult rv = PrepareForUpdate();
NS_ENSURE_SUCCESS(rv, rv);
// Close input stream, won't be needed any more and
// we will rewrite ourselves.
@ -1066,5 +1123,61 @@ HashStore::AugmentAdds(const nsTArray<uint32_t>& aPrefixes)
return NS_OK;
}
// Lazily read the chunk numbers from disk (no-op when already loaded,
// see ReadChunkNumbers/AlreadyReadChunkNumbers), then expose the
// add-chunk set.
ChunkSet&
HashStore::AddChunks()
{
ReadChunkNumbers();
return mAddChunks;
}
// Lazily read the chunk numbers from disk (no-op when already loaded),
// then expose the sub-chunk set.
ChunkSet&
HashStore::SubChunks()
{
ReadChunkNumbers();
return mSubChunks;
}
// Lazily read the completions section from disk (no-op when already
// loaded, see ReadCompletions), then expose the add-completions.
AddCompleteArray&
HashStore::AddCompletes()
{
ReadCompletions();
return mAddCompletes;
}
// Lazily read the completions section from disk (no-op when already
// loaded), then expose the sub-completions.
SubCompleteArray&
HashStore::SubCompletes()
{
ReadCompletions();
return mSubCompletes;
}
// Report whether the chunk numbers have already been loaded into memory.
bool
HashStore::AlreadyReadChunkNumbers()
{
// If the header says chunks exist on disk but the in-memory chunk sets
// are still empty, the chunk numbers have not been read yet.
if ((mHeader.numAddChunks != 0 && mAddChunks.Length() == 0) ||
(mHeader.numSubChunks != 0 && mSubChunks.Length() == 0)) {
return false;
}
return true;
}
// Report whether the completions have already been loaded into memory.
bool
HashStore::AlreadyReadCompletions()
{
// If the header says completions exist on disk but the in-memory
// completion arrays are still empty, they have not been read yet.
if ((mHeader.numAddCompletes != 0 && mAddCompletes.Length() == 0) ||
(mHeader.numSubCompletes != 0 && mSubCompletes.Length() == 0)) {
return false;
}
return true;
}
} // namespace safebrowsing
} // namespace mozilla

Просмотреть файл

@ -23,7 +23,7 @@ namespace safebrowsing {
class TableUpdate {
public:
explicit TableUpdate(const nsACString& aTable)
: mTable(aTable), mLocalUpdate(false) {}
: mTable(aTable) {}
const nsCString& TableName() const { return mTable; }
bool Empty() const {
@ -60,8 +60,6 @@ public:
MOZ_MUST_USE nsresult NewSubComplete(uint32_t aAddChunk,
const Completion& aCompletion,
uint32_t aSubChunk);
void SetLocalUpdate(void) { mLocalUpdate = true; }
bool IsLocalUpdate(void) { return mLocalUpdate; }
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
@ -78,8 +76,6 @@ public:
private:
nsCString mTable;
// Update not from the remote server (no freshness)
bool mLocalUpdate;
// The list of chunk numbers that we have for each of the type of chunks.
ChunkSet mAddChunks;
@ -112,12 +108,12 @@ public:
// prefixes+chunknumbers dataset.
nsresult AugmentAdds(const nsTArray<uint32_t>& aPrefixes);
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
ChunkSet& AddChunks();
ChunkSet& SubChunks();
AddPrefixArray& AddPrefixes() { return mAddPrefixes; }
AddCompleteArray& AddCompletes() { return mAddCompletes; }
SubPrefixArray& SubPrefixes() { return mSubPrefixes; }
SubCompleteArray& SubCompletes() { return mSubCompletes; }
AddCompleteArray& AddCompletes();
SubCompleteArray& SubCompletes();
// =======
// Updates
@ -149,9 +145,10 @@ private:
nsresult SanityCheck();
nsresult CalculateChecksum(nsAutoCString& aChecksum, uint32_t aFileSize,
bool aChecksumPresent);
nsresult CheckChecksum(nsIFile* aStoreFile, uint32_t aFileSize);
nsresult CheckChecksum(uint32_t aFileSize);
void UpdateHeader();
nsresult ReadCompletions();
nsresult ReadChunkNumbers();
nsresult ReadHashes();
@ -163,6 +160,11 @@ private:
nsresult ProcessSubs();
nsresult PrepareForUpdate();
bool AlreadyReadChunkNumbers();
bool AlreadyReadCompletions();
// This is used for checking that the database is correct and for figuring out
// the number of chunks, etc. to read from disk on restart.
struct Header {
@ -202,6 +204,8 @@ private:
// updates from the completion server and updates from the regular server.
AddCompleteArray mAddCompletes;
SubCompleteArray mSubCompletes;
uint32_t mFileSize;
};
} // namespace safebrowsing

Просмотреть файл

@ -6,7 +6,6 @@
#include "LookupCache.h"
#include "HashStore.h"
#include "nsISeekableStream.h"
#include "nsISafeOutputStream.h"
#include "mozilla/Telemetry.h"
#include "mozilla/Logging.h"
#include "nsNetUtil.h"
@ -17,19 +16,17 @@
// The latter solely exists to store the data needed to handle
// the updates from the protocol.
// This module has its own store, which stores the Completions,
// mostly caching lookups that have happened over the net.
// The prefixes are cached/checked by looking them up in the
// PrefixSet.
// This module provides a front for PrefixSet, mUpdateCompletions,
// and mGetHashCache, which together contain everything needed to
// provide a classification as long as the data is up to date.
// Data format for the ".cache" files:
// uint32_t magic Identify the file type
// uint32_t version Version identifier for file format
// uint32_t numCompletions Amount of completions stored
// 0...numCompletions 256-bit Completions
// Name of the lookupcomplete cache
#define CACHE_SUFFIX ".cache"
// PrefixSet stores and provides lookups for 4-byte prefixes.
// mUpdateCompletions contains 32-byte completions which were
// contained in updates. They are retrieved from HashStore/.sbtore
// on startup.
// mGetHashCache contains 32-byte completions which were
// returned from the gethash server. They are not serialized,
// only cached until the next update.
// Name of the persistent PrefixSet storage
#define PREFIXSET_SUFFIX ".pset"
@ -42,9 +39,6 @@ extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
namespace mozilla {
namespace safebrowsing {
const uint32_t LOOKUPCACHE_MAGIC = 0x1231af3e;
const uint32_t CURRENT_VERSION = 2;
LookupCache::LookupCache(const nsACString& aTableName, nsIFile* aStoreDir)
: mPrimed(false)
, mTableName(aTableName)
@ -69,40 +63,10 @@ LookupCache::~LookupCache()
nsresult
LookupCache::Open()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
LOG(("Reading Completions"));
nsresult rv = ReadCompletions();
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIInputStream> inputStream;
rv = NS_NewLocalFileInputStream(getter_AddRefs(inputStream), storeFile,
PR_RDONLY | nsIFile::OS_READAHEAD);
if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
Reset();
return rv;
}
if (rv == NS_ERROR_FILE_NOT_FOUND) {
// Simply lacking a .cache file is a recoverable error,
// as unlike the .pset/.sbstore files it is a pure cache.
// Just create a new empty one.
ClearCompleteCache();
} else {
// Read in the .cache file
rv = ReadHeader(inputStream);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("ReadCompletions"));
rv = ReadCompletions(inputStream);
NS_ENSURE_SUCCESS(rv, rv);
rv = inputStream->Close();
NS_ENSURE_SUCCESS(rv, rv);
}
LOG(("Loading PrefixSet"));
rv = LoadPrefixSet();
NS_ENSURE_SUCCESS(rv, rv);
@ -121,20 +85,13 @@ LookupCache::Reset()
{
LOG(("LookupCache resetting"));
nsCOMPtr<nsIFile> storeFile;
nsCOMPtr<nsIFile> prefixsetFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = mStoreDirectory->Clone(getter_AddRefs(prefixsetFile));
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(prefixsetFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = prefixsetFile->AppendNative(mTableName + NS_LITERAL_CSTRING(PREFIXSET_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->Remove(false);
NS_ENSURE_SUCCESS(rv, rv);
rv = prefixsetFile->Remove(false);
NS_ENSURE_SUCCESS(rv, rv);
@ -151,13 +108,13 @@ LookupCache::Build(AddPrefixArray& aAddPrefixes,
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_LC_COMPLETIONS,
static_cast<uint32_t>(aAddCompletes.Length()));
mCompletions.Clear();
mCompletions.SetCapacity(aAddCompletes.Length());
mUpdateCompletions.Clear();
mUpdateCompletions.SetCapacity(aAddCompletes.Length());
for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
mCompletions.AppendElement(aAddCompletes[i].CompleteHash());
mUpdateCompletions.AppendElement(aAddCompletes[i].CompleteHash());
}
aAddCompletes.Clear();
mCompletions.Sort();
mUpdateCompletions.Sort();
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_LC_PREFIXES,
static_cast<uint32_t>(aAddPrefixes.Length()));
@ -169,17 +126,43 @@ LookupCache::Build(AddPrefixArray& aAddPrefixes,
return NS_OK;
}
// Merge gethash results into mGetHashCache, skipping completions that
// are already present, then re-sort the array so that BinaryIndexOf
// lookups (see Has()) remain valid.
nsresult
LookupCache::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
{
for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
}
}
mGetHashCache.Sort();
return NS_OK;
}
#if defined(DEBUG)
// Debug helper: log every full-hash completion currently held in the
// gethash cache as a hex string. No-op when logging is disabled.
void
LookupCache::DumpCache()
{
if (!LOG_ENABLED())
return;
for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
nsAutoCString str;
mGetHashCache[i].ToHexString(str);
LOG(("Caches: %s", str.get()));
}
}
void
LookupCache::Dump()
{
if (!LOG_ENABLED())
return;
for (uint32_t i = 0; i < mCompletions.Length(); i++) {
for (uint32_t i = 0; i < mUpdateCompletions.Length(); i++) {
nsAutoCString str;
mCompletions[i].ToHexString(str);
LOG(("Completion: %s", str.get()));
mUpdateCompletions[i].ToHexString(str);
LOG(("Update: %s", str.get()));
}
}
#endif
@ -202,7 +185,9 @@ LookupCache::Has(const Completion& aCompletion,
*aHas = true;
}
if (mCompletions.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex) {
// TODO: We may need to distinguish completions found in cache or update in the future
if ((mGetHashCache.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex) ||
(mUpdateCompletions.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex)) {
LOG(("Complete in %s", mTableName.get()));
*aComplete = true;
*aHas = true;
@ -214,36 +199,8 @@ LookupCache::Has(const Completion& aCompletion,
nsresult
LookupCache::WriteFile()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIOutputStream> out;
rv = NS_NewSafeLocalFileOutputStream(getter_AddRefs(out), storeFile,
PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE);
NS_ENSURE_SUCCESS(rv, rv);
UpdateHeader();
LOG(("Writing %d completions", mHeader.numCompletions));
uint32_t written;
rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written);
NS_ENSURE_SUCCESS(rv, rv);
rv = WriteTArray(out, mCompletions);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out);
rv = safeOut->Finish();
NS_ENSURE_SUCCESS(rv, rv);
rv = EnsureSizeConsistent();
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIFile> psFile;
rv = mStoreDirectory->Clone(getter_AddRefs(psFile));
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(psFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = psFile->AppendNative(mTableName + NS_LITERAL_CSTRING(PREFIXSET_SUFFIX));
@ -258,102 +215,39 @@ LookupCache::WriteFile()
void
LookupCache::ClearAll()
{
ClearCompleteCache();
ClearCache();
ClearUpdatedCompletions();
mPrefixSet->SetPrefixes(nullptr, 0);
mPrimed = false;
}
void
LookupCache::ClearCompleteCache()
LookupCache::ClearUpdatedCompletions()
{
mCompletions.Clear();
UpdateHeader();
mUpdateCompletions.Clear();
}
void
LookupCache::UpdateHeader()
LookupCache::ClearCache()
{
mHeader.magic = LOOKUPCACHE_MAGIC;
mHeader.version = CURRENT_VERSION;
mHeader.numCompletions = mCompletions.Length();
mGetHashCache.Clear();
}
nsresult
LookupCache::EnsureSizeConsistent()
LookupCache::ReadCompletions()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
HashStore store(mTableName, mStoreDirectory);
nsresult rv = store.Open();
NS_ENSURE_SUCCESS(rv, rv);
int64_t fileSize;
rv = storeFile->GetFileSize(&fileSize);
NS_ENSURE_SUCCESS(rv, rv);
mUpdateCompletions.Clear();
if (fileSize < 0) {
return NS_ERROR_FAILURE;
const AddCompleteArray& addComplete = store.AddCompletes();
for (uint32_t i = 0; i < addComplete.Length(); i++) {
mUpdateCompletions.AppendElement(addComplete[i].complete);
}
int64_t expectedSize = sizeof(mHeader)
+ mHeader.numCompletions*sizeof(Completion);
if (expectedSize != fileSize) {
NS_WARNING("File length does not match. Probably corrupted.");
Reset();
return NS_ERROR_FILE_CORRUPTED;
}
return NS_OK;
}
nsresult
LookupCache::ReadHeader(nsIInputStream* aInputStream)
{
if (!aInputStream) {
ClearCompleteCache();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(aInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(rv, rv);
void *buffer = &mHeader;
rv = NS_ReadInputStreamToBuffer(aInputStream,
&buffer,
sizeof(Header));
NS_ENSURE_SUCCESS(rv, rv);
if (mHeader.magic != LOOKUPCACHE_MAGIC || mHeader.version != CURRENT_VERSION) {
NS_WARNING("Unexpected header data in the store.");
Reset();
return NS_ERROR_FILE_CORRUPTED;
}
LOG(("%d completions present", mHeader.numCompletions));
rv = EnsureSizeConsistent();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
LookupCache::ReadCompletions(nsIInputStream* aInputStream)
{
if (!mHeader.numCompletions) {
mCompletions.Clear();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(aInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, sizeof(Header));
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(aInputStream, &mCompletions, mHeader.numCompletions);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Read %d completions", mCompletions.Length()));
return NS_OK;
}

Просмотреть файл

@ -108,10 +108,13 @@ public:
// This will Clear() the passed arrays when done.
nsresult Build(AddPrefixArray& aAddPrefixes,
AddCompleteArray& aAddCompletes);
nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
nsresult GetPrefixes(FallibleTArray<uint32_t>& aAddPrefixes);
void ClearCompleteCache();
void ClearUpdatedCompletions();
void ClearCache();
#if DEBUG
void DumpCache();
void Dump();
#endif
nsresult WriteFile();
@ -122,28 +125,22 @@ public:
private:
void ClearAll();
nsresult Reset();
void UpdateHeader();
nsresult ReadHeader(nsIInputStream* aInputStream);
nsresult ReadCompletions(nsIInputStream* aInputStream);
nsresult EnsureSizeConsistent();
nsresult ReadCompletions();
nsresult LoadPrefixSet();
nsresult LoadCompletions();
// Construct a Prefix Set with known prefixes.
// This will Clear() aAddPrefixes when done.
nsresult ConstructPrefixSet(AddPrefixArray& aAddPrefixes);
struct Header {
uint32_t magic;
uint32_t version;
uint32_t numCompletions;
};
Header mHeader;
bool mPrimed;
nsCString mTableName;
nsCOMPtr<nsIFile> mStoreDirectory;
CompletionArray mCompletions;
// Set of prefixes known to be in the database
RefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
// Full length hashes obtained in update request
CompletionArray mUpdateCompletions;
// Full length hashes obtained in gethash request
CompletionArray mGetHashCache;
};
} // namespace safebrowsing

Просмотреть файл

@ -191,6 +191,13 @@ interface nsIUrlClassifierDBService : nsISupports
* database, emptying all tables. Mostly intended for use in unit tests.
*/
void resetDatabase();
/**
* Reload the url-classifier database. This will empty all caches of
* gethash completions and reload the data from the database. Mostly
* intended for use in tests.
*/
void reloadDatabase();
};
/**

Просмотреть файл

@ -628,6 +628,38 @@ nsUrlClassifierDBServiceWorker::ResetDatabase()
return NS_OK;
}
// Close and re-open the classifier, which drops the in-memory gethash
// caches and reloads the prefix sets and update completions from disk.
// Intended for use by tests (see nsIUrlClassifierDBService.reloadDatabase).
NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::ReloadDatabase()
{
nsTArray<nsCString> tables;
nsTArray<int64_t> lastUpdateTimes;
nsresult rv = mClassifier->ActiveTables(tables);
NS_ENSURE_SUCCESS(rv, rv);
// Preserve each table's last-update time across the reload; otherwise
// subsequent requests would be skipped as unconfirmed.
for (uint32_t table = 0; table < tables.Length(); table++) {
lastUpdateTimes.AppendElement(mClassifier->GetLastUpdateTime(tables[table]));
}
// This will null out mClassifier
rv = CloseDb();
NS_ENSURE_SUCCESS(rv, rv);
// Create new mClassifier and load prefixset and completions from disk.
rv = OpenDb();
NS_ENSURE_SUCCESS(rv, rv);
// Restore the saved update times; 0 means "never updated", so skip it.
for (uint32_t table = 0; table < tables.Length(); table++) {
int64_t time = lastUpdateTimes[table];
if (time) {
mClassifier->SetLastUpdateTime(tables[table], lastUpdateTimes[table]);
}
}
return NS_OK;
}
NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::CancelUpdate()
{
@ -721,7 +753,6 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
if (NS_FAILED(rv)) {
return rv;
}
tu->SetLocalUpdate();
updates.AppendElement(tu);
pParse->ForgetTableUpdates();
} else {
@ -729,7 +760,7 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
}
}
mClassifier->ApplyUpdates(&updates);
mClassifier->ApplyFullHashes(&updates);
mLastResults = *resultsPtr;
return NS_OK;
}
@ -1591,6 +1622,14 @@ nsUrlClassifierDBService::ResetDatabase()
return mWorkerProxy->ResetDatabase();
}
// Main-thread entry point: forward reloadDatabase to the worker proxy.
// Fails with NS_ERROR_NOT_INITIALIZED if the DB background thread is gone.
NS_IMETHODIMP
nsUrlClassifierDBService::ReloadDatabase()
{
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
return mWorkerProxy->ReloadDatabase();
}
nsresult
nsUrlClassifierDBService::CacheCompletions(CacheResultArray *results)
{

Просмотреть файл

@ -171,6 +171,15 @@ UrlClassifierDBServiceWorkerProxy::ResetDatabase()
return DispatchToWorkerThread(r);
}
// Dispatch ReloadDatabase to the worker thread as a runnable, mirroring
// the other proxy methods in this class (e.g. ResetDatabase).
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::ReloadDatabase()
{
nsCOMPtr<nsIRunnable> r =
NewRunnableMethod(mTarget,
&nsIUrlClassifierDBServiceWorker::ReloadDatabase);
return DispatchToWorkerThread(r);
}
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::OpenDb()
{

Просмотреть файл

@ -3,6 +3,9 @@
const { classes: Cc, interfaces: Ci, results: Cr } = Components;
var dbService = Cc["@mozilla.org/url-classifier/dbservice;1"]
.getService(Ci.nsIUrlClassifierDBService);
function setTimeout(callback, delay) {
let timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
timer.initWithCallback({ notify: callback },
@ -11,8 +14,6 @@ function setTimeout(callback, delay) {
}
function doUpdate(update) {
const { classes: Cc, interfaces: Ci, results: Cr } = Components;
let listener = {
QueryInterface: function(iid)
{
@ -48,6 +49,63 @@ function doUpdate(update) {
}
}
// Reload the url-classifier database, then tell the test harness it is done.
function doReload() {
dbService.reloadDatabase();
sendAsyncMessage("reloadSuccess");
}
// SafeBrowsing.jsm is initialized after mozEntries are added. Add observer
// to receive "finished" event. For the case when this function is called
// after the event had already been notified, we lookup entries to see if
// they are already added to database.
function waitForInit() {
let observerService = Cc["@mozilla.org/observer-service;1"]
.getService(Ci.nsIObserverService);
// Case 1: initialization finishes after this call — observe the
// "mozentries-update-finished" notification.
observerService.addObserver(function() {
sendAsyncMessage("safeBrowsingInited");
}, "mozentries-update-finished", false);
// Case 2: initialization already finished — look up a known mozEntries
// URL; a hit on the test table means the entries are in the database.
// This URL must stay in sync with the table/url used by
// SafeBrowsing.jsm addMozEntries.
const table = "test-phish-simple";
const url = "http://itisatrap.org/firefox/its-a-trap.html";
let secMan = Cc["@mozilla.org/scriptsecuritymanager;1"]
.getService(Ci.nsIScriptSecurityManager);
let iosvc = Cc["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService);
let principal = secMan.createCodebasePrincipal(
iosvc.newURI(url, null, null), {});
let listener = {
QueryInterface: function(iid)
{
if (iid.equals(Ci.nsISupports) ||
iid.equals(Ci.nsIUrlClassifierUpdateObserver))
return this;
throw Cr.NS_ERROR_NO_INTERFACE;
},
handleEvent: function(value)
{
// A lookup hit on the test table => entries are already present.
if (value === table) {
sendAsyncMessage("safeBrowsingInited");
}
},
};
dbService.lookup(principal, table, listener);
}
addMessageListener("doUpdate", ({ testUpdate }) => {
doUpdate(testUpdate);
});
addMessageListener("doReload", () => {
doReload();
});
addMessageListener("waitForInit", () => {
waitForInit();
});

Просмотреть файл

@ -23,6 +23,17 @@ classifierHelper._updates = [];
// removed after test complete.
classifierHelper._updatesToCleanup = [];
classifierHelper._initsCB = [];
// This function returns a Promise that is resolved once SafeBrowsing.jsm
// is initialized.
classifierHelper.waitForInit = function() {
return new Promise(function(resolve, reject) {
// Queue the resolver; _inited() drains the queue when the chrome
// script sends "safeBrowsingInited".
classifierHelper._initsCB.push(resolve);
gScript.sendAsyncMessage("waitForInit");
});
}
// This function is used to allow completion for specific "list",
// some lists like "test-malware-simple" is default disabled to ask for complete.
// "list" is the db we would like to allow it
@ -115,6 +126,17 @@ classifierHelper.resetDB = function() {
});
};
// Ask the chrome script to reload the url-classifier database.
// Resolves once the "reloadSuccess" message comes back; the one-shot
// listener removes itself to avoid leaking across test cases.
classifierHelper.reloadDatabase = function() {
return new Promise(function(resolve, reject) {
gScript.addMessageListener("reloadSuccess", function handler() {
gScript.removeMessageListener('reloadSuccess', handler);
resolve();
});
gScript.sendAsyncMessage("doReload");
});
}
classifierHelper._update = function(testUpdate, onsuccess, onerror) {
// Queue the task if there is still an on-going update
classifierHelper._updates.push({"data": testUpdate,
@ -147,9 +169,17 @@ classifierHelper._updateError = function(errorCode) {
}
};
// Internal: invoked when the chrome script reports "safeBrowsingInited".
// Resolves every promise handed out by waitForInit() and then empties the
// callback queue (same order as before: run callbacks first, clear last).
classifierHelper._inited = function() {
  var pending = classifierHelper._initsCB;
  pending.forEach(cb => cb());
  classifierHelper._initsCB = [];
};
classifierHelper._setup = function() {
gScript.addMessageListener("updateSuccess", classifierHelper._updateSuccess);
gScript.addMessageListener("updateError", classifierHelper._updateError);
gScript.addMessageListener("safeBrowsingInited", classifierHelper._inited);
// cleanup will be called at end of each testcase to remove all the urls added to database.
SimpleTest.registerCleanupFunction(classifierHelper._cleanup);

Просмотреть файл

@ -11,6 +11,8 @@ function handleRequest(request, response)
query[val.slice(0, idx)] = unescape(val.slice(idx + 1));
});
var responseBody;
// Store fullhash in the server side.
if ("list" in query && "fullhash" in query) {
// In the server side we will store:
@ -31,8 +33,12 @@ function handleRequest(request, response)
}
return;
}
// "gethashcount" returns how many gethash requests have been received so far.
// The client uses this to tell whether Gecko actually issued a gethash request.
} else if ("gethashcount" == request.queryString) {
var counter = getState("counter");
responseBody = counter == "" ? "0" : counter;
} else {
var body = new BinaryInputStream(request.bodyInputStream);
var avail;
var bytes = [];
@ -41,7 +47,12 @@ function handleRequest(request, response)
Array.prototype.push.apply(bytes, body.readByteArray(avail));
}
var responseBody = parseV2Request(bytes);
var counter = getState("counter");
counter = counter == "" ? "1" : (parseInt(counter) + 1).toString();
setState("counter", counter);
responseBody = parseV2Request(bytes);
}
response.setHeader("Content-Type", "text/plain", false);
response.write(responseBody);

Просмотреть файл

@ -33,3 +33,4 @@ skip-if = (os == 'linux' && debug) #Bug 1199778
[test_classify_ping.html]
[test_classify_track.html]
[test_gethash.html]
[test_bug1254766.html]

Просмотреть файл

@ -0,0 +1,299 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Bug 1254766 - Test gethash.</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="classifierHelper.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<p id="display"></p>
<div id="content" style="display: none">
</div>
<pre id="test">
<script class="testbody" type="text/javascript">
// Test list names and the URLs registered on them. The *_LIST names match
// the test-* tables the classifier ships with; hosts end with "/" because
// that is the canonical form the url-classifier hashes.
const MALWARE_LIST = "test-malware-simple";
const MALWARE_HOST1 = "malware.example.com/";
const MALWARE_HOST2 = "test1.example.com/";

const UNWANTED_LIST = "test-unwanted-simple";
const UNWANTED_HOST1 = "unwanted.example.com/";
const UNWANTED_HOST2 = "test2.example.com/";

// Hosts only used to push an "unrelated" update (clears the gethash cache
// without touching the entries under test).
const UNUSED_MALWARE_HOST = "unused.malware.com/";
const UNUSED_UNWANTED_HOST = "unused.unwanted.com/";

// Server-side .sjs endpoint that both answers gethash requests and counts
// how many it has received.
const GETHASH_URL =
"http://mochi.test:8888/tests/toolkit/components/url-classifier/tests/mochitest/gethash.sjs";

// Gethash-request counters sampled from the server: "pre" is the value at
// the previous sample, "cur" the latest; comparing them tells whether the
// last page load triggered a gethash request.
var gPreGethashCounter = 0;
var gCurGethashCounter = 0;

// NOTE(review): not referenced in this file — presumably read by
// gethashFrame.html loaded in the test iframe; confirm before removing.
var expectLoad = false;
// Loads gethashFrame.html in a temporary iframe (which classifies the
// malware/unwanted URLs), removes the frame once loaded, then refreshes
// the gethash counters from the server.
function loadTestFrame() {
  var frameLoaded = new Promise(function(resolve) {
    var testFrame = document.createElement("iframe");
    testFrame.onload = function() {
      document.body.removeChild(testFrame);
      resolve();
    };
    testFrame.setAttribute("src", "gethashFrame.html");
    document.body.appendChild(testFrame);
  });
  return frameLoaded.then(getGethashCounter);
}
// Asks the .sjs server how many gethash requests it has received so far,
// shifting the previous sample into gPreGethashCounter and storing the new
// value in gCurGethashCounter.
function getGethashCounter() {
  return new Promise(function(resolve) {
    var req = new XMLHttpRequest;
    req.open("PUT", GETHASH_URL + "?gethashcount");
    req.setRequestHeader("Content-Type", "text/plain");
    req.onreadystatechange = function() {
      if (req.readyState != req.DONE) {
        return;
      }
      gPreGethashCounter = gCurGethashCounter;
      gCurGethashCounter = parseInt(req.response);
      resolve();
    };
    req.send();
  });
}
// Computes the full hash of |url| and uploads it to the gethash server, so
// the server can hand the completion back when the matching prefix is
// looked up.
function addCompletionToServer(list, url) {
  return new Promise(function(resolve) {
    var query = "list=" + list + "&" + "fullhash=" + hash(url);
    var req = new XMLHttpRequest;
    req.open("PUT", GETHASH_URL + "?" + query, true);
    req.setRequestHeader("Content-Type", "text/plain");
    req.onreadystatechange = function() {
      if (req.readyState == req.DONE) {
        resolve();
      }
    };
    req.send();
  });
}
// Returns the SHA-256 digest of |str| in base64 (finish(true)), i.e. the
// fullhash representation the gethash protocol expects.
function hash(str) {
  var converter =
    SpecialPowers.Cc["@mozilla.org/intl/scriptableunicodeconverter"]
      .createInstance(SpecialPowers.Ci.nsIScriptableUnicodeConverter);
  converter.charset = "UTF-8";
  var bytes = converter.convertToByteArray(str);

  var hasher = SpecialPowers.Cc["@mozilla.org/security/hash;1"]
      .createInstance(SpecialPowers.Ci.nsICryptoHash);
  hasher.init(hasher.SHA256);
  hasher.update(bytes, bytes.length);
  return hasher.finish(true);
}
// Allows the classifier to issue gethash requests for the test lists, and
// seeds the server with the full hashes of every URL under test.
function setup() {
  classifierHelper.allowCompletion([MALWARE_LIST, UNWANTED_LIST], GETHASH_URL);

  var completions = [
    [MALWARE_LIST, MALWARE_HOST1],
    [MALWARE_LIST, MALWARE_HOST2],
    [UNWANTED_LIST, UNWANTED_HOST1],
    [UNWANTED_LIST, UNWANTED_HOST2],
  ];
  return Promise.all(
    completions.map(([list, host]) => addCompletionToServer(list, host)));
}
// Resets the url-classifier database, simulating the state after a browser
// restart. On failure, reports the error and aborts the test.
function reset() {
  return classifierHelper.resetDB()
    .catch(err => {
      // Bug fix: this previously logged the undefined identifier
      // |errorCode| (the catch parameter is |err|), which would have
      // thrown a ReferenceError instead of reporting the failure. The
      // message also wrongly said "update" for a reset failure.
      ok(false, "Couldn't reset classifier. Error code: " + err);
      // Abort test.
      SimpleTest.finish();
    });
}
// Pushes an update containing only the unused test hosts; this clears the
// gethash completion cache without touching the entries under test.
function updateUnusedUrl() {
  var entries = [
    { url: UNUSED_MALWARE_HOST, db: MALWARE_LIST },
    { url: UNUSED_UNWANTED_HOST, db: UNWANTED_LIST }
  ];
  return classifierHelper.addUrlToDB(entries)
    .catch(function(err) {
      ok(false, "Couldn't update classifier. Error code: " + err);
      // Abort test.
      SimpleTest.finish();
    });
}
// Adds the test URLs as 4-byte prefixes, so a full match requires gethash.
function addPrefixToDB() {
  var usePrefix = true;
  return update(usePrefix);
}
// Adds the test URLs as full 32-byte hashes, so lookups complete locally.
function addCompletionToDB() {
  var usePrefix = false;
  return update(usePrefix);
}
// Pushes the four test URLs to the database, either as 4-byte prefixes
// (prefix == true) or as full 32-byte hashes (prefix == false). On failure,
// reports the error and aborts the test.
function update(prefix = false) {
  var length = prefix ? 4 : 32;
  var testData = [
    { url: MALWARE_HOST1, db: MALWARE_LIST, len: length },
    { url: MALWARE_HOST2, db: MALWARE_LIST, len: length },
    { url: UNWANTED_HOST1, db: UNWANTED_LIST, len: length },
    { url: UNWANTED_HOST2, db: UNWANTED_LIST, len: length }
  ];

  return classifierHelper.addUrlToDB(testData)
    .catch(err => {
      // Bug fix: this previously logged the undefined identifier
      // |errorCode| (the catch parameter is |err|), which would have
      // thrown a ReferenceError instead of reporting the failure.
      ok(false, "Couldn't update classifier. Error code: " + err);
      // Abort test.
      SimpleTest.finish();
    });
}
// Verifies basic gethash behavior:
// 1. Add prefixes to the DB.
// 2. Loading the test frame blocks the malware & unwanted URLs and must
//    trigger a gethash request (no completion in cache or DB yet).
// 3. A second load must NOT trigger gethash: the completion is now cached.
function testGethash() {
  var steps = [
    addPrefixToDB,
    loadTestFrame,
    () => ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."),
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Verifies that an update request clears the completion cache:
// 1. Add prefixes to the DB.
// 2. Load the test frame -> triggers a gethash request.
// 3. Push an unrelated update, which wipes the completion cache.
// 4. Load the test frame again -> cache is gone, so gethash fires again.
function testUpdateClearCache() {
  var steps = [
    addPrefixToDB,
    loadTestFrame,
    () => ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."),
    updateUnusedUrl,
    loadTestFrame,
    () => ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Verifies completions delivered via update work:
// 1. Add full-length completions to the DB.
// 2. Load the test frame; the lookup completes locally, so no gethash
//    request may be sent.
function testUpdate() {
  var steps = [
    addCompletionToDB,
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Verifies an update request does NOT clear completions stored in the DB:
// 1. Add completions to the DB.
// 2. Load the test frame -> completions are in the DB, no gethash.
// 3. Push an unrelated update: the cache is cleared, but DB completions
//    must survive.
// 4. Load the test frame again -> still no gethash request.
function testUpdateNotClearCompletions() {
  var steps = [
    addCompletionToDB,
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    updateUnusedUrl,
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Verifies completions stored in the DB survive a restart:
// 1. Add completions to the DB.
// 2. Simulate a Firefox restart via reloadDatabase().
// 3. Load the test frame; the completions must be reloaded from the DB,
//    so no gethash request may be sent.
function testUpdateCompletionsAfterReload() {
  var steps = [
    addCompletionToDB,
    classifierHelper.reloadDatabase,
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Verifies the gethash cache is cleared by a restart:
// 1. Add prefixes to the DB.
// 2. Load the test frame -> gethash fires and its result is cached.
// 3. Load again -> served from cache, no gethash.
// 4. Simulate a Firefox restart via reloadDatabase().
// 5. Load again -> the cache is gone, so gethash must fire again.
function testGethashCompletionsAfterReload() {
  var steps = [
    addPrefixToDB,
    loadTestFrame,
    () => ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."),
    loadTestFrame,
    () => ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."),
    classifierHelper.reloadDatabase,
    loadTestFrame,
    () => ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."),
    reset,
  ];
  return steps.reduce((chain, step) => chain.then(step), Promise.resolve());
}
// Runs every sub-test in sequence, finishing the harness at the end and
// reporting any rejection along the way as a single failure.
function runTest() {
  var tests = [
    classifierHelper.waitForInit,
    setup,
    testGethash,
    testUpdateClearCache,
    testUpdate,
    testUpdateNotClearCompletions,
    testUpdateCompletionsAfterReload,
    testGethashCompletionsAfterReload,
  ];
  tests.reduce((chain, test) => chain.then(test), Promise.resolve())
    .then(function() {
      SimpleTest.finish();
    })
    .catch(function(e) {
      ok(false, "Some test failed with error " + e);
      SimpleTest.finish();
    });
}
SimpleTest.waitForExplicitFinish();
// Safe Browsing malware protection must be on for the classifier to block
// anything in this test; runTest starts once the pref has been applied.
SpecialPowers.pushPrefEnv({"set": [
  ["browser.safebrowsing.malware.enabled", true]
]}, runTest);
</script>
</pre>
</body>
</html>

Просмотреть файл

@ -58,12 +58,6 @@ function cleanUp() {
delFile("safebrowsing/test-block-simple.sbstore");
delFile("safebrowsing/test-track-simple.sbstore");
delFile("safebrowsing/test-trackwhite-simple.sbstore");
delFile("safebrowsing/test-phish-simple.cache");
delFile("safebrowsing/test-malware-simple.cache");
delFile("safebrowsing/test-unwanted-simple.cache");
delFile("safebrowsing/test-block-simple.cache");
delFile("safebrowsing/test-track-simple.cache");
delFile("safebrowsing/test-trackwhite-simple.cache");
delFile("safebrowsing/test-phish-simple.pset");
delFile("safebrowsing/test-malware-simple.pset");
delFile("safebrowsing/test-unwanted-simple.pset");