Backed out 4 changesets (bug 1311935) for causing an assertion crash, at the developer's request

Backed out changeset 27e624cd9479 (bug 1311935)
Backed out changeset 4c0381ab0990 (bug 1311935)
Backed out changeset 73587838ef16 (bug 1311935)
Backed out changeset a5a6c0f79733 (bug 1311935)
Iris Hsiao 2017-04-11 11:04:54 +08:00
Parent f75dd3437a
Commit 3d85c0330d
24 changed files with 259 additions and 1029 deletions

View file

@ -393,22 +393,15 @@ Classifier::TableRequest(nsACString& aResult)
HashStore store(tables[i], GetProvider(tables[i]), mRootStoreDirectory);
nsresult rv = store.Open();
if (NS_FAILED(rv)) {
if (NS_FAILED(rv))
continue;
}
ChunkSet &adds = store.AddChunks();
ChunkSet &subs = store.SubChunks();
// Opening the HashStore will always succeed even if it is not a v2 table,
// so skip tables without add and sub chunks.
if (adds.Length() == 0 && subs.Length() == 0) {
continue;
}
aResult.Append(store.TableName());
aResult.Append(';');
ChunkSet &adds = store.AddChunks();
ChunkSet &subs = store.SubChunks();
if (adds.Length() > 0) {
aResult.AppendLiteral("a:");
nsAutoCString addList;
@ -496,18 +489,25 @@ Classifier::Check(const nsACString& aSpec,
for (uint32_t i = 0; i < cacheArray.Length(); i++) {
LookupCache *cache = cacheArray[i];
bool has, fromCache, confirmed;
bool has, fromCache;
uint32_t matchLength;
rv = cache->Has(lookupHash, mTableFreshness, aFreshnessGuarantee,
&has, &matchLength, &confirmed, &fromCache);
rv = cache->Has(lookupHash, &has, &matchLength, &fromCache);
NS_ENSURE_SUCCESS(rv, rv);
if (has) {
LookupResult *result = aResults.AppendElement();
if (!result)
return NS_ERROR_OUT_OF_MEMORY;
// For V2, there is no TTL for caching, so we use table freshness to
// decide whether matching a completion should trigger a gethash request or not.
// For V4, this is done by the Positive Caching & Negative Caching mechanism.
bool confirmed = false;
if (fromCache) {
cache->IsHashEntryConfirmed(lookupHash, mTableFreshness,
aFreshnessGuarantee, &confirmed);
}
LOG(("Found a result in %s: %s",
cache->TableName().get(),
confirmed ? "confirmed." : "Not confirmed."));
@ -922,51 +922,42 @@ Classifier::RegenActiveTables()
mActiveTablesCache.Clear();
nsTArray<nsCString> foundTables;
ScanStoreDir(mRootStoreDirectory, foundTables);
ScanStoreDir(foundTables);
for (uint32_t i = 0; i < foundTables.Length(); i++) {
nsCString table(foundTables[i]);
HashStore store(table, GetProvider(table), mRootStoreDirectory);
LookupCache *lookupCache = GetLookupCache(table);
nsresult rv = store.Open();
if (NS_FAILED(rv))
continue;
LookupCache *lookupCache = GetLookupCache(store.TableName());
if (!lookupCache) {
continue;
}
if (!lookupCache->IsPrimed()) {
if (!lookupCache->IsPrimed())
continue;
}
if (LookupCache::Cast<LookupCacheV4>(lookupCache)) {
LOG(("Active v4 table: %s", table.get()));
} else {
HashStore store(table, GetProvider(table), mRootStoreDirectory);
const ChunkSet &adds = store.AddChunks();
const ChunkSet &subs = store.SubChunks();
nsresult rv = store.Open();
if (NS_FAILED(rv)) {
continue;
}
if (adds.Length() == 0 && subs.Length() == 0)
continue;
const ChunkSet &adds = store.AddChunks();
const ChunkSet &subs = store.SubChunks();
if (adds.Length() == 0 && subs.Length() == 0) {
continue;
}
LOG(("Active v2 table: %s", store.TableName().get()));
}
mActiveTablesCache.AppendElement(table);
LOG(("Active table: %s", store.TableName().get()));
mActiveTablesCache.AppendElement(store.TableName());
}
return NS_OK;
}
nsresult
Classifier::ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables)
Classifier::ScanStoreDir(nsTArray<nsCString>& aTables)
{
nsCOMPtr<nsISimpleEnumerator> entries;
nsresult rv = aDirectory->GetDirectoryEntries(getter_AddRefs(entries));
nsresult rv = mRootStoreDirectory->GetDirectoryEntries(getter_AddRefs(entries));
NS_ENSURE_SUCCESS(rv, rv);
bool hasMore;
@ -977,22 +968,11 @@ Classifier::ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables)
nsCOMPtr<nsIFile> file = do_QueryInterface(supports);
// If |file| is a directory, recurse to find its entries as well.
bool isDirectory;
if (NS_FAILED(file->IsDirectory(&isDirectory))) {
continue;
}
if (isDirectory) {
ScanStoreDir(file, aTables);
continue;
}
nsCString leafName;
rv = file->GetNativeLeafName(leafName);
NS_ENSURE_SUCCESS(rv, rv);
// Both v2 and v4 contain a .pset file
nsCString suffix(NS_LITERAL_CSTRING(".pset"));
nsCString suffix(NS_LITERAL_CSTRING(".sbstore"));
int32_t dot = leafName.RFind(suffix, 0);
if (dot != -1) {
@ -1326,11 +1306,6 @@ Classifier::UpdateTableV4(nsTArray<TableUpdate*>* aUpdates,
return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND;
}
// Remove cache entries whose negative cache time has expired when updating.
// We don't check whether the positive cache time has expired here because we want to
// keep the eviction rule simple when doing an update.
lookupCache->InvalidateExpiredCacheEntry();
nsresult rv = NS_OK;
// If there are multiple updates for the same table, prefixes1 & prefixes2
@ -1421,19 +1396,8 @@ Classifier::UpdateCache(TableUpdate* aUpdate)
return NS_ERROR_FAILURE;
}
auto lookupV2 = LookupCache::Cast<LookupCacheV2>(lookupCache);
if (lookupV2) {
auto updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
lookupV2->AddCompletionsToCache(updateV2->AddCompletes());
} else {
auto lookupV4 = LookupCache::Cast<LookupCacheV4>(lookupCache);
if (!lookupV4) {
return NS_ERROR_FAILURE;
}
auto updateV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
lookupV4->AddFullHashResponseToCache(updateV4->FullHashResponse());
}
auto updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
lookupCache->AddCompletionsToCache(updateV2->AddCompletes());
#if defined(DEBUG)
lookupCache->DumpCache();

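The comment introduced in Classifier::Check above describes the V2 rule: a completion found in the gethash cache only counts as confirmed while its table has been updated within the freshness guarantee window. A minimal standalone sketch of that rule, assuming illustrative names and plain time() instead of PR_Now():

// Sketch only: the V2 freshness rule described above, with illustrative names.
// aTableLastUpdateSec is the table's last-update timestamp (seconds since epoch);
// aFreshnessGuaranteeSec is the CONFIRM_AGE window in seconds.
#include <cstdint>
#include <ctime>

static bool IsCompletionConfirmed(int64_t aTableLastUpdateSec,
                                  int64_t aFreshnessGuaranteeSec)
{
  const int64_t nowSec = static_cast<int64_t>(time(nullptr));
  // A cached completion is trusted only while its table was updated recently;
  // otherwise the caller should fall back to a gethash request.
  return (nowSec - aTableLastUpdateSec) < aFreshnessGuaranteeSec;
}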
View file

@ -152,7 +152,7 @@ private:
nsresult DumpFailedUpdate();
#endif
nsresult ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables);
nsresult ScanStoreDir(nsTArray<nsCString>& aTables);
nsresult UpdateHashStore(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable);

View file

@ -318,44 +318,6 @@ typedef nsClassHashtable<nsUint32HashKey, nsCString> PrefixStringMap;
typedef nsDataHashtable<nsCStringHashKey, int64_t> TableFreshnessMap;
typedef nsCStringHashKey VLHashPrefixString;
typedef nsCStringHashKey FullHashString;
typedef nsDataHashtable<FullHashString, int64_t> FullHashExpiryCache;
struct CachedFullHashResponse {
int64_t negativeCacheExpirySec;
// Map containing all matches found in the fullhash response; this field might be empty.
FullHashExpiryCache fullHashes;
CachedFullHashResponse& operator=(const CachedFullHashResponse& aOther) {
negativeCacheExpirySec = aOther.negativeCacheExpirySec;
fullHashes.Clear();
for (auto iter = aOther.fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
fullHashes.Put(iter.Key(), iter.Data());
}
return *this;
}
bool operator==(const CachedFullHashResponse& aOther) const {
if (negativeCacheExpirySec != aOther.negativeCacheExpirySec ||
fullHashes.Count() != aOther.fullHashes.Count()) {
return false;
}
for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
if (iter.Data() != aOther.fullHashes.Get(iter.Key())) {
return false;
}
}
return true;
}
};
typedef nsClassHashtable<VLHashPrefixString, CachedFullHashResponse> FullHashResponseMap;
} // namespace safebrowsing
} // namespace mozilla

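The CachedFullHashResponse removed above pairs a prefix-level negative-cache expiry with a map of full hashes to their own positive-cache expiries. A hedged sketch of how one such entry is filled in from per-response cache durations (mirroring the CompletionV4 handling later in this commit), using std::map instead of the XPCOM hashtables; all names here are illustrative:

// Sketch only: standard-library stand-in for CachedFullHashResponse.
#include <cstdint>
#include <ctime>
#include <map>
#include <string>

struct FullHashResponseSketch {
  int64_t negativeCacheExpirySec = 0;             // absolute expiry for the prefix
  std::map<std::string, int64_t> fullHashes;      // full hash -> absolute positive expiry
};

static FullHashResponseSketch
MakeCacheEntry(int64_t aNegDurationSec,
               const std::map<std::string, int64_t>& aMatchDurationsSec)
{
  const int64_t nowSec = static_cast<int64_t>(time(nullptr));
  FullHashResponseSketch entry;
  entry.negativeCacheExpirySec = nowSec + aNegDurationSec;
  for (const auto& match : aMatchDurationsSec) {
    // Each matched full hash carries its own cache duration in the response.
    entry.fullHashes[match.first] = nowSec + match.second;
  }
  return entry;
}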
View file

@ -193,19 +193,6 @@ TableUpdateV4::NewChecksum(const std::string& aChecksum)
mChecksum.Assign(aChecksum.data(), aChecksum.size());
}
nsresult
TableUpdateV4::NewFullHashResponse(const nsACString& aPrefix,
CachedFullHashResponse& aResponse)
{
CachedFullHashResponse* response =
mFullHashResponseMap.LookupOrAdd(aPrefix);
if (!response) {
return NS_ERROR_OUT_OF_MEMORY;
}
*response = aResponse;
return NS_OK;
}
HashStore::HashStore(const nsACString& aTableName,
const nsACString& aProvider,
nsIFile* aRootStoreDir)

View file

@ -159,9 +159,7 @@ public:
bool Empty() const override
{
return mPrefixesMap.IsEmpty() &&
mRemovalIndiceArray.IsEmpty() &&
mFullHashResponseMap.IsEmpty();
return mPrefixesMap.IsEmpty() && mRemovalIndiceArray.IsEmpty();
}
bool IsFullUpdate() const { return mFullUpdate; }
@ -169,7 +167,6 @@ public:
RemovalIndiceArray& RemovalIndices() { return mRemovalIndiceArray; }
const nsACString& ClientState() const { return mClientState; }
const nsACString& Checksum() const { return mChecksum; }
const FullHashResponseMap& FullHashResponse() const { return mFullHashResponseMap; }
// For downcasting.
static const int TAG = 4;
@ -179,8 +176,6 @@ public:
void NewRemovalIndices(const uint32_t* aIndices, size_t aNumOfIndices);
void SetNewClientState(const nsACString& aState) { mClientState = aState; }
void NewChecksum(const std::string& aChecksum);
nsresult NewFullHashResponse(const nsACString& aPrefix,
CachedFullHashResponse& aResponse);
private:
virtual int Tag() const override { return TAG; }
@ -190,9 +185,6 @@ private:
RemovalIndiceArray mRemovalIndiceArray;
nsCString mClientState;
nsCString mChecksum;
// This is used to store response from fullHashes.find.
FullHashResponseMap mFullHashResponseMap;
};
// There is one hash store per table.

View file

@ -40,9 +40,6 @@ extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
namespace mozilla {
namespace safebrowsing {
const int CacheResultV2::VER = CacheResult::V2;
const int CacheResultV4::VER = CacheResult::V4;
const int LookupCacheV2::VER = 2;
LookupCache::LookupCache(const nsACString& aTableName,
@ -96,6 +93,34 @@ LookupCache::UpdateRootDirHandle(nsIFile* aNewRootStoreDirectory)
return rv;
}
nsresult
LookupCache::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
{
for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
}
}
mGetHashCache.Sort();
return NS_OK;
}
#if defined(DEBUG)
void
LookupCache::DumpCache()
{
if (!LOG_ENABLED())
return;
for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
nsAutoCString str;
mGetHashCache[i].ToHexString(str);
LOG(("Caches: %s", str.get()));
}
}
#endif
nsresult
LookupCache::WriteFile()
{
@ -124,6 +149,12 @@ LookupCache::ClearAll()
mPrimed = false;
}
void
LookupCache::ClearCache()
{
mGetHashCache.Clear();
}
/* static */ bool
LookupCache::IsCanonicalizedIP(const nsACString& aHost)
{
@ -367,12 +398,10 @@ LookupCacheV2::ClearAll()
nsresult
LookupCacheV2::Has(const Completion& aCompletion,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aHas, uint32_t* aMatchLength,
bool* aConfirmed, bool* aFromCache)
bool* aFromCache)
{
*aHas = *aConfirmed = *aFromCache = false;
*aHas = *aFromCache = false;
*aMatchLength = 0;
uint32_t prefix = aCompletion.ToUint32();
@ -394,20 +423,30 @@ LookupCacheV2::Has(const Completion& aCompletion,
*aFromCache = true;
*aHas = true;
*aMatchLength = COMPLETE_SIZE;
int64_t ageSec; // in seconds
if (aTableFreshness.Get(mTableName, &ageSec)) {
int64_t nowSec = (PR_Now() / PR_USEC_PER_SEC);
MOZ_ASSERT(ageSec <= nowSec);
// Consider the completion unsafe if its table is up-to-date.
*aConfirmed = (nowSec - ageSec) < aFreshnessGuarantee;
}
}
return NS_OK;
}
void
LookupCacheV2::IsHashEntryConfirmed(const Completion& aEntry,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aConfirmed)
{
int64_t age; // in seconds
bool found = aTableFreshness.Get(mTableName, &age);
if (!found) {
*aConfirmed = false;
} else {
int64_t now = (PR_Now() / PR_USEC_PER_SEC);
MOZ_ASSERT(age <= now);
// Consider the completion unsafe if its table is up-to-date.
*aConfirmed = (now - age) < aFreshnessGuarantee;
}
}
bool
LookupCacheV2::IsEmpty()
{
@ -452,19 +491,6 @@ LookupCacheV2::GetPrefixes(FallibleTArray<uint32_t>& aAddPrefixes)
return mPrefixSet->GetPrefixesNative(aAddPrefixes);
}
nsresult
LookupCacheV2::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
{
for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
}
}
mGetHashCache.Sort();
return NS_OK;
}
nsresult
LookupCacheV2::ReadCompletions()
{
@ -483,12 +509,6 @@ LookupCacheV2::ReadCompletions()
return NS_OK;
}
void
LookupCacheV2::ClearCache()
{
mGetHashCache.Clear();
}
nsresult
LookupCacheV2::ClearPrefixes()
{
@ -569,21 +589,6 @@ LookupCacheV2::ConstructPrefixSet(AddPrefixArray& aAddPrefixes)
}
#if defined(DEBUG)
void
LookupCacheV2::DumpCache()
{
if (!LOG_ENABLED()) {
return;
}
for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
nsAutoCString str;
mGetHashCache[i].ToHexString(str);
LOG(("Caches: %s", str.get()));
}
}
void
LookupCacheV2::DumpCompletions()
{

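AddCompletionsToCache above keeps mGetHashCache as a sorted array: it skips hashes that are already present (BinaryIndexOf) and re-sorts after appending. For illustration only, the same sorted, duplicate-free invariant maintained with standard containers (uint32_t stands in for the 32-byte Completion type):

// Sketch only: maintain a sorted, de-duplicated completion cache.
#include <algorithm>
#include <cstdint>
#include <vector>

static void AddCompletions(std::vector<uint32_t>& aCache,
                           const std::vector<uint32_t>& aNewCompletions)
{
  // Append everything, then restore the sorted, duplicate-free invariant so
  // later lookups can binary-search the cache.
  aCache.insert(aCache.end(), aNewCompletions.begin(), aNewCompletions.end());
  std::sort(aCache.begin(), aCache.end());
  aCache.erase(std::unique(aCache.begin(), aCache.end()), aCache.end());
}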
View file

@ -103,68 +103,18 @@ public:
typedef nsTArray<LookupResult> LookupResultArray;
class CacheResult {
public:
enum { V2, V4 };
virtual int Ver() const = 0;
virtual bool findCompletion(const Completion& aCompletion) const = 0;
virtual ~CacheResult() {}
template<typename T>
static T* Cast(CacheResult* aThat) {
return ((aThat && T::VER == aThat->Ver()) ?
reinterpret_cast<T*>(aThat) : nullptr);
}
struct CacheResult {
AddComplete entry;
nsCString table;
bool operator==(const CacheResult& aOther) const {
if (entry != aOther.entry) {
return false;
}
return table == aOther.table;
}
};
class CacheResultV2 final : public CacheResult
{
public:
static const int VER;
Completion completion;
uint32_t addChunk;
bool operator==(const CacheResultV2& aOther) const {
return table == aOther.table &&
completion == aOther.completion &&
addChunk == aOther.addChunk;
}
bool findCompletion(const Completion& aCompletion) const override {
return completion == aCompletion;
}
virtual int Ver() const override { return VER; }
};
class CacheResultV4 final : public CacheResult
{
public:
static const int VER;
nsCString prefix;
CachedFullHashResponse response;
bool operator==(const CacheResultV4& aOther) const {
return prefix == aOther.prefix &&
response == aOther.response;
}
bool findCompletion(const Completion& aCompletion) const override {
nsDependentCSubstring completion(
reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
return response.fullHashes.Contains(completion);
}
virtual int Ver() const override { return VER; }
};
typedef nsTArray<UniquePtr<CacheResult>> CacheResultArray;
typedef nsTArray<CacheResult> CacheResultArray;
class LookupCache {
public:
@ -195,31 +145,37 @@ public:
// be moved away when a backup is made.
nsresult UpdateRootDirHandle(nsIFile* aRootStoreDirectory);
// This will Clear() the passed arrays when done.
nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
// Write data stored in lookup cache to disk.
nsresult WriteFile();
// Clear completions retrieved from gethash request.
void ClearCache();
bool IsPrimed() const { return mPrimed; };
#if DEBUG
void DumpCache();
#endif
virtual nsresult Open();
virtual nsresult Init() = 0;
virtual nsresult ClearPrefixes() = 0;
virtual nsresult Has(const Completion& aCompletion,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aHas, uint32_t* aMatchLength,
bool* aConfirmed, bool* aFromCache) = 0;
bool* aFromCache) = 0;
// Clear completions retrieved from gethash request.
virtual void ClearCache() = 0;
virtual void IsHashEntryConfirmed(const Completion& aEntry,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aConfirmed) = 0;
virtual bool IsEmpty() = 0;
virtual void ClearAll();
#if DEBUG
virtual void DumpCache() = 0;
#endif
template<typename T>
static T* Cast(LookupCache* aThat) {
return ((aThat && T::VER == aThat->Ver()) ? reinterpret_cast<T*>(aThat) : nullptr);
@ -241,6 +197,9 @@ protected:
nsCOMPtr<nsIFile> mRootStoreDirectory;
nsCOMPtr<nsIFile> mStoreDirectory;
// Full length hashes obtained in gethash request
CompletionArray mGetHashCache;
// For gtest to inspect private members.
friend class PerProviderDirectoryTestUtils;
};
@ -256,13 +215,15 @@ public:
virtual nsresult Init() override;
virtual nsresult Open() override;
virtual void ClearCache() override;
virtual void ClearAll() override;
virtual nsresult Has(const Completion& aCompletion,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aHas, uint32_t* aMatchLength,
bool* aConfirmed, bool* aFromCache) override;
bool* aFromCache) override;
virtual void IsHashEntryConfirmed(const Completion& aEntry,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aConfirmed) override;
virtual bool IsEmpty() override;
@ -271,12 +232,7 @@ public:
nsresult GetPrefixes(FallibleTArray<uint32_t>& aAddPrefixes);
// This will Clear() the passed arrays when done.
nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
#if DEBUG
virtual void DumpCache() override;
void DumpCompletions();
#endif
@ -302,9 +258,6 @@ private:
// Set of prefixes known to be in the database
RefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
// Full length hashes obtained in gethash request
CompletionArray mGetHashCache;
};
} // namespace safebrowsing

View file

@ -80,12 +80,10 @@ LookupCacheV4::Init()
nsresult
LookupCacheV4::Has(const Completion& aCompletion,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aHas, uint32_t* aMatchLength,
bool* aConfirmed, bool* aFromCache)
bool* aFromCache)
{
*aHas = *aConfirmed = *aFromCache = false;
*aHas = *aFromCache = false;
*aMatchLength = 0;
uint32_t length = 0;
@ -95,8 +93,6 @@ LookupCacheV4::Has(const Completion& aCompletion,
nsresult rv = mVLPrefixSet->Matches(fullhash, &length);
NS_ENSURE_SUCCESS(rv, rv);
MOZ_ASSERT(length == 0 || (length >= PREFIX_SIZE && length <= COMPLETE_SIZE));
*aHas = length >= PREFIX_SIZE;
*aMatchLength = length;
@ -106,71 +102,21 @@ LookupCacheV4::Has(const Completion& aCompletion,
prefix, *aHas, length == COMPLETE_SIZE));
}
// Check if the fullhash matches any prefix in the local database
if (!(*aHas)) {
return NS_OK;
}
// We always send 4 bytes for the completion (Bug 1323953), so the prefix used to
// look up the cache should be 4 bytes too.
nsDependentCSubstring prefix(reinterpret_cast<const char*>(aCompletion.buf),
PREFIX_SIZE);
// Check if prefix can be found in cache.
CachedFullHashResponse* fullHashResponse = mCache.Get(prefix);
if (!fullHashResponse) {
return NS_OK;
}
*aFromCache = true;
int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
int64_t expiryTime;
FullHashExpiryCache& fullHashes = fullHashResponse->fullHashes;
nsDependentCSubstring completion(
reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
// Check if we can find the fullhash in the positive cache
if (fullHashes.Get(completion, &expiryTime)) {
if (nowSec <= expiryTime) {
// Url is NOT safe.
*aConfirmed = true;
LOG(("Found a valid fullhash in the positive cache"));
} else {
// Trigger a gethash request in this case(aConfirmed is false).
LOG(("Found an expired fullhash in the positive cache"));
// Remove the fullhash entry from the cache when the negative cache
// is also expired, because whether or not the fullhash is cached
// locally, we will need to consult the server the next time we
// look up this hash. We may as well remove it from our cache.
if (fullHashResponse->negativeCacheExpirySec < expiryTime) {
fullHashes.Remove(completion);
if (fullHashes.Count() == 0 &&
fullHashResponse->negativeCacheExpirySec < nowSec) {
mCache.Remove(prefix);
}
}
}
return NS_OK;
}
// Check negative cache.
if (fullHashResponse->negativeCacheExpirySec >= nowSec) {
// Url is safe.
LOG(("Found a valid prefix in the negative cache"));
*aHas = false;
} else {
LOG(("Found an expired prefix in the negative cache"));
if (fullHashes.Count() == 0) {
mCache.Remove(prefix);
}
}
// TODO : Bug 1311935 - Implement v4 caching
return NS_OK;
}
void
LookupCacheV4::IsHashEntryConfirmed(const Completion& aEntry,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aConfirmed)
{
// TODO : Bug 1311935 - Implement v4 caching
*aConfirmed = true;
}
bool
LookupCacheV4::IsEmpty()
{
@ -383,17 +329,6 @@ LookupCacheV4::ApplyUpdate(TableUpdateV4* aTableUpdate,
return NS_OK;
}
nsresult
LookupCacheV4::AddFullHashResponseToCache(const FullHashResponseMap& aResponseMap)
{
for (auto iter = aResponseMap.ConstIter(); !iter.Done(); iter.Next()) {
CachedFullHashResponse* response = mCache.LookupOrAdd(iter.Key());
*response = *(iter.Data());
}
return NS_OK;
}
nsresult
LookupCacheV4::InitCrypto(nsCOMPtr<nsICryptoHash>& aCrypto)
{
@ -604,84 +539,6 @@ LookupCacheV4::LoadMetadata(nsACString& aState, nsACString& aChecksum)
return rv;
}
void
LookupCacheV4::ClearCache()
{
mCache.Clear();
}
// This function removes cache entries whose negative cache time has expired.
// It is possible that a cache entry whose positive cache time has not yet
// expired is still removed after calling this API. Right now we call
// this on every update.
void
LookupCacheV4::InvalidateExpiredCacheEntry()
{
int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
for (auto iter = mCache.Iter(); !iter.Done(); iter.Next()) {
CachedFullHashResponse* response = iter.Data();
if (response->negativeCacheExpirySec < nowSec) {
iter.Remove();
}
}
}
#if defined(DEBUG)
static
void CStringToHexString(const nsACString& aIn, nsACString& aOut)
{
static const char* const lut = "0123456789ABCDEF";
// 32 bytes is the longest hash
size_t len = COMPLETE_SIZE;
aOut.SetCapacity(2 * len);
for (size_t i = 0; i < aIn.Length(); ++i) {
const char c = static_cast<const char>(aIn[i]);
aOut.Append(lut[(c >> 4) & 0x0F]);
aOut.Append(lut[c & 15]);
}
}
static
nsCString GetFormattedTimeString(int64_t aCurTimeSec)
{
PRExplodedTime pret;
PR_ExplodeTime(aCurTimeSec * PR_USEC_PER_SEC, PR_GMTParameters, &pret);
return nsPrintfCString(
"%04d-%02d-%02d %02d:%02d:%02d UTC",
pret.tm_year, pret.tm_month + 1, pret.tm_mday,
pret.tm_hour, pret.tm_min, pret.tm_sec);
}
void
LookupCacheV4::DumpCache()
{
if (!LOG_ENABLED()) {
return;
}
for (auto iter = mCache.ConstIter(); !iter.Done(); iter.Next()) {
nsAutoCString strPrefix;
CStringToHexString(iter.Key(), strPrefix);
CachedFullHashResponse* response = iter.Data();
LOG(("Caches prefix: %s, Expire time: %s",
strPrefix.get(),
GetFormattedTimeString(response->negativeCacheExpirySec).get()));
FullHashExpiryCache& fullHashes = response->fullHashes;
for (auto iter2 = fullHashes.ConstIter(); !iter2.Done(); iter2.Next()) {
nsAutoCString strFullhash;
CStringToHexString(iter2.Key(), strFullhash);
LOG((" - %s, Expire time: %s", strFullhash.get(),
GetFormattedTimeString(iter2.Data()).get()));
}
}
}
#endif
VLPrefixSet::VLPrefixSet(const PrefixStringMap& aMap)
: mCount(0)
{

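The block removed from LookupCacheV4::Has above encodes the V4 caching rule: a fresh full-hash match in the positive cache confirms the hit as unsafe, a fresh prefix entry with no full-hash match means the URL is safe, and anything expired falls back to a gethash request. A hedged, standalone sketch of that decision over plain types (std::map rather than the actual FullHashResponseMap; names are illustrative):

// Sketch only: the positive/negative cache decision described above.
#include <cstdint>
#include <ctime>
#include <map>
#include <string>

struct CacheEntrySketch {
  int64_t negativeCacheExpirySec;                 // prefix-level negative expiry
  std::map<std::string, int64_t> fullHashes;      // full hash -> positive expiry
};

enum class CacheDecision { NotCached, Safe, Unsafe, NeedsGethash };

static CacheDecision Decide(const CacheEntrySketch* aEntry, const std::string& aFullHash)
{
  if (!aEntry) {
    return CacheDecision::NotCached;              // prefix not cached: consult the server
  }
  const int64_t nowSec = static_cast<int64_t>(time(nullptr));
  auto it = aEntry->fullHashes.find(aFullHash);
  if (it != aEntry->fullHashes.end()) {
    // Positive cache: a fresh full-hash entry is a confirmed (unsafe) match.
    return nowSec <= it->second ? CacheDecision::Unsafe : CacheDecision::NeedsGethash;
  }
  // Negative cache: a fresh prefix entry without a full-hash match means safe.
  return aEntry->negativeCacheExpirySec >= nowSec ? CacheDecision::Safe
                                                  : CacheDecision::NeedsGethash;
}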
View file

@ -25,16 +25,13 @@ public:
virtual nsresult Init() override;
virtual nsresult Has(const Completion& aCompletion,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aHas, uint32_t* aMatchLength,
bool* aConfirmed, bool* aFromCache) override;
bool* aFromCache) override;
virtual void ClearCache() override;
#if DEBUG
virtual void DumpCache() override;
#endif
virtual void IsHashEntryConfirmed(const Completion& aEntry,
const TableFreshnessMap& aTableFreshness,
uint32_t aFreshnessGuarantee,
bool* aConfirmed) override;
virtual bool IsEmpty() override;
@ -48,13 +45,9 @@ public:
PrefixStringMap& aInputMap,
PrefixStringMap& aOutputMap);
nsresult AddFullHashResponseToCache(const FullHashResponseMap& aResponseMap);
nsresult WriteMetadata(TableUpdateV4* aTableUpdate);
nsresult LoadMetadata(nsACString& aState, nsACString& aChecksum);
void InvalidateExpiredCacheEntry();
static const int VER;
protected:
@ -70,8 +63,6 @@ private:
nsresult VerifyChecksum(const nsACString& aChecksum);
RefPtr<VariableLengthPrefixSet> mVLPrefixSet;
FullHashResponseMap mCache;
};
} // namespace safebrowsing

View file

@ -188,7 +188,6 @@ VariableLengthPrefixSet::Matches(const nsACString& aFullHash, uint32_t* aLength)
for (auto iter = mVLPrefixSet.ConstIter(); !iter.Done(); iter.Next()) {
if (BinarySearch(aFullHash, *iter.Data(), iter.Key())) {
*aLength = iter.Key();
MOZ_ASSERT(*aLength > 4);
return NS_OK;
}
}

View file

@ -4,23 +4,6 @@
#include "nsISupports.idl"
interface nsIArray;
/**
* This interface contains fields of the Matches object in a FullHashResponse (V4).
* Reference from:
* https://developers.google.com/safe-browsing/v4/update-api#http-post-response_2
*/
[scriptable, uuid(aabeb50e-d9f7-418e-9469-2cd9608958c0)]
interface nsIFullHashMatch : nsISupports
{
readonly attribute ACString tableName;
readonly attribute ACString fullHash;
readonly attribute uint32_t cacheDuration;
};
/**
* This interface is implemented by nsIUrlClassifierHashCompleter clients.
*/
@ -33,33 +16,15 @@ interface nsIUrlClassifierHashCompleterCallback : nsISupports
* nsIUrlClassifierCompleter::complete() call.
*
* @param hash
* The 256-bit hash that was discovered.
* The 128-bit hash that was discovered.
* @param table
* The name of the table that this hash belongs to.
* @param chunkId
* The database chunk that this hash belongs to.
*/
void completionV2(in ACString hash,
in ACString table,
in uint32_t chunkId);
/**
* This will be called when a fullhash response is received and parsed,
* whether or not any full hash has been found.
*
* @param partialHash
* The hash that was sent for completion.
* @param table
* The name of the table that this hash belongs to.
* @param negativeCacheDuration
* The negative cache duration in milliseconds.
* @param fullHashes
* Array of fullhashes that match the prefix.
*/
void completionV4(in ACString partialHash,
in ACString table,
in uint32_t negativeCacheDuration,
in nsIArray fullHashes);
void completion(in ACString hash,
in ACString table,
in uint32_t chunkId);
/**
* The completion is complete. This method is called once per

View file

@ -6,7 +6,6 @@
#include "nsAutoPtr.h"
#include "nsCOMPtr.h"
#include "nsAppDirectoryServiceDefs.h"
#include "nsArrayUtils.h"
#include "nsCRT.h"
#include "nsICryptoHash.h"
#include "nsICryptoHMAC.h"
@ -123,9 +122,6 @@ LazyLogModule gUrlClassifierDbServiceLog("UrlClassifierDbService");
#define CONFIRM_AGE_PREF "urlclassifier.max-complete-age"
#define CONFIRM_AGE_DEFAULT_SEC (45 * 60)
// 30 minutes as the maximum negative cache duration.
#define MAXIMUM_NEGATIVE_CACHE_DURATION_SEC (30 * 60 * 1000)
// TODO: The following two prefs are to be removed after we
// roll out full v4 hash completion. See Bug 1331534.
#define TAKE_V4_COMPLETION_RESULT_PREF "browser.safebrowsing.temporary.take_v4_completion_result"
@ -842,26 +838,19 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
}
LOG(("nsUrlClassifierDBServiceWorker::CacheCompletions [%p]", this));
if (!mClassifier) {
if (!mClassifier)
return NS_OK;
}
// Ownership is transferred to us
nsAutoPtr<CacheResultArray> resultsPtr(results);
if (resultsPtr->Length() == 0) {
return NS_OK;
}
if (IsSameAsLastResults(*resultsPtr)) {
if (mLastResults == *resultsPtr) {
LOG(("Skipping completions that have just been cached already."));
return NS_OK;
}
nsAutoPtr<ProtocolParser> pParse;
pParse = resultsPtr->ElementAt(0)->Ver() == CacheResult::V2 ?
static_cast<ProtocolParser*>(new ProtocolParserV2()) :
static_cast<ProtocolParser*>(new ProtocolParserProtobuf());
nsAutoPtr<ProtocolParserV2> pParse(new ProtocolParserV2());
nsTArray<TableUpdate*> updates;
// Only cache results for tables that we have, don't take
// in tables we might accidentally have hit during a completion.
@ -870,28 +859,37 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
nsresult rv = mClassifier->ActiveTables(tables);
NS_ENSURE_SUCCESS(rv, rv);
nsTArray<TableUpdate*> updates;
for (uint32_t i = 0; i < resultsPtr->Length(); i++) {
bool activeTable = false;
CacheResult* result = resultsPtr->ElementAt(i).get();
for (uint32_t table = 0; table < tables.Length(); table++) {
if (tables[table].Equals(result->table)) {
if (tables[table].Equals(resultsPtr->ElementAt(i).table)) {
activeTable = true;
break;
}
}
if (activeTable) {
TableUpdate* tu = pParse->GetTableUpdate(result->table);
TableUpdateV2* tuV2 = TableUpdate::Cast<TableUpdateV2>(
pParse->GetTableUpdate(resultsPtr->ElementAt(i).table));
rv = CacheResultToTableUpdate(result, tu);
// Ignore V4 for now.
if (!tuV2) {
continue;
}
LOG(("CacheCompletion Addchunk %d hash %X", resultsPtr->ElementAt(i).entry.addChunk,
resultsPtr->ElementAt(i).entry.ToUint32()));
rv = tuV2->NewAddComplete(resultsPtr->ElementAt(i).entry.addChunk,
resultsPtr->ElementAt(i).entry.complete);
if (NS_FAILED(rv)) {
// We can bail without leaking here because ForgetTableUpdates
// hasn't been called yet.
return rv;
}
updates.AppendElement(tu);
rv = tuV2->NewAddChunk(resultsPtr->ElementAt(i).entry.addChunk);
if (NS_FAILED(rv)) {
return rv;
}
updates.AppendElement(tuV2);
pParse->ForgetTableUpdates();
} else {
LOG(("Completion received, but table is not active, so not caching."));
@ -899,53 +897,10 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
}
mClassifier->ApplyFullHashes(&updates);
mLastResults = Move(resultsPtr);
mLastResults = *resultsPtr;
return NS_OK;
}
nsresult
nsUrlClassifierDBServiceWorker::CacheResultToTableUpdate(CacheResult* aCacheResult,
TableUpdate* aUpdate)
{
auto tuV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
if (tuV2) {
auto result = CacheResult::Cast<CacheResultV2>(aCacheResult);
MOZ_ASSERT(result);
LOG(("CacheCompletion hash %X, Addchunk %d", result->completion.ToUint32(),
result->addChunk));
nsresult rv = tuV2->NewAddComplete(result->addChunk, result->completion);
if (NS_FAILED(rv)) {
return rv;
}
rv = tuV2->NewAddChunk(result->addChunk);
return rv;
}
auto tuV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
if (tuV4) {
auto result = CacheResult::Cast<CacheResultV4>(aCacheResult);
MOZ_ASSERT(result);
if (LOG_ENABLED()) {
const FullHashExpiryCache& fullHashes = result->response.fullHashes;
for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
Completion completion;
completion.Assign(iter.Key());
LOG(("CacheCompletion(v4) hash %X, CacheExpireTime %" PRId64,
completion.ToUint32(), iter.Data()));
}
}
tuV4->NewFullHashResponse(result->prefix, result->response);
return NS_OK;
}
// tableUpdate object should be either v2 or v4.
return NS_ERROR_FAILURE;
}
nsresult
nsUrlClassifierDBServiceWorker::CacheMisses(PrefixArray *results)
{
@ -1007,39 +962,10 @@ NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::ClearLastResults()
{
MOZ_ASSERT(!NS_IsMainThread(), "Must be on the background thread");
if (mLastResults) {
mLastResults->Clear();
}
mLastResults.Clear();
return NS_OK;
}
bool
nsUrlClassifierDBServiceWorker::IsSameAsLastResults(CacheResultArray& aResult)
{
if (!mLastResults || mLastResults->Length() != aResult.Length()) {
return false;
}
bool equal = true;
for (uint32_t i = 0; i < mLastResults->Length() && equal; i++) {
CacheResult* lhs = mLastResults->ElementAt(i).get();
CacheResult* rhs = aResult[i].get();
if (lhs->Ver() != rhs->Ver()) {
return false;
}
if (lhs->Ver() == CacheResult::V2) {
equal = *(CacheResult::Cast<CacheResultV2>(lhs)) ==
*(CacheResult::Cast<CacheResultV2>(rhs));
} else if (lhs->Ver() == CacheResult::V4) {
equal = *(CacheResult::Cast<CacheResultV4>(lhs)) ==
*(CacheResult::Cast<CacheResultV4>(rhs));
}
}
return equal;
}
// -------------------------------------------------------------------------
// nsUrlClassifierLookupCallback
@ -1068,7 +994,6 @@ private:
~nsUrlClassifierLookupCallback();
nsresult HandleResults();
nsresult ProcessComplete(CacheResult* aCacheResult);
RefPtr<nsUrlClassifierDBService> mDBService;
nsAutoPtr<LookupResultArray> mResults;
@ -1184,88 +1109,30 @@ nsUrlClassifierLookupCallback::CompletionFinished(nsresult status)
}
NS_IMETHODIMP
nsUrlClassifierLookupCallback::CompletionV2(const nsACString& aCompleteHash,
const nsACString& aTableName,
uint32_t aChunkId)
nsUrlClassifierLookupCallback::Completion(const nsACString& completeHash,
const nsACString& tableName,
uint32_t chunkId)
{
LOG(("nsUrlClassifierLookupCallback::Completion [%p, %s, %d]",
this, PromiseFlatCString(aTableName).get(), aChunkId));
this, PromiseFlatCString(tableName).get(), chunkId));
MOZ_ASSERT(!StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
mozilla::safebrowsing::Completion hash;
hash.Assign(completeHash);
auto result = new CacheResultV2;
result->table = aTableName;
result->completion.Assign(aCompleteHash);
result->addChunk = aChunkId;
return ProcessComplete(result);
}
NS_IMETHODIMP
nsUrlClassifierLookupCallback::CompletionV4(const nsACString& aPartialHash,
const nsACString& aTableName,
uint32_t aNegativeCacheDuration,
nsIArray* aFullHashes)
{
LOG(("nsUrlClassifierLookupCallback::CompletionV4 [%p, %s, %d]",
this, PromiseFlatCString(aTableName).get(), aNegativeCacheDuration));
MOZ_ASSERT(StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
if(!aFullHashes) {
return NS_ERROR_INVALID_ARG;
}
if (aNegativeCacheDuration > MAXIMUM_NEGATIVE_CACHE_DURATION_SEC) {
LOG(("Negative cache duration too large, clamping it down to"
"a reasonable value."));
aNegativeCacheDuration = MAXIMUM_NEGATIVE_CACHE_DURATION_SEC;
}
auto result = new CacheResultV4;
int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
result->table = aTableName;
result->prefix = aPartialHash;
result->response.negativeCacheExpirySec = nowSec + aNegativeCacheDuration;
// Fill in positive cache entries.
uint32_t fullHashCount = 0;
nsresult rv = aFullHashes->GetLength(&fullHashCount);
if (NS_FAILED(rv)) {
return rv;
}
for (uint32_t i = 0; i < fullHashCount; i++) {
nsCOMPtr<nsIFullHashMatch> match = do_QueryElementAt(aFullHashes, i);
nsCString fullHash;
match->GetFullHash(fullHash);
uint32_t duration;
match->GetCacheDuration(&duration);
result->response.fullHashes.Put(fullHash, nowSec + duration);
}
return ProcessComplete(result);
}
nsresult
nsUrlClassifierLookupCallback::ProcessComplete(CacheResult* aCacheResult)
{
// Send this completion to the store for caching.
if (!mCacheResults) {
mCacheResults = new CacheResultArray();
if (!mCacheResults) {
if (!mCacheResults)
return NS_ERROR_OUT_OF_MEMORY;
}
}
CacheResult result;
result.entry.addChunk = chunkId;
result.entry.complete = hash;
result.table = tableName;
// OK if this fails, we just won't cache the item.
mCacheResults->AppendElement(aCacheResult);
mCacheResults->AppendElement(result);
// Check if this matched any of our results.
for (uint32_t i = 0; i < mResults->Length(); i++) {
@ -1273,8 +1140,8 @@ nsUrlClassifierLookupCallback::ProcessComplete(CacheResult* aCacheResult)
// Now, see if it verifies a lookup
if (!result.mNoise
&& result.mTableName.Equals(aCacheResult->table)
&& aCacheResult->findCompletion(result.CompleteHash())) {
&& result.CompleteHash() == hash
&& result.mTableName.Equals(tableName)) {
result.mProtocolConfirmed = true;
}
}

View file

@ -218,11 +218,6 @@ private:
uint32_t aCount,
LookupResultArray& results);
nsresult CacheResultToTableUpdate(CacheResult* aCacheResult,
TableUpdate* aUpdate);
bool IsSameAsLastResults(CacheResultArray& aResult);
// Can only be used on the background thread
nsCOMPtr<nsICryptoHash> mCryptoHash;
@ -244,7 +239,7 @@ private:
PrefixArray mMissCache;
// Stores the last results that triggered a table update.
nsAutoPtr<CacheResultArray> mLastResults;
CacheResultArray mLastResults;
nsresult mUpdateStatus;
nsTArray<nsCString> mUpdateTables;

View file

@ -149,20 +149,6 @@ function httpStatusToBucket(httpStatus) {
return statusBucket;
}
function FullHashMatch(table, hash, duration) {
this.tableName = table;
this.fullHash = hash;
this.cacheDuration = duration;
}
FullHashMatch.prototype = {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIFullHashMatch]),
tableName : null,
fullHash : null,
cacheDuration : null,
};
function HashCompleter() {
// The current HashCompleterRequest in flight. Once it is started, it is set
// to null. It may be used by multiple calls to |complete| in succession to
@ -329,8 +315,7 @@ HashCompleterRequest.prototype = {
this._requests.push({
partialHash: aPartialHash,
callback: aCallback,
tableName: aTableName,
response: { matches:[] },
responses: []
});
if (aTableName) {
@ -531,7 +516,7 @@ HashCompleterRequest.prototype = {
httpChannel.requestMethod = "POST";
},
// Parses the response body and eventually adds items to the |response.matches| array
// Parses the response body and eventually adds items to the |responses| array
// for elements of |this._requests|.
handleResponse: function HCR_handleResponse() {
if (this._response == "") {
@ -552,8 +537,6 @@ HashCompleterRequest.prototype = {
handleResponseV4: function HCR_handleResponseV4() {
let callback = {
// onCompleteHashFound will be called for each fullhash found in
// FullHashResponse.
onCompleteHashFound : (aCompleteHash,
aTableNames,
aPerHashCacheDuration) => {
@ -573,16 +556,11 @@ HashCompleterRequest.prototype = {
log("WARNING: Got complete hash which has ambigious threat type.");
}
this.handleItem({
completeHash: aCompleteHash,
tableName: filteredTables[0],
cacheDuration: aPerHashCacheDuration
});
this.handleItem(aCompleteHash, filteredTables[0], 0);
// TODO: Bug 1311935 - Implement v4 cache.
},
// onResponseParsed will be called whether or not there is a match in the
// FullHashResponse; the callback is mainly used to pass the negative cache
// duration and minimum wait duration.
onResponseParsed : (aMinWaitDuration,
aNegCacheDuration) => {
log("V4 fullhash response parsed callback: " +
@ -592,22 +570,15 @@ HashCompleterRequest.prototype = {
let minWaitDuration = aMinWaitDuration;
if (aMinWaitDuration > MIN_WAIT_DURATION_MAX_VALUE) {
log("WARNING: Minimum wait duration too large, clamping it down " +
"to a reasonable value.");
minWaitDuration = MIN_WAIT_DURATION_MAX_VALUE;
} else if (aMinWaitDuration < 0) {
log("WARNING: Minimum wait duration is negative, reset it to 0");
minWaitDuration = 0;
}
this._completer._nextGethashTimeMs[this.gethashUrl] =
Date.now() + minWaitDuration;
// A fullhash request may contain more than one prefix, so the negative
// cache duration should be set for all the prefixes in the request.
this._requests.forEach(request => {
request.response.negCacheDuration = aNegCacheDuration;
});
// TODO: Bug 1311935 - Implement v4 cache.
},
};
@ -644,11 +615,8 @@ HashCompleterRequest.prototype = {
let data = body.substr(newlineIndex + 1, dataLength);
for (let i = 0; i < (dataLength / COMPLETE_LENGTH); i++) {
this.handleItem({
completeHash: data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH),
tableName: list,
chunkId: addChunk
});
this.handleItem(data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH), list,
addChunk);
}
return aStart + newlineIndex + 1 + dataLength;
@ -656,11 +624,15 @@ HashCompleterRequest.prototype = {
// This adds a complete hash to any entry in |this._requests| that matches
// the hash.
handleItem: function HCR_handleItem(aData) {
handleItem: function HCR_handleItem(aData, aTableName, aChunkId) {
for (let i = 0; i < this._requests.length; i++) {
let request = this._requests[i];
if (aData.completeHash.startsWith(request.partialHash)) {
request.response.matches.push(aData);
if (aData.startsWith(request.partialHash)) {
request.responses.push({
completeHash: aData,
tableName: aTableName,
chunkId: aChunkId,
});
}
}
},
@ -670,32 +642,16 @@ HashCompleterRequest.prototype = {
// while notifyFailure only makes a |completionFinished| call with the error
// code.
notifySuccess: function HCR_notifySuccess() {
// V2 completion handler
let completionV2 = (req) => {
req.response.matches.forEach((m) => {
req.callback.completionV2(m.completeHash, m.tableName, m.chunkId);
});
for (let i = 0; i < this._requests.length; i++) {
let request = this._requests[i];
for (let j = 0; j < request.responses.length; j++) {
let response = request.responses[j];
request.callback.completion(response.completeHash, response.tableName,
response.chunkId);
}
req.callback.completionFinished(Cr.NS_OK);
};
// V4 completion handler
let completionV4 = (req) => {
let matches = Cc["@mozilla.org/array;1"].createInstance(Ci.nsIMutableArray);
req.response.matches.forEach(m => {
matches.appendElement(
new FullHashMatch(m.tableName, m.completeHash, m.cacheDuration), false);
});
req.callback.completionV4(req.partialHash, req.tableName,
req.response.negCacheDuration, matches);
req.callback.completionFinished(Cr.NS_OK);
};
let completion = this.isV4 ? completionV4 : completionV2;
this._requests.forEach((req) => { completion(req); });
request.callback.completionFinished(Cr.NS_OK);
}
},
notifyFailure: function HCR_notifyFailure(aStatus) {

View file

@ -459,7 +459,7 @@ nsUrlClassifierUtils::ParseFindFullHashResponseV4(const nsACString& aResponse,
continue; // Ignore un-convertible threat type.
}
auto& hash = m.threat().hash();
auto cacheDuration = m.cache_duration().seconds();
auto cacheDuration = DurationToMs(m.cache_duration());
aCallback->OnCompleteHashFound(nsCString(hash.c_str(), hash.length()),
tableNames, cacheDuration);
@ -468,7 +468,7 @@ nsUrlClassifierUtils::ParseFindFullHashResponseV4(const nsACString& aResponse,
}
auto minWaitDuration = DurationToMs(r.minimum_wait_duration());
auto negCacheDuration = r.negative_cache_duration().seconds();
auto negCacheDuration = DurationToMs(r.negative_cache_duration());
aCallback->OnResponseParsed(minWaitDuration, negCacheDuration);

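This hunk switches the parsed cache durations from raw seconds() to DurationToMs(), and the TestFindFullHash change later in this commit verifies the value against mSecs * 1000. Assuming DurationToMs converts a protobuf-style Duration (seconds plus nanos) into milliseconds, a sketch of that conversion would look like the following; the struct and field names are assumptions for illustration, not the generated safebrowsing.pb API:

// Sketch only: seconds+nanos duration converted to milliseconds, which is
// presumably what DurationToMs() does. Names here are illustrative assumptions.
#include <cstdint>

struct DurationSketch {
  int64_t seconds;
  int32_t nanos;
};

static uint32_t DurationToMsSketch(const DurationSketch& aDuration)
{
  return static_cast<uint32_t>(aDuration.seconds * 1000 + aDuration.nanos / 1000000);
}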
View file

@ -10,9 +10,6 @@
using namespace mozilla;
using namespace mozilla::safebrowsing;
#define GTEST_SAFEBROWSING_DIR NS_LITERAL_CSTRING("safebrowsing")
#define GTEST_TABLE NS_LITERAL_CSTRING("gtest-malware-proto")
template<typename Function>
void RunTestInNewThread(Function&& aFunction) {
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(mozilla::Forward<Function>(aFunction));
@ -160,22 +157,3 @@ GeneratePrefix(const nsCString& aFragment, uint8_t aLength)
return hash;
}
UniquePtr<LookupCacheV4>
SetupLookupCacheV4(const _PrefixArray& prefixArray)
{
nsCOMPtr<nsIFile> file;
NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
file->AppendNative(GTEST_SAFEBROWSING_DIR);
UniquePtr<LookupCacheV4> cache = MakeUnique<LookupCacheV4>(GTEST_TABLE, EmptyCString(), file);
nsresult rv = cache->Init();
EXPECT_EQ(rv, NS_OK);
PrefixStringMap map;
PrefixArrayToPrefixStringMap(prefixArray, map);
rv = cache->Build(map);
EXPECT_EQ(rv, NS_OK);
return Move(cache);
}

View file

@ -1,5 +1,4 @@
#include "HashStore.h"
#include "LookupCacheV4.h"
#include "nsIFile.h"
#include "nsTArray.h"
#include "gtest/gtest.h"
@ -13,9 +12,6 @@ namespace safebrowsing {
}
}
typedef nsCString _Fragment;
typedef nsTArray<nsCString> _PrefixArray;
template<typename Function>
void RunTestInNewThread(Function&& aFunction);
@ -43,6 +39,3 @@ nsresult PrefixArrayToAddPrefixArrayV2(const nsTArray<nsCString>& prefixArray,
// Generate a hash prefix from string
nsCString GeneratePrefix(const nsCString& aFragment, uint8_t aLength);
// Create a LookupCacheV4 object with the specified prefix array.
UniquePtr<LookupCacheV4> SetupLookupCacheV4(const _PrefixArray& prefixArray);

View file

@ -1,232 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Common.h"
#define EXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC - 3600)
#define NOTEXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC + 3600)
static void
SetupCacheEntry(LookupCacheV4* aLookupCache,
const nsCString& aCompletion,
bool aNegExpired = false,
bool aPosExpired = false)
{
FullHashResponseMap map;
CachedFullHashResponse* response = map.LookupOrAdd(
GeneratePrefix(aCompletion, PREFIX_SIZE));
response->negativeCacheExpirySec = aNegExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
response->fullHashes.Put(GeneratePrefix(aCompletion, COMPLETE_SIZE),
aPosExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC);
aLookupCache->AddFullHashResponseToCache(map);
}
void
TestCache(const Completion aCompletion,
bool aExpectedHas,
bool aExpectedConfirmed,
bool aExpectedFromCache,
LookupCacheV4* aCache = nullptr)
{
bool has, fromCache, confirmed;
uint32_t matchLength;
TableFreshnessMap dummy;
if (aCache) {
aCache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
} else {
_PrefixArray array = { GeneratePrefix(_Fragment("cache.notexpired.com/"), 10),
GeneratePrefix(_Fragment("cache.expired.com/"), 8),
GeneratePrefix(_Fragment("gound.com/"), 5),
GeneratePrefix(_Fragment("small.com/"), 4)
};
UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
// Create an expired entry and a non-expired entry
SetupCacheEntry(cache.get(), _Fragment("cache.notexpired.com/"));
SetupCacheEntry(cache.get(), _Fragment("cache.expired.com/"), true, true);
cache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
}
EXPECT_EQ(has, aExpectedHas);
EXPECT_EQ(confirmed, aExpectedConfirmed);
EXPECT_EQ(fromCache, aExpectedFromCache);
}
void
TestCache(const _Fragment& aFragment,
bool aExpectedHas,
bool aExpectedConfirmed,
bool aExpectedFromCache,
LookupCacheV4* aCache = nullptr)
{
Completion lookupHash;
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
lookupHash.FromPlaintext(aFragment, cryptoHash);
TestCache(lookupHash, aExpectedHas, aExpectedConfirmed, aExpectedFromCache, aCache);
}
// This testcase checks the result returned by the |Has| API when the fullhash cannot match
// any prefix in the local database.
TEST(CachingV4, NotFound)
{
TestCache(_Fragment("nomatch.com/"), false, false, false);
}
// This testcase checks the result returned by the |Has| API when the fullhash finds a match
// in the local database but not in the cache.
TEST(CachingV4, NotInCache)
{
TestCache(_Fragment("gound.com/"), true, false, false);
}
// This testcase checks the result returned by the |Has| API when the fullhash matches
// a cache entry in the positive cache.
TEST(CachingV4, InPositiveCacheNotExpired)
{
TestCache(_Fragment("cache.notexpired.com/"), true, true, true);
}
// This testcase checks the result returned by the |Has| API when the fullhash matches
// a cache entry in the positive cache but that entry is expired.
TEST(CachingV4, InPositiveCacheExpired)
{
TestCache(_Fragment("cache.expired.com/"), true, false, true);
}
// This testcase checks the result returned by the |Has| API when the fullhash matches
// a cache entry in the negative cache.
TEST(CachingV4, InNegativeCacheNotExpired)
{
// Create a fullhash whose prefix matches the prefix in the negative cache
// but whose completion doesn't match any fullhash in the positive cache.
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
Completion prefix;
prefix.FromPlaintext(_Fragment("cache.notexpired.com/"), cryptoHash);
Completion fullhash;
fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
// Overwrite the 4-byte prefix of `fullhash` so that it conflicts with `prefix`.
// Since "cache.notexpired.com" is added to database in TestCache as a
// 10-byte prefix, we should copy more than 10 bytes to fullhash to ensure
// it can match the prefix in database.
memcpy(fullhash.buf, prefix.buf, 10);
TestCache(fullhash, false, false, true);
}
// This testcase checks the result returned by the |Has| API when the fullhash matches
// a cache entry in the negative cache but that entry is expired.
TEST(CachingV4, InNegativeCacheExpired)
{
// Create a fullhash whose prefix is in the cache.
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
Completion prefix;
prefix.FromPlaintext(_Fragment("cache.expired.com/"), cryptoHash);
Completion fullhash;
fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
memcpy(fullhash.buf, prefix.buf, 10);
TestCache(fullhash, true, false, true);
}
#define CACHED_URL _Fragment("cache.com/")
#define NEG_CACHE_EXPIRED_URL _Fragment("cache.negExpired.com/")
#define POS_CACHE_EXPIRED_URL _Fragment("cache.posExpired.com/")
#define BOTH_CACHE_EXPIRED_URL _Fragment("cache.negAndposExpired.com/")
// This testcase creates 4 cache entries:
// 1. an unexpired entry.
// 2. an entry whose negative cache time is expired but whose positive cache
//    time is not expired.
// 3. an entry whose positive cache time is expired.
// 4. an entry whose negative cache time and positive cache time are both expired.
// After calling the |InvalidateExpiredCacheEntry| API, entries with an expired
// negative cache time should be removed from the cache (2 & 4).
TEST(CachingV4, InvalidateExpiredCacheEntry)
{
_PrefixArray array = { GeneratePrefix(CACHED_URL, 10),
GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8),
GeneratePrefix(POS_CACHE_EXPIRED_URL, 5),
GeneratePrefix(BOTH_CACHE_EXPIRED_URL, 4)
};
UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
SetupCacheEntry(cache.get(), CACHED_URL, false, false);
SetupCacheEntry(cache.get(), NEG_CACHE_EXPIRED_URL, true, false);
SetupCacheEntry(cache.get(), POS_CACHE_EXPIRED_URL, false, true);
SetupCacheEntry(cache.get(), BOTH_CACHE_EXPIRED_URL, true, true);
// Before invalidate
TestCache(CACHED_URL, true, true, true, cache.get());
TestCache(NEG_CACHE_EXPIRED_URL, true, true, true, cache.get());
TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
TestCache(BOTH_CACHE_EXPIRED_URL, true, false, true, cache.get());
// Call InvalidateExpiredCacheEntry to remove cache entries whose negative cache
// time is expired
cache->InvalidateExpiredCacheEntry();
// After invalidate, NEG_CACHE_EXPIRED_URL & BOTH_CACHE_EXPIRED_URL should
// not be found in cache.
TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
TestCache(BOTH_CACHE_EXPIRED_URL, true, false, false, cache.get());
// Other entries should keep returning the same result.
TestCache(CACHED_URL, true, true, true, cache.get());
TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
}
// This testcase checks that a cache entry whose negative cache time has expired
// and which doesn't have any positive cache entries in it is removed
// from the cache after calling |Has|.
TEST(CachingV4, NegativeCacheExpire)
{
_PrefixArray array = { GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8) };
UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
FullHashResponseMap map;
CachedFullHashResponse* response = map.LookupOrAdd(
GeneratePrefix(NEG_CACHE_EXPIRED_URL, PREFIX_SIZE));
response->negativeCacheExpirySec = EXPIRED_TIME_SEC;
cache->AddFullHashResponseToCache(map);
// The first time, we should find it in the cache but the result is not
// confirmed (because it is expired).
TestCache(NEG_CACHE_EXPIRED_URL, true, false, true, cache.get());
// The second time, it should no longer be found in the cache.
TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
}
// This testcase checks that we only look up the cache with a 4-byte prefix
TEST(CachingV4, Ensure4BytesLookup)
{
_PrefixArray array = { GeneratePrefix(CACHED_URL, 8) };
UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
FullHashResponseMap map;
CachedFullHashResponse* response = map.LookupOrAdd(
GeneratePrefix(CACHED_URL, 5));
response->negativeCacheExpirySec = NOTEXPIRED_TIME_SEC;
response->fullHashes.Put(GeneratePrefix(CACHED_URL, COMPLETE_SIZE),
NOTEXPIRED_TIME_SEC);
cache->AddFullHashResponseToCache(map);
TestCache(CACHED_URL, true, false, false, cache.get());
}

View file

@ -159,7 +159,7 @@ public:
OnResponseParsed(uint32_t aMinWaitDuration,
uint32_t aNegCacheDuration) override
{
VerifyDuration(aMinWaitDuration / 1000, EXPECTED_MIN_WAIT_DURATION);
VerifyDuration(aMinWaitDuration, EXPECTED_MIN_WAIT_DURATION);
VerifyDuration(aNegCacheDuration, EXPECTED_NEG_CACHE_DURATION);
return NS_OK;
@ -191,7 +191,7 @@ private:
void
VerifyDuration(uint32_t aToVerify, const MyDuration& aExpected)
{
ASSERT_TRUE(aToVerify == aExpected.mSecs);
ASSERT_TRUE(aToVerify == (aExpected.mSecs * 1000));
}
~MyParseCallback() {}

View file

@ -2,8 +2,35 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "LookupCacheV4.h"
#include "Common.h"
#define GTEST_SAFEBROWSING_DIR NS_LITERAL_CSTRING("safebrowsing")
#define GTEST_TABLE NS_LITERAL_CSTRING("gtest-malware-proto")
typedef nsCString _Fragment;
typedef nsTArray<nsCString> _PrefixArray;
static UniquePtr<LookupCacheV4>
SetupLookupCacheV4(const _PrefixArray& prefixArray)
{
nsCOMPtr<nsIFile> file;
NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
file->AppendNative(GTEST_SAFEBROWSING_DIR);
UniquePtr<LookupCacheV4> cache = MakeUnique<LookupCacheV4>(GTEST_TABLE, EmptyCString(), file);
nsresult rv = cache->Init();
EXPECT_EQ(rv, NS_OK);
PrefixStringMap map;
PrefixArrayToPrefixStringMap(prefixArray, map);
rv = cache->Build(map);
EXPECT_EQ(rv, NS_OK);
return Move(cache);
}
void
TestHasPrefix(const _Fragment& aFragment, bool aExpectedHas, bool aExpectedComplete)
{
@ -20,18 +47,13 @@ TestHasPrefix(const _Fragment& aFragment, bool aExpectedHas, bool aExpectedCompl
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
lookupHash.FromPlaintext(aFragment, cryptoHash);
bool has, confirmed, fromCache;
bool has, fromCache;
uint32_t matchLength;
// Freshness is not used in V4 so we just put dummy values here.
TableFreshnessMap dummy;
nsresult rv = cache->Has(lookupHash, dummy, 0,
&has, &matchLength, &confirmed, &fromCache);
nsresult rv = cache->Has(lookupHash, &has, &matchLength, &fromCache);
EXPECT_EQ(rv, NS_OK);
EXPECT_EQ(has, aExpectedHas);
EXPECT_EQ(matchLength == COMPLETE_SIZE, aExpectedComplete);
EXPECT_EQ(confirmed, false);
EXPECT_EQ(fromCache, false);
cache->ClearAll();
});

View file

@ -10,7 +10,6 @@ LOCAL_INCLUDES += [
UNIFIED_SOURCES += [
'Common.cpp',
'TestCachingV4.cpp',
'TestChunkSet.cpp',
'TestClassifier.cpp',
'TestFailUpdate.cpp',

View file

@ -355,7 +355,7 @@ function callback(completion) {
}
callback.prototype = {
completionV2: function completion(hash, table, chunkId, trusted) {
completion: function completion(hash, table, chunkId, trusted) {
do_check_true(this._completion.expectCompletion);
if (this._completion.multipleCompletions) {
for (let completion of this._completion.completions) {

View file

@ -90,18 +90,11 @@ add_test(function test_getHashRequestV4() {
let completeFinishedCnt = 0;
gCompleter.complete("0123", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
completionV4(hash, table, duration, fullhashes) {
equal(hash, "0123");
completion(hash, table, chunkId) {
equal(hash, "01234567890123456789012345678901");
equal(table, TEST_TABLE_DATA_V4.tableName);
equal(duration, 120);
equal(fullhashes.length, 1);
let match = fullhashes.QueryInterface(Ci.nsIArray)
.queryElementAt(0, Ci.nsIFullHashMatch);
equal(match.fullHash, "01234567890123456789012345678901");
equal(match.cacheDuration, 8)
do_print("completion: " + match.fullHash + ", " + table);
equal(chunkId, 0);
do_print("completion: " + hash + ", " + table + ", " + chunkId);
},
completionFinished(status) {
@ -114,18 +107,11 @@ add_test(function test_getHashRequestV4() {
});
gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
completionV4(hash, table, duration, fullhashes) {
equal(hash, "1234567");
completion(hash, table, chunkId) {
equal(hash, "12345678901234567890123456789012");
equal(table, TEST_TABLE_DATA_V4.tableName);
equal(duration, 120);
equal(fullhashes.length, 1);
let match = fullhashes.QueryInterface(Ci.nsIArray)
.queryElementAt(0, Ci.nsIFullHashMatch);
equal(match.fullHash, "12345678901234567890123456789012");
equal(match.cacheDuration, 7)
do_print("completion: " + match.fullHash + ", " + table);
equal(chunkId, 0);
do_print("completion: " + hash + ", " + table + ", " + chunkId);
},
completionFinished(status) {
@ -138,11 +124,8 @@ add_test(function test_getHashRequestV4() {
});
gCompleter.complete("1111", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
completionV4(hash, table, duration, fullhashes) {
equal(hash, "1111");
equal(table, TEST_TABLE_DATA_V4.tableName);
equal(duration, 120);
equal(fullhashes.length, 0);
completion(hash, table, chunkId) {
ok(false, "1111 is not the prefix of " + hash);
},
completionFinished(status) {
@ -166,17 +149,11 @@ add_test(function test_minWaitDuration() {
let successComplete = function() {
gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
completionV4(hash, table, duration, fullhashes) {
equal(hash, "1234567");
completion(hash, table, chunkId) {
equal(hash, "12345678901234567890123456789012");
equal(table, TEST_TABLE_DATA_V4.tableName);
equal(fullhashes.length, 1);
let match = fullhashes.QueryInterface(Ci.nsIArray)
.queryElementAt(0, Ci.nsIFullHashMatch);
equal(match.fullHash, "12345678901234567890123456789012");
equal(match.cacheDuration, 7)
do_print("completion: " + match.fullHash + ", " + table);
equal(chunkId, 0);
do_print("completion: " + hash + ", " + table + ", " + chunkId);
},
completionFinished(status) {

View file

@ -35,7 +35,7 @@ complete: function(partialHash, gethashUrl, tableName, cb)
for (var i = 0; i < fragments[partialHash].length; i++) {
var chunkId = fragments[partialHash][i][0];
var hash = fragments[partialHash][i][1];
cb.completionV2(hash, self.tableName, chunkId);
cb.completion(hash, self.tableName, chunkId);
}
}
cb.completionFinished(0);