Bug 673470 - Replace the SQLite SafeBrowsing store with an optimized store. r=dcamp

Gian-Carlo Pascutto 2011-12-06 19:03:05 +01:00
Parent e026e5fb10
Commit 00125c4e7a
34 changed files: 5149 additions and 3791 deletions

View file

@@ -67,7 +67,6 @@ struct Histograms {
Histograms gHistograms[] = {
SQLITE_TELEMETRY("places.sqlite", PLACES),
SQLITE_TELEMETRY("urlclassifier3.sqlite", URLCLASSIFIER),
SQLITE_TELEMETRY("cookies.sqlite", COOKIES),
SQLITE_TELEMETRY("webappsstore.sqlite", WEBAPPS),
SQLITE_TELEMETRY(NULL, OTHER)

View file

@@ -163,9 +163,9 @@
#define NS_TYPEAHEADFIND_CID \
{ 0xe7f70966, 0x9a37, 0x48d7, { 0x8a, 0xeb, 0x35, 0x99, 0x8f, 0x31, 0x09, 0x0e} }
- // {15a892dd-cb0f-4a9f-a27f-8291d5e16653}
- #define NS_URLCLASSIFIERPREFIXSET_CID \
- { 0x15a892dd, 0xcb0f, 0x4a9f, { 0xa2, 0x7f, 0x82, 0x91, 0xd5, 0xe1, 0x66, 0x53} }
+ // {b21b0fa1-20d2-422a-b2cc-b289c9325811}
+ #define NS_URLCLASSIFIERPREFIXSET_CID \
+ { 0xb21b0fa1, 0x20d2, 0x422a, { 0xb2, 0xcc, 0xb2, 0x89, 0xc9, 0x32, 0x58, 0x11} }
// {5eb7c3c1-ec1f-4007-87cc-eefb37d68ce6}
#define NS_URLCLASSIFIERDBSERVICE_CID \

View file

@@ -225,7 +225,6 @@ HISTOGRAM(CHECK_JAVA_ENABLED, 1, 3000, 10, EXPONENTIAL, "Time spent checking if
SQLITE_TIME_SPENT(OTHER_ ## NAME, DESC) \
SQLITE_TIME_SPENT(PLACES_ ## NAME, DESC) \
SQLITE_TIME_SPENT(COOKIES_ ## NAME, DESC) \
- SQLITE_TIME_SPENT(URLCLASSIFIER_ ## NAME, DESC) \
SQLITE_TIME_SPENT(WEBAPPS_ ## NAME, DESC)
SQLITE_TIME_SPENT(OPEN, "Time spent on SQLite open() (ms)")
@@ -238,11 +237,9 @@ SQLITE_TIME_PER_FILE(SYNC, "Time spent on SQLite fsync() (ms)")
HISTOGRAM(MOZ_SQLITE_OTHER_READ_B, 1, 32768, 3, LINEAR, "SQLite read() (bytes)")
HISTOGRAM(MOZ_SQLITE_PLACES_READ_B, 1, 32768, 3, LINEAR, "SQLite read() (bytes)")
HISTOGRAM(MOZ_SQLITE_COOKIES_READ_B, 1, 32768, 3, LINEAR, "SQLite read() (bytes)")
- HISTOGRAM(MOZ_SQLITE_URLCLASSIFIER_READ_B, 1, 32768, 3, LINEAR, "SQLite read() (bytes)")
HISTOGRAM(MOZ_SQLITE_WEBAPPS_READ_B, 1, 32768, 3, LINEAR, "SQLite read() (bytes)")
HISTOGRAM(MOZ_SQLITE_PLACES_WRITE_B, 1, 32768, 3, LINEAR, "SQLite write (bytes)")
HISTOGRAM(MOZ_SQLITE_COOKIES_WRITE_B, 1, 32768, 3, LINEAR, "SQLite write (bytes)")
- HISTOGRAM(MOZ_SQLITE_URLCLASSIFIER_WRITE_B, 1, 32768, 3, LINEAR, "SQLite write (bytes)")
HISTOGRAM(MOZ_SQLITE_WEBAPPS_WRITE_B, 1, 32768, 3, LINEAR, "SQLite write (bytes)")
HISTOGRAM(MOZ_SQLITE_OTHER_WRITE_B, 1, 32768, 3, LINEAR, "SQLite write (bytes)")
HISTOGRAM(MOZ_STORAGE_ASYNC_REQUESTS_MS, 1, 32768, 20, EXPONENTIAL, "mozStorage async requests completion (ms)")
@@ -263,10 +260,14 @@ HISTOGRAM(NETWORK_DISK_CACHE_OUTPUT_STREAM_CLOSE_INTERNAL_MAIN_THREAD, 1, 10000,
* Url-Classifier telemetry
*/
#ifdef MOZ_URL_CLASSIFIER
HISTOGRAM(URLCLASSIFIER_LOOKUP_TIME, 1, 500, 10, EXPONENTIAL, "Time spent per dbservice lookup (ms)")
+ HISTOGRAM(URLCLASSIFIER_CL_CHECK_TIME, 1, 500, 10, EXPONENTIAL, "Time spent per classifier lookup (ms)")
+ HISTOGRAM(URLCLASSIFIER_CL_UPDATE_TIME, 20, 15000, 15, EXPONENTIAL, "Time spent per classifier update (ms)")
HISTOGRAM(URLCLASSIFIER_PS_FILELOAD_TIME, 1, 1000, 10, EXPONENTIAL, "Time spent loading PrefixSet from file (ms)")
HISTOGRAM(URLCLASSIFIER_PS_FALLOCATE_TIME, 1, 1000, 10, EXPONENTIAL, "Time spent fallocating PrefixSet (ms)")
HISTOGRAM(URLCLASSIFIER_PS_CONSTRUCT_TIME, 1, 5000, 15, EXPONENTIAL, "Time spent constructing PrefixSet from DB (ms)")
HISTOGRAM(URLCLASSIFIER_PS_LOOKUP_TIME, 1, 500, 10, EXPONENTIAL, "Time spent per PrefixSet lookup (ms)")
+ HISTOGRAM(URLCLASSIFIER_LC_PREFIXES, 1, 1500000, 15, LINEAR, "Size of the prefix cache in entries")
+ HISTOGRAM(URLCLASSIFIER_LC_COMPLETIONS, 1, 200, 10, EXPONENTIAL, "Size of the completion cache in entries")
HISTOGRAM_BOOLEAN(URLCLASSIFIER_PS_OOM, "Did UrlClassifier run out of memory during PrefixSet construction?")
#endif

View file

@@ -0,0 +1,136 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "ChunkSet.h"
namespace mozilla {
namespace safebrowsing {
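// Serialize() collapses runs of consecutive chunk numbers into ranges.
// Illustrative example (not taken from the source): the sorted chunk
// list [1,2,3,5,7,8] serializes to "1-3,5,7-8".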
nsresult
ChunkSet::Serialize(nsACString& aChunkStr)
{
aChunkStr.Truncate();
PRUint32 i = 0;
while (i < mChunks.Length()) {
if (i != 0) {
aChunkStr.Append(',');
}
aChunkStr.AppendInt((PRInt32)mChunks[i]);
PRUint32 first = i;
PRUint32 last = first;
i++;
while (i < mChunks.Length() && (mChunks[i] == mChunks[i - 1] + 1 || mChunks[i] == mChunks[i - 1])) {
last = i++;
}
if (last != first) {
aChunkStr.Append('-');
aChunkStr.AppendInt((PRInt32)mChunks[last]);
}
}
return NS_OK;
}
nsresult
ChunkSet::Set(PRUint32 aChunk)
{
PRUint32 idx = mChunks.BinaryIndexOf(aChunk);
if (idx == nsTArray<uint32>::NoIndex) {
mChunks.InsertElementSorted(aChunk);
}
return NS_OK;
}
nsresult
ChunkSet::Unset(PRUint32 aChunk)
{
mChunks.RemoveElementSorted(aChunk);
return NS_OK;
}
bool
ChunkSet::Has(PRUint32 aChunk) const
{
return mChunks.BinaryIndexOf(aChunk) != nsTArray<uint32>::NoIndex;
}
nsresult
ChunkSet::Merge(const ChunkSet& aOther)
{
const uint32 *dupIter = aOther.mChunks.Elements();
const uint32 *end = aOther.mChunks.Elements() + aOther.mChunks.Length();
for (const uint32 *iter = dupIter; iter != end; iter++) {
nsresult rv = Set(*iter);
NS_ENSURE_SUCCESS(rv, rv);
}
return NS_OK;
}
nsresult
ChunkSet::Remove(const ChunkSet& aOther)
{
uint32 *addIter = mChunks.Elements();
uint32 *end = mChunks.Elements() + mChunks.Length();
for (uint32 *iter = addIter; iter != end; iter++) {
if (!aOther.Has(*iter)) {
*addIter = *iter;
addIter++;
}
}
mChunks.SetLength(addIter - mChunks.Elements());
return NS_OK;
}
void
ChunkSet::Clear()
{
mChunks.Clear();
}
}
}

View file

@@ -0,0 +1,90 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef ChunkSet_h__
#define ChunkSet_h__
#include "Entries.h"
#include "nsString.h"
#include "nsTArray.h"
namespace mozilla {
namespace safebrowsing {
/**
* Store the chunks as an array of uint32.
* XXX: We should optimize this further to compress the
* many consecutive numbers.
*/
class ChunkSet {
public:
ChunkSet() {}
~ChunkSet() {}
nsresult Serialize(nsACString& aStr);
nsresult Set(PRUint32 aChunk);
nsresult Unset(PRUint32 aChunk);
void Clear();
nsresult Merge(const ChunkSet& aOther);
nsresult Remove(const ChunkSet& aOther);
bool Has(PRUint32 chunk) const;
uint32 Length() const { return mChunks.Length(); }
nsresult Write(nsIOutputStream* aOut) {
return WriteTArray(aOut, mChunks);
}
nsresult Read(nsIInputStream* aIn, PRUint32 aNumElements) {
return ReadTArray(aIn, &mChunks, aNumElements);
}
uint32 *Begin() { return mChunks.Elements(); }
uint32 *End() { return mChunks.Elements() + mChunks.Length(); }
private:
nsTArray<uint32> mChunks;
};
}
}
#endif

View file

@@ -0,0 +1,653 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "Classifier.h"
#include "nsISimpleEnumerator.h"
#include "nsIRandomGenerator.h"
#include "nsIInputStream.h"
#include "nsISeekableStream.h"
#include "nsIFile.h"
#include "nsAutoPtr.h"
#include "mozilla/Telemetry.h"
#include "prlog.h"
// NSPR_LOG_MODULES=UrlClassifierDbService:5
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (PR_FALSE)
#endif
namespace mozilla {
namespace safebrowsing {
Classifier::Classifier()
: mFreshTime(45 * 60)
{
}
Classifier::~Classifier()
{
Close();
}
/*
 * Generate a unique 32-bit key for this user, which we will
 * use to rehash all prefixes. This ensures that different users
 * get hash collisions on different prefixes, which in turn
 * prevents "unlucky" URLs from being mysteriously slow for every
 * user, and keeps the servers from being spammed if any such URL
 * gets slashdotted.
 * https://bugzilla.mozilla.org/show_bug.cgi?id=669407#c10
 */
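// The key is threaded through every LookupCache::Has() call (see
// Check() below), where it keys the hashing of the stored prefixes.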
nsresult
Classifier::InitKey()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(NS_LITERAL_CSTRING("classifier.hashkey"));
NS_ENSURE_SUCCESS(rv, rv);
bool exists;
rv = storeFile->Exists(&exists);
NS_ENSURE_SUCCESS(rv, rv);
if (!exists) {
// generate and store key
nsCOMPtr<nsIRandomGenerator> rg =
do_GetService("@mozilla.org/security/random-generator;1");
NS_ENSURE_STATE(rg);
PRUint8 *temp;
nsresult rv = rg->GenerateRandomBytes(sizeof(mHashKey), &temp);
NS_ENSURE_SUCCESS(rv, rv);
memcpy(&mHashKey, temp, sizeof(mHashKey));
NS_Free(temp);
nsCOMPtr<nsIOutputStream> out;
rv = NS_NewSafeLocalFileOutputStream(getter_AddRefs(out), storeFile,
-1, -1, 0);
NS_ENSURE_SUCCESS(rv, rv);
PRUint32 written;
rv = out->Write(reinterpret_cast<char*>(&mHashKey), sizeof(PRUint32), &written);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out);
rv = safeOut->Finish();
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Initialized classifier, key = %X", mHashKey));
} else {
// read key
nsCOMPtr<nsIInputStream> inputStream;
rv = NS_NewLocalFileInputStream(getter_AddRefs(inputStream), storeFile,
-1, -1, 0);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(inputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(rv, rv);
void *buffer = &mHashKey;
rv = NS_ReadInputStreamToBuffer(inputStream,
&buffer,
sizeof(PRUint32));
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Loaded classifier key = %X", mHashKey));
}
return NS_OK;
}
nsresult
Classifier::Open(nsIFile& aCacheDirectory)
{
nsresult rv;
mCryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
NS_ENSURE_SUCCESS(rv, rv);
// Ensure the safebrowsing directory exists.
rv = aCacheDirectory.Clone(getter_AddRefs(mStoreDirectory));
NS_ENSURE_SUCCESS(rv, rv);
rv = mStoreDirectory->AppendNative(NS_LITERAL_CSTRING("safebrowsing"));
NS_ENSURE_SUCCESS(rv, rv);
bool storeExists;
rv = mStoreDirectory->Exists(&storeExists);
NS_ENSURE_SUCCESS(rv, rv);
if (!storeExists) {
rv = mStoreDirectory->Create(nsIFile::DIRECTORY_TYPE, 0755);
NS_ENSURE_SUCCESS(rv, rv);
} else {
bool storeIsDir;
rv = mStoreDirectory->IsDirectory(&storeIsDir);
NS_ENSURE_SUCCESS(rv, rv);
if (!storeIsDir)
return NS_ERROR_FILE_DESTINATION_NOT_DIR;
}
rv = InitKey();
if (NS_FAILED(rv)) {
// Without a usable key the database is useless
Reset();
return NS_ERROR_FAILURE;
}
if (!mTableFreshness.Init()) {
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
Classifier::Close()
{
DropStores();
return NS_OK;
}
nsresult
Classifier::Reset()
{
DropStores();
nsCOMPtr<nsISimpleEnumerator> entries;
nsresult rv = mStoreDirectory->GetDirectoryEntries(getter_AddRefs(entries));
NS_ENSURE_SUCCESS(rv, rv);
bool hasMore;
while (NS_SUCCEEDED(rv = entries->HasMoreElements(&hasMore)) && hasMore) {
nsCOMPtr<nsIFile> file;
rv = entries->GetNext(getter_AddRefs(file));
NS_ENSURE_SUCCESS(rv, rv);
rv = file->Remove(PR_FALSE);
NS_ENSURE_SUCCESS(rv, rv);
}
NS_ENSURE_SUCCESS(rv, rv);
mTableFreshness.Clear();
return NS_OK;
}
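// Builds one line per active table in the chunk-range format that
// update requests use, e.g. (illustrative):
//   goog-malware-shavar;a:1-3,5:s:2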
void
Classifier::TableRequest(nsACString& aResult)
{
nsTArray<nsCString> tables;
ActiveTables(tables);
for (uint32 i = 0; i < tables.Length(); i++) {
nsAutoPtr<HashStore> store(new HashStore(tables[i], mStoreDirectory));
if (!store)
continue;
nsresult rv = store->Open();
if (NS_FAILED(rv))
continue;
aResult.Append(store->TableName());
aResult.Append(";");
ChunkSet &adds = store->AddChunks();
ChunkSet &subs = store->SubChunks();
if (adds.Length() > 0) {
aResult.Append("a:");
nsCAutoString addList;
adds.Serialize(addList);
aResult.Append(addList);
}
if (subs.Length() > 0) {
if (adds.Length() > 0)
aResult.Append(':');
aResult.Append("s:");
nsCAutoString subList;
subs.Serialize(subList);
aResult.Append(subList);
}
aResult.Append('\n');
}
}
nsresult
Classifier::Check(const nsACString& aSpec, LookupResultArray& aResults)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_CHECK_TIME> timer;
// Get the set of fragments to look up.
nsTArray<nsCString> fragments;
nsresult rv = LookupCache::GetLookupFragments(aSpec, &fragments);
NS_ENSURE_SUCCESS(rv, rv);
nsTArray<nsCString> activeTables;
ActiveTables(activeTables);
nsTArray<LookupCache*> cacheArray;
for (PRUint32 i = 0; i < activeTables.Length(); i++) {
LookupCache *cache = GetLookupCache(activeTables[i]);
if (cache) {
cacheArray.AppendElement(cache);
} else {
return NS_ERROR_FAILURE;
}
}
// Now check each lookup fragment against the entries in the DB.
for (PRUint32 i = 0; i < fragments.Length(); i++) {
Completion lookupHash;
lookupHash.FromPlaintext(fragments[i], mCryptoHash);
// Get list of host keys to look up
Completion hostKey;
rv = LookupCache::GetKey(fragments[i], &hostKey, mCryptoHash);
if (NS_FAILED(rv)) {
// Local host on the network
continue;
}
#if DEBUG && defined(PR_LOGGING)
if (LOG_ENABLED()) {
nsCAutoString checking;
lookupHash.ToString(checking);
LOG(("Checking %s (%X)", checking.get(), lookupHash.ToUint32()));
}
#endif
for (PRUint32 i = 0; i < cacheArray.Length(); i++) {
LookupCache *cache = cacheArray[i];
bool has, complete;
Prefix codedPrefix;
rv = cache->Has(lookupHash, hostKey, mHashKey,
&has, &complete, &codedPrefix);
NS_ENSURE_SUCCESS(rv, rv);
if (has) {
LookupResult *result = aResults.AppendElement();
if (!result)
return NS_ERROR_OUT_OF_MEMORY;
PRInt64 age;
bool found = mTableFreshness.Get(cache->TableName(), &age);
if (!found) {
age = 24 * 60 * 60; // just a large number
} else {
PRInt64 now = (PR_Now() / PR_USEC_PER_SEC);
age = now - age;
}
LOG(("Found a result in %s: %s (Age: %Lds)",
cache->TableName().get(),
complete ? "complete." : "Not complete.",
age));
result->hash.complete = lookupHash;
result->mCodedPrefix = codedPrefix;
result->mComplete = complete;
result->mFresh = (age < mFreshTime);
result->mTableName.Assign(cache->TableName());
}
}
}
return NS_OK;
}
nsresult
Classifier::ApplyUpdates(nsTArray<TableUpdate*>* aUpdates)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_UPDATE_TIME> timer;
#if defined(PR_LOGGING)
PRIntervalTime clockStart = 0;
if (LOG_ENABLED() || true) {
clockStart = PR_IntervalNow();
}
#endif
LOG(("Applying table updates."));
nsresult rv;
for (uint32 i = 0; i < aUpdates->Length(); i++) {
// A previous ApplyTableUpdates() may have consumed this update.
if ((*aUpdates)[i]) {
// Run all updates for one table
rv = ApplyTableUpdates(aUpdates, aUpdates->ElementAt(i)->TableName());
if (NS_FAILED(rv)) {
Reset();
return rv;
}
}
}
aUpdates->Clear();
LOG(("Done applying updates."));
#if defined(PR_LOGGING)
if (LOG_ENABLED() || true) {
PRIntervalTime clockEnd = PR_IntervalNow();
LOG(("update took %dms\n",
PR_IntervalToMilliseconds(clockEnd - clockStart)));
}
#endif
return NS_OK;
}
nsresult
Classifier::MarkSpoiled(nsTArray<nsCString>& aTables)
{
for (uint32 i = 0; i < aTables.Length(); i++) {
LOG(("Spoiling table: %s", aTables[i].get()));
// Spoil this table by marking it as no known freshness
mTableFreshness.Remove(aTables[i]);
}
return NS_OK;
}
void
Classifier::DropStores()
{
for (uint32 i = 0; i < mHashStores.Length(); i++) {
delete mHashStores[i];
}
mHashStores.Clear();
for (uint32 i = 0; i < mLookupCaches.Length(); i++) {
delete mLookupCaches[i];
}
mLookupCaches.Clear();
}
nsresult
Classifier::ScanStoreDir(nsTArray<nsCString>& aTables)
{
nsCOMPtr<nsISimpleEnumerator> entries;
nsresult rv = mStoreDirectory->GetDirectoryEntries(getter_AddRefs(entries));
NS_ENSURE_SUCCESS(rv, rv);
bool hasMore;
while (NS_SUCCEEDED(rv = entries->HasMoreElements(&hasMore)) && hasMore) {
nsCOMPtr<nsIFile> file;
rv = entries->GetNext(getter_AddRefs(file));
NS_ENSURE_SUCCESS(rv, rv);
nsCString leafName;
rv = file->GetNativeLeafName(leafName);
NS_ENSURE_SUCCESS(rv, rv);
nsCString suffix(NS_LITERAL_CSTRING(".sbstore"));
PRInt32 dot = leafName.RFind(suffix, 0);
if (dot != -1) {
leafName.Cut(dot, suffix.Length());
aTables.AppendElement(leafName);
}
}
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
Classifier::ActiveTables(nsTArray<nsCString>& aTables)
{
aTables.Clear();
nsTArray<nsCString> foundTables;
ScanStoreDir(foundTables);
for (uint32 i = 0; i < foundTables.Length(); i++) {
nsAutoPtr<HashStore> store(new HashStore(nsCString(foundTables[i]), mStoreDirectory));
if (!store)
return NS_ERROR_OUT_OF_MEMORY;
nsresult rv = store->Open();
if (NS_FAILED(rv))
continue;
LookupCache *lookupCache = GetLookupCache(store->TableName());
if (!lookupCache) {
continue;
}
const ChunkSet &adds = store->AddChunks();
const ChunkSet &subs = store->SubChunks();
if (adds.Length() == 0 && subs.Length() == 0)
continue;
LOG(("Active table: %s", store->TableName().get()));
aTables.AppendElement(store->TableName());
}
return NS_OK;
}
/*
* This will consume+delete updates from the passed nsTArray.
*/
nsresult
Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable)
{
LOG(("Classifier::ApplyTableUpdates(%s)",
PromiseFlatCString(aTable).get()));
nsAutoPtr<HashStore> store(new HashStore(aTable, mStoreDirectory));
if (!store)
return NS_ERROR_FAILURE;
// take the quick exit if there is no valid update for us
// (common case)
uint32 validupdates = 0;
for (uint32 i = 0; i < aUpdates->Length(); i++) {
TableUpdate *update = aUpdates->ElementAt(i);
if (!update || !update->TableName().Equals(store->TableName()))
continue;
if (update->Empty()) {
aUpdates->ElementAt(i) = nsnull;
delete update;
continue;
}
validupdates++;
}
if (!validupdates) {
return NS_OK;
}
nsresult rv = store->Open();
NS_ENSURE_SUCCESS(rv, rv);
rv = store->BeginUpdate();
NS_ENSURE_SUCCESS(rv, rv);
// Read the part of the store that is (only) in the cache
LookupCache *prefixSet = GetLookupCache(store->TableName());
if (!prefixSet) {
return NS_ERROR_FAILURE;
}
nsTArray<PRUint32> AddPrefixHashes;
rv = prefixSet->GetPrefixes(&AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
rv = store->AugmentAdds(AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
uint32 applied = 0;
bool updateFreshness = false;
for (uint32 i = 0; i < aUpdates->Length(); i++) {
TableUpdate *update = aUpdates->ElementAt(i);
if (!update || !update->TableName().Equals(store->TableName()))
continue;
rv = store->ApplyUpdate(*update);
NS_ENSURE_SUCCESS(rv, rv);
applied++;
LOG(("Applied update to table %s:", PromiseFlatCString(store->TableName()).get()));
LOG((" %d add chunks", update->AddChunks().Length()));
LOG((" %d add prefixes", update->AddPrefixes().Length()));
LOG((" %d add completions", update->AddCompletes().Length()));
LOG((" %d sub chunks", update->SubChunks().Length()));
LOG((" %d sub prefixes", update->SubPrefixes().Length()));
LOG((" %d sub completions", update->SubCompletes().Length()));
LOG((" %d add expirations", update->AddExpirations().Length()));
LOG((" %d sub expirations", update->SubExpirations().Length()));
if (!update->IsLocalUpdate()) {
updateFreshness = true;
LOG(("Remote update, updating freshness"));
}
aUpdates->ElementAt(i) = nsnull;
delete update;
}
LOG(("Applied %d update(s) to %s.", applied, PromiseFlatCString(store->TableName()).get()));
rv = store->Rebuild();
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Table %s now has:", PromiseFlatCString(store->TableName()).get()));
LOG((" %d add chunks", store->AddChunks().Length()));
LOG((" %d add prefixes", store->AddPrefixes().Length()));
LOG((" %d add completions", store->AddCompletes().Length()));
LOG((" %d sub chunks", store->SubChunks().Length()));
LOG((" %d sub prefixes", store->SubPrefixes().Length()));
LOG((" %d sub completions", store->SubCompletes().Length()));
rv = store->WriteFile();
NS_ENSURE_SUCCESS(rv, rv);
// At this point the store is updated and written out to disk, but
// the data is still in memory. Build our quick-lookup table here.
rv = prefixSet->Build(store->AddPrefixes(), store->AddCompletes());
NS_ENSURE_SUCCESS(rv, rv);
#if defined(DEBUG) && defined(PR_LOGGING)
prefixSet->Dump();
#endif
prefixSet->WriteFile();
// This will drop all the temporary storage used during the update.
rv = store->FinishUpdate();
NS_ENSURE_SUCCESS(rv, rv);
if (updateFreshness) {
PRInt64 now = (PR_Now() / PR_USEC_PER_SEC);
LOG(("Successfully updated %s", PromiseFlatCString(store->TableName()).get()));
rv = (mTableFreshness.Put(store->TableName(), now) ? NS_OK : NS_ERROR_FAILURE);
}
return rv;
}
LookupCache *
Classifier::GetLookupCache(const nsACString& aTable)
{
for (uint32 i = 0; i < mLookupCaches.Length(); i++) {
if (mLookupCaches[i]->TableName().Equals(aTable)) {
return mLookupCaches[i];
}
}
LookupCache *cache = new LookupCache(aTable, mStoreDirectory);
nsresult rv = cache->Init();
if (NS_FAILED(rv)) {
return nsnull;
}
rv = cache->Open();
if (NS_FAILED(rv)) {
if (rv == NS_ERROR_FILE_CORRUPTED) {
Reset();
}
return nsnull;
}
mLookupCaches.AppendElement(cache);
return cache;
}
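// ReadNoiseEntries picks aCount neighboring prefixes from the same
// table, starting at the nearest aCount-aligned index at or below the
// real prefix, to mask which entry is actually being completed.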
nsresult
Classifier::ReadNoiseEntries(const Prefix& aPrefix,
const nsACString& aTableName,
PRInt32 aCount,
PrefixArray* aNoiseEntries)
{
LookupCache *cache = GetLookupCache(aTableName);
if (!cache) {
return NS_ERROR_FAILURE;
}
nsTArray<PRUint32> prefixes;
nsresult rv = cache->GetPrefixes(&prefixes);
NS_ENSURE_SUCCESS(rv, rv);
PRInt32 idx = prefixes.BinaryIndexOf(aPrefix.ToUint32());
if (idx == nsTArray<PRUint32>::NoIndex) {
NS_WARNING("Could not find prefix in PrefixSet during noise lookup");
return NS_ERROR_FAILURE;
}
idx -= idx % aCount;
for (PRInt32 i = 0; (i < aCount) && ((idx+i) < prefixes.Length()); i++) {
Prefix newPref;
newPref.FromUint32(prefixes[idx+i]);
aNoiseEntries->AppendElement(newPref);
}
return NS_OK;
}
}
}

View file

@@ -0,0 +1,128 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef Classifier_h__
#define Classifier_h__
#include "Entries.h"
#include "HashStore.h"
#include "ProtocolParser.h"
#include "LookupCache.h"
#include "nsCOMPtr.h"
#include "nsString.h"
#include "nsIFile.h"
#include "nsICryptoHash.h"
#include "nsDataHashtable.h"
namespace mozilla {
namespace safebrowsing {
/**
* Maintains the stores and LookupCaches for the url classifier.
*/
class Classifier {
public:
Classifier();
~Classifier();
nsresult Open(nsIFile& aCacheDirectory);
nsresult Close();
nsresult Reset();
/**
* Get the list of active tables and their chunks in a format
* suitable for an update request.
*/
void TableRequest(nsACString& aResult);
/*
* Get all tables that we know about.
*/
nsresult ActiveTables(nsTArray<nsCString>& aTables);
/**
* Check a URL against the database.
*/
nsresult Check(const nsACString& aSpec, LookupResultArray& aResults);
/**
* Apply the table updates in the array. Takes ownership of
* the updates in the array and clears it. Wacky!
*/
nsresult ApplyUpdates(nsTArray<TableUpdate*>* aUpdates);
/**
* Failed update. Spoil the entries so we don't block hosts
* unnecessarily.
*/
nsresult MarkSpoiled(nsTArray<nsCString>& aTables);
nsresult CacheCompletions(const CacheResultArray& aResults);
PRUint32 GetHashKey(void) { return mHashKey; }
void SetFreshTime(PRUint32 aTime) { mFreshTime = aTime; }
/*
* Get a bunch of extra prefixes to query for completion
* and mask the real entry being requested
*/
nsresult ReadNoiseEntries(const Prefix& aPrefix,
const nsACString& aTableName,
PRInt32 aCount,
PrefixArray* aNoiseEntries);
private:
void DropStores();
nsresult ScanStoreDir(nsTArray<nsCString>& aTables);
nsresult ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable);
LookupCache *GetLookupCache(const nsACString& aTable);
nsresult InitKey();
nsCOMPtr<nsICryptoHash> mCryptoHash;
nsCOMPtr<nsIFile> mStoreDirectory;
nsTArray<HashStore*> mHashStores;
nsTArray<LookupCache*> mLookupCaches;
PRUint32 mHashKey;
// Stores the last time a given table was updated (seconds).
nsDataHashtable<nsCStringHashKey, PRInt64> mTableFreshness;
PRUint32 mFreshTime;
};
}
}
#endif

View file

@@ -0,0 +1,335 @@
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
#ifndef SBEntries_h__
#define SBEntries_h__
#include "nsTArray.h"
#include "nsString.h"
#include "nsICryptoHash.h"
#include "nsNetUtil.h"
#include "prlog.h"
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (PR_FALSE)
#endif
#if DEBUG
#include "plbase64.h"
#endif
namespace mozilla {
namespace safebrowsing {
#define PREFIX_SIZE 4
#define COMPLETE_SIZE 32
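// A fixed-size hash value: S is 4 bytes (PREFIX_SIZE) for a Prefix and
// 32 bytes (COMPLETE_SIZE) for a full-length Completion. Ordering and
// equality are delegated to the Comparator template parameter.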
template <uint32 S, class Comparator>
struct SafebrowsingHash
{
static const uint32 sHashSize = S;
typedef SafebrowsingHash<S, Comparator> self_type;
uint8 buf[S];
nsresult FromPlaintext(const nsACString& aPlainText, nsICryptoHash* aHash) {
// From the protocol doc:
// Each entry in the chunk is composed
// of the SHA 256 hash of a suffix/prefix expression.
nsresult rv = aHash->Init(nsICryptoHash::SHA256);
NS_ENSURE_SUCCESS(rv, rv);
rv = aHash->Update
(reinterpret_cast<const uint8*>(aPlainText.BeginReading()),
aPlainText.Length());
NS_ENSURE_SUCCESS(rv, rv);
nsCAutoString hashed;
rv = aHash->Finish(PR_FALSE, hashed);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(hashed.Length() >= sHashSize,
"not enough characters in the hash");
memcpy(buf, hashed.BeginReading(), sHashSize);
return NS_OK;
}
void Assign(const nsACString& aStr) {
NS_ASSERTION(aStr.Length() >= sHashSize,
"string must be at least sHashSize characters long");
memcpy(buf, aStr.BeginReading(), sHashSize);
}
int Compare(const self_type& aOther) const {
return Comparator::Compare(buf, aOther.buf);
}
bool operator==(const self_type& aOther) const {
return Comparator::Compare(buf, aOther.buf) == 0;
}
bool operator!=(const self_type& aOther) const {
return Comparator::Compare(buf, aOther.buf) != 0;
}
bool operator<(const self_type& aOther) const {
return Comparator::Compare(buf, aOther.buf) < 0;
}
#ifdef DEBUG
void ToString(nsACString& aStr) const {
uint32 len = ((sHashSize + 2) / 3) * 4;
aStr.SetCapacity(len + 1);
PL_Base64Encode((char*)buf, sHashSize, aStr.BeginWriting());
aStr.BeginWriting()[len] = '\0';
}
#endif
PRUint32 ToUint32() const {
PRUint32 res = 0;
memcpy(&res, buf, NS_MIN<size_t>(4, S));
return res;
}
void FromUint32(PRUint32 aHash) {
memcpy(buf, &aHash, NS_MIN<size_t>(4, S));
}
};
class PrefixComparator {
public:
static int Compare(const PRUint8* a, const PRUint8* b) {
return *((uint32*)a) - *((uint32*)b);
}
};
typedef SafebrowsingHash<PREFIX_SIZE, PrefixComparator> Prefix;
typedef nsTArray<Prefix> PrefixArray;
class CompletionComparator {
public:
static int Compare(const PRUint8* a, const PRUint8* b) {
return memcmp(a, b, COMPLETE_SIZE);
}
};
typedef SafebrowsingHash<COMPLETE_SIZE, CompletionComparator> Completion;
typedef nsTArray<Completion> CompletionArray;
struct AddPrefix {
Prefix prefix;
uint32 addChunk;
AddPrefix() : addChunk(0) {}
uint32 Chunk() const { return addChunk; }
const Prefix &PrefixHash() const { return prefix; }
template<class T>
int Compare(const T& other) const {
int cmp = prefix.Compare(other.PrefixHash());
if (cmp != 0) {
return cmp;
}
return addChunk - other.addChunk;
}
};
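// Ties a full 32-byte completion to its add chunk. The union lets the
// first four bytes of the completion double as its Prefix for
// prefix-level comparisons.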
struct AddComplete {
union {
Prefix prefix;
Completion complete;
} hash;
uint32 addChunk;
AddComplete() : addChunk(0) {}
uint32 Chunk() const { return addChunk; }
const Prefix &PrefixHash() const { return hash.prefix; }
const Completion &CompleteHash() const { return hash.complete; }
template<class T>
int Compare(const T& other) const {
int cmp = hash.complete.Compare(other.CompleteHash());
if (cmp != 0) {
return cmp;
}
return addChunk - other.addChunk;
}
};
struct SubPrefix {
Prefix prefix;
uint32 addChunk;
uint32 subChunk;
SubPrefix(): addChunk(0), subChunk(0) {}
uint32 Chunk() const { return subChunk; }
uint32 AddChunk() const { return addChunk; }
const Prefix &PrefixHash() const { return prefix; }
template<class T>
int Compare(const T& aOther) const {
int cmp = prefix.Compare(aOther.PrefixHash());
if (cmp != 0)
return cmp;
if (addChunk != aOther.addChunk)
return addChunk - aOther.addChunk;
return subChunk - aOther.subChunk;
}
template<class T>
int CompareAlt(const T& aOther) const {
int cmp = prefix.Compare(aOther.PrefixHash());
if (cmp != 0)
return cmp;
return addChunk - aOther.addChunk;
}
};
struct SubComplete {
union {
Prefix prefix;
Completion complete;
} hash;
uint32 addChunk;
uint32 subChunk;
SubComplete() : addChunk(0), subChunk(0) {}
uint32 Chunk() const { return subChunk; }
uint32 AddChunk() const { return addChunk; }
const Prefix &PrefixHash() const { return hash.prefix; }
const Completion &CompleteHash() const { return hash.complete; }
int Compare(const SubComplete& aOther) const {
int cmp = hash.complete.Compare(aOther.hash.complete);
if (cmp != 0)
return cmp;
if (addChunk != aOther.addChunk)
return addChunk - aOther.addChunk;
return subChunk - aOther.subChunk;
}
};
typedef nsTArray<AddPrefix> AddPrefixArray;
typedef nsTArray<AddComplete> AddCompleteArray;
typedef nsTArray<SubPrefix> SubPrefixArray;
typedef nsTArray<SubComplete> SubCompleteArray;
/**
* Compares chunks by their add chunk, then their prefix.
*/
template<class T>
class EntryCompare {
public:
typedef T elem_type;
static int Compare(const void* e1, const void* e2, void* data) {
const elem_type* a = static_cast<const elem_type*>(e1);
const elem_type* b = static_cast<const elem_type*>(e2);
return a->Compare(*b);
}
};
template<>
class EntryCompare<SubPrefix> {
public:
typedef SubPrefix elem_type;
static int Compare(const void* e1, const void* e2, void* data) {
const elem_type* a = static_cast<const elem_type*>(e1);
const elem_type* b = static_cast<const elem_type*>(e2);
return a->Compare(*b);
}
};
template<>
class EntryCompare<SubComplete> {
public:
typedef SubComplete elem_type;
static int Compare(const void* e1, const void* e2, void* data) {
const elem_type *a = static_cast<const elem_type*>(e1);
const elem_type *b = static_cast<const elem_type*>(e2);
return a->Compare(*b);
}
};
/**
* Sort an array of store entries. nsTArray::Sort uses Equal/LessThan
* to sort; this does a single Compare, so it's a bit quicker over the
* large sorts we do.
*/
template<class T>
void
EntrySort(nsTArray<T>& aArray)
{
NS_QuickSort(aArray.Elements(), aArray.Length(), sizeof(T),
EntryCompare<T>::Compare, 0);
}
template<class T>
nsresult
ReadTArray(nsIInputStream* aStream, nsTArray<T>* aArray, PRUint32 aNumElements)
{
if (!aArray->SetLength(aNumElements))
return NS_ERROR_OUT_OF_MEMORY;
void *buffer = aArray->Elements();
nsresult rv = NS_ReadInputStreamToBuffer(aStream, &buffer,
(aNumElements * sizeof(T)));
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
template<class T>
nsresult
WriteTArray(nsIOutputStream* aStream, nsTArray<T>& aArray)
{
PRUint32 written;
return aStream->Write(reinterpret_cast<char*>(aArray.Elements()),
aArray.Length() * sizeof(T),
&written);
}
}
}
#endif

View file

@@ -0,0 +1,950 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
// Originally based on Chrome sources:
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "HashStore.h"
#include "nsAutoPtr.h"
#include "nsICryptoHash.h"
#include "nsISeekableStream.h"
#include "nsIStreamConverterService.h"
#include "nsNetUtil.h"
#include "nsCheckSummedOutputStream.h"
#include "prlog.h"
#include "zlib.h"
// Main store for SafeBrowsing protocol data. We store the known
// add/sub chunks, prefixes, and completions in memory during an
// update, and serialize them to disk.
// We do not store the add prefixes; those are retrieved by
// decompressing the PrefixSet cache whenever we need to apply an
// update.
// Data format:
//   uint32 magic
//   uint32 version
//   uint32 numAddChunks
//   uint32 numSubChunks
//   uint32 numAddPrefixes
//   uint32 numSubPrefixes
//   uint32 numAddCompletes
//   uint32 numSubCompletes
//   0...numAddChunks       uint32 addChunk
//   0...numSubChunks       uint32 subChunk
//   uint32 compressed-size
//   compressed-size bytes  zlib DEFLATE data:
//     0...numAddPrefixes   uint32 addChunk
//   uint32 compressed-size
//   compressed-size bytes  zlib DEFLATE data:
//     0...numSubPrefixes   uint32 addChunk
//   uint32 compressed-size
//   compressed-size bytes  zlib DEFLATE data:
//     0...numSubPrefixes   uint32 subChunk
//   0...numSubPrefixes     uint32 subPrefix
//   0...numAddCompletes    32-byte Completions
//   0...numSubCompletes    32-byte Completions
//   16-byte MD5 of all preceding data
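// A sketch of the corresponding header layout (the actual Header
// struct lives in HashStore.h, which is not part of this excerpt):
//   struct Header {
//     uint32 magic, version;
//     uint32 numAddChunks, numSubChunks;
//     uint32 numAddPrefixes, numSubPrefixes;
//     uint32 numAddCompletes, numSubCompletes;
//   };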
// NSPR_LOG_MODULES=UrlClassifierDbService:5
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (PR_FALSE)
#endif
namespace mozilla {
namespace safebrowsing {
const uint32 STORE_MAGIC = 0x1231af3b;
const uint32 CURRENT_VERSION = 1;
void
TableUpdate::NewAddPrefix(PRUint32 aAddChunk, const Prefix& aHash)
{
AddPrefix *add = mAddPrefixes.AppendElement();
add->addChunk = aAddChunk;
add->prefix = aHash;
}
void
TableUpdate::NewSubPrefix(PRUint32 aAddChunk, const Prefix& aHash, PRUint32 aSubChunk)
{
SubPrefix *sub = mSubPrefixes.AppendElement();
sub->addChunk = aAddChunk;
sub->prefix = aHash;
sub->subChunk = aSubChunk;
}
void
TableUpdate::NewAddComplete(PRUint32 aAddChunk, const Completion& aHash)
{
AddComplete *add = mAddCompletes.AppendElement();
add->addChunk = aAddChunk;
add->hash.complete = aHash;
}
void
TableUpdate::NewSubComplete(PRUint32 aAddChunk, const Completion& aHash, PRUint32 aSubChunk)
{
SubComplete *sub = mSubCompletes.AppendElement();
sub->addChunk = aAddChunk;
sub->hash.complete = aHash;
sub->subChunk = aSubChunk;
}
HashStore::HashStore(const nsACString& aTableName, nsIFile* aStoreDir)
: mTableName(aTableName)
, mStoreDirectory(aStoreDir)
, mInUpdate(false)
{
}
HashStore::~HashStore()
{
}
nsresult
HashStore::Reset()
{
LOG(("HashStore resetting"));
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->Remove(PR_FALSE);
NS_ENSURE_SUCCESS(rv, rv);
Clear();
return NS_OK;
}
nsresult
HashStore::CheckChecksum(nsIFile* aStoreFile)
{
// Check for file corruption by
// comparing the stored checksum to actual checksum of data
nsCAutoString hash;
nsCAutoString compareHash;
char *data;
PRUint32 read;
PRInt64 fileSize;
nsresult rv = aStoreFile->GetFileSize(&fileSize);
NS_ENSURE_SUCCESS(rv, rv);
if (fileSize < 0) {
return NS_ERROR_FAILURE;
}
rv = CalculateChecksum(hash, true);
NS_ENSURE_SUCCESS(rv, rv);
compareHash.GetMutableData(&data, hash.Length());
nsCOMPtr<nsISeekableStream> seekIn = do_QueryInterface(mInputStream);
rv = seekIn->Seek(nsISeekableStream::NS_SEEK_SET, fileSize-hash.Length());
NS_ENSURE_SUCCESS(rv, rv);
rv = mInputStream->Read(data, hash.Length(), &read);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(read == hash.Length(), "Could not read hash bytes");
if (!hash.Equals(compareHash)) {
NS_WARNING("Safebrowing file failed checksum.");
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
HashStore::Open()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIInputStream> origStream;
rv = NS_NewLocalFileInputStream(getter_AddRefs(origStream), storeFile,
PR_RDONLY);
if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
Reset();
return rv;
}
if (rv == NS_ERROR_FILE_NOT_FOUND) {
Clear();
UpdateHeader();
return NS_OK;
}
rv = NS_NewBufferedInputStream(getter_AddRefs(mInputStream), origStream,
BUFFER_SIZE);
NS_ENSURE_SUCCESS(rv, rv);
rv = CheckChecksum(storeFile);
if (NS_FAILED(rv)) {
Reset();
return rv;
}
rv = ReadHeader();
if (NS_FAILED(rv)) {
Reset();
return rv;
}
rv = SanityCheck(storeFile);
if (NS_FAILED(rv)) {
NS_WARNING("Safebrowsing file failed sanity check. probably out of date.");
Reset();
return rv;
}
rv = ReadChunkNumbers();
if (NS_FAILED(rv)) {
Reset();
return rv;
}
return NS_OK;
}
void
HashStore::Clear()
{
mAddChunks.Clear();
mSubChunks.Clear();
mAddExpirations.Clear();
mSubExpirations.Clear();
mAddPrefixes.Clear();
mSubPrefixes.Clear();
mAddCompletes.Clear();
mSubCompletes.Clear();
}
nsresult
HashStore::ReadEntireStore()
{
Clear();
nsresult rv = ReadHeader();
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadChunkNumbers();
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadHashes();
if (NS_FAILED(rv)) {
// we are the only one reading this so it's up to us to detect corruption
Reset();
}
return rv;
}
nsresult
HashStore::ReadHeader()
{
if (!mInputStream) {
Clear();
UpdateHeader();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(rv, rv);
void *buffer = &mHeader;
rv = NS_ReadInputStreamToBuffer(mInputStream,
&buffer,
sizeof(Header));
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::SanityCheck(nsIFile *storeFile)
{
if (mHeader.magic != STORE_MAGIC || mHeader.version != CURRENT_VERSION) {
NS_WARNING("Unexpected header data in the store.");
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
HashStore::CalculateChecksum(nsCAutoString& aChecksum, bool aChecksumPresent)
{
aChecksum.Truncate();
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIInputStream> hashStream;
rv = NS_NewLocalFileInputStream(getter_AddRefs(hashStream), storeFile,
PR_RDONLY);
if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
Reset();
return rv;
}
PRInt64 fileSize;
rv = storeFile->GetFileSize(&fileSize);
NS_ENSURE_SUCCESS(rv, rv);
if (fileSize < 0) {
return NS_ERROR_FAILURE;
}
nsCOMPtr<nsICryptoHash> hash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
NS_ENSURE_SUCCESS(rv, rv);
// Size of MD5 hash in bytes
const uint32 CHECKSUM_SIZE = 16;
rv = hash->Init(nsICryptoHash::MD5);
NS_ENSURE_SUCCESS(rv, rv);
if (!aChecksumPresent) {
// Hash entire file
rv = hash->UpdateFromStream(hashStream, PR_UINT32_MAX);
} else {
// Hash everything but last checksum bytes
rv = hash->UpdateFromStream(hashStream, fileSize-CHECKSUM_SIZE);
}
NS_ENSURE_SUCCESS(rv, rv);
rv = hash->Finish(PR_FALSE, aChecksum);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
void
HashStore::UpdateHeader()
{
mHeader.magic = STORE_MAGIC;
mHeader.version = CURRENT_VERSION;
mHeader.numAddChunks = mAddChunks.Length();
mHeader.numSubChunks = mSubChunks.Length();
mHeader.numAddPrefixes = mAddPrefixes.Length();
mHeader.numSubPrefixes = mSubPrefixes.Length();
mHeader.numAddCompletes = mAddCompletes.Length();
mHeader.numSubCompletes = mSubCompletes.Length();
}
nsresult
HashStore::ReadChunkNumbers()
{
if (!mInputStream) {
LOG(("Clearing."));
Clear();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET,
sizeof(Header));
rv = mAddChunks.Read(mInputStream, mHeader.numAddChunks);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(mAddChunks.Length() == mHeader.numAddChunks, "Read the right amount of add chunks.");
rv = mSubChunks.Read(mInputStream, mHeader.numSubChunks);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(mSubChunks.Length() == mHeader.numSubChunks, "Read the right amount of sub chunks.");
return NS_OK;
}
nsresult
HashStore::ReadHashes()
{
if (!mInputStream) {
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
uint32 offset = sizeof(Header);
offset += (mHeader.numAddChunks + mHeader.numSubChunks) * sizeof(uint32);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
rv = ReadAddPrefixes();
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadSubPrefixes();
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes);
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::BeginUpdate()
{
mInUpdate = true;
nsresult rv = ReadEntireStore();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
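// Merge sorted entries from an update into the sorted store array.
// Entries that are already present (equal prefix and chunk numbers)
// are skipped, so re-applying a chunk does not duplicate data.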
template<class T>
static nsresult
Merge(ChunkSet* aStoreChunks,
nsTArray<T>* aStorePrefixes,
ChunkSet& aUpdateChunks,
nsTArray<T>& aUpdatePrefixes)
{
EntrySort(aUpdatePrefixes);
T* updateIter = aUpdatePrefixes.Elements();
T* updateEnd = aUpdatePrefixes.Elements() + aUpdatePrefixes.Length();
T* storeIter = aStorePrefixes->Elements();
T* storeEnd = aStorePrefixes->Elements() + aStorePrefixes->Length();
// use a separate array so we can keep the iterators valid
// if the nsTArray grows
nsTArray<T> adds;
for (; updateIter != updateEnd; updateIter++) {
// XXX: binary search for insertion point might be faster in common
// case?
while (storeIter < storeEnd && (storeIter->Compare(*updateIter) < 0)) {
// skip forward to matching element (or not...)
storeIter++;
}
// no match, add
if (storeIter == storeEnd
|| storeIter->Compare(*updateIter) != 0) {
if (!adds.AppendElement(*updateIter))
return NS_ERROR_OUT_OF_MEMORY;
}
}
// chunks can be empty, but we should still report we have them
// to make the chunk ranges continuous
aStoreChunks->Merge(aUpdateChunks);
aStorePrefixes->AppendElements(adds);
EntrySort(*aStorePrefixes);
return NS_OK;
}
nsresult
HashStore::ApplyUpdate(TableUpdate &update)
{
nsresult rv = mAddExpirations.Merge(update.AddExpirations());
NS_ENSURE_SUCCESS(rv, rv);
rv = mSubExpirations.Merge(update.SubExpirations());
NS_ENSURE_SUCCESS(rv, rv);
rv = Expire();
NS_ENSURE_SUCCESS(rv, rv);
rv = Merge(&mAddChunks, &mAddPrefixes,
update.AddChunks(), update.AddPrefixes());
NS_ENSURE_SUCCESS(rv, rv);
rv = Merge(&mAddChunks, &mAddCompletes,
update.AddChunks(), update.AddCompletes());
NS_ENSURE_SUCCESS(rv, rv);
rv = Merge(&mSubChunks, &mSubPrefixes,
update.SubChunks(), update.SubPrefixes());
NS_ENSURE_SUCCESS(rv, rv);
rv = Merge(&mSubChunks, &mSubCompletes,
update.SubChunks(), update.SubCompletes());
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::Rebuild()
{
NS_ASSERTION(mInUpdate, "Must be in update to rebuild.");
nsresult rv = ProcessSubs();
NS_ENSURE_SUCCESS(rv, rv);
UpdateHeader();
return NS_OK;
}
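// Compact aEntries in place, dropping every entry whose chunk number
// appears in aExpirations.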
template<class T>
static void
ExpireEntries(nsTArray<T>* aEntries, ChunkSet& aExpirations)
{
T* addIter = aEntries->Elements();
T* end = aEntries->Elements() + aEntries->Length();
for (T *iter = addIter; iter != end; iter++) {
if (!aExpirations.Has(iter->Chunk())) {
*addIter = *iter;
addIter++;
}
}
aEntries->SetLength(addIter - aEntries->Elements());
}
nsresult
HashStore::Expire()
{
ExpireEntries(&mAddPrefixes, mAddExpirations);
ExpireEntries(&mAddCompletes, mAddExpirations);
ExpireEntries(&mSubPrefixes, mSubExpirations);
ExpireEntries(&mSubCompletes, mSubExpirations);
mAddChunks.Remove(mAddExpirations);
mSubChunks.Remove(mSubExpirations);
mAddExpirations.Clear();
mSubExpirations.Clear();
return NS_OK;
}
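// Framing for the compressed arrays: a uint32 byte count followed by
// the zlib-compressed bytes (cf. the data format comment at the top of
// this file).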
template<class T>
nsresult DeflateWriteTArray(nsIOutputStream* aStream, nsTArray<T>& aIn)
{
uLongf insize = aIn.Length() * sizeof(T);
uLongf outsize = compressBound(insize);
nsTArray<char> outBuff;
outBuff.SetLength(outsize);
int zerr = compress(reinterpret_cast<Bytef*>(outBuff.Elements()),
&outsize,
reinterpret_cast<const Bytef*>(aIn.Elements()),
insize);
if (zerr != Z_OK) {
return NS_ERROR_FAILURE;
}
LOG(("DeflateWriteTArray: %d in %d out", insize, outsize));
outBuff.TruncateLength(outsize);
// Length of compressed data stream
PRUint32 dataLen = outBuff.Length();
PRUint32 written;
nsresult rv = aStream->Write(reinterpret_cast<char*>(&dataLen), sizeof(dataLen), &written);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(written == sizeof(dataLen), "Error writing deflate length");
// Store to stream
rv = WriteTArray(aStream, outBuff);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
template<class T>
nsresult InflateReadTArray(nsIInputStream* aStream, nsTArray<T>* aOut,
PRUint32 aExpectedSize)
{
PRUint32 inLen;
PRUint32 read;
nsresult rv = aStream->Read(reinterpret_cast<char*>(&inLen), sizeof(inLen), &read);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(read == sizeof(inLen), "Error reading inflate length");
nsTArray<char> inBuff;
inBuff.SetLength(inLen);
rv = ReadTArray(aStream, &inBuff, inLen);
NS_ENSURE_SUCCESS(rv, rv);
uLongf insize = inLen;
uLongf outsize = aExpectedSize * sizeof(T);
aOut->SetLength(aExpectedSize);
int zerr = uncompress(reinterpret_cast<Bytef*>(aOut->Elements()),
&outsize,
reinterpret_cast<const Bytef*>(inBuff.Elements()),
insize);
if (zerr != Z_OK) {
return NS_ERROR_FAILURE;
}
LOG(("InflateReadTArray: %d in %d out", insize, outsize));
NS_ASSERTION(outsize == aExpectedSize * sizeof(T), "Decompression size mismatch");
return NS_OK;
}
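// Only the add chunk numbers are persisted below; the prefixes
// themselves are zeroed here and later restored from the PrefixSet via
// AugmentAdds() when an update begins (see Classifier.cpp).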
nsresult
HashStore::ReadAddPrefixes()
{
nsTArray<uint32> chunks;
PRUint32 count = mHeader.numAddPrefixes;
nsresult rv = InflateReadTArray(mInputStream, &chunks, count);
NS_ENSURE_SUCCESS(rv, rv);
mAddPrefixes.SetCapacity(count);
for (uint32 i = 0; i < count; i++) {
AddPrefix *add = mAddPrefixes.AppendElement();
add->prefix.FromUint32(0);
add->addChunk = chunks[i];
}
return NS_OK;
}
nsresult
HashStore::ReadSubPrefixes()
{
nsTArray<PRUint32> addchunks;
nsTArray<PRUint32> subchunks;
nsTArray<Prefix> prefixes;
PRUint32 count = mHeader.numSubPrefixes;
nsresult rv = InflateReadTArray(mInputStream, &addchunks, count);
NS_ENSURE_SUCCESS(rv, rv);
rv = InflateReadTArray(mInputStream, &subchunks, count);
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &prefixes, count);
NS_ENSURE_SUCCESS(rv, rv);
mSubPrefixes.SetCapacity(count);
for (uint32 i = 0; i < count; i++) {
SubPrefix *sub = mSubPrefixes.AppendElement();
sub->addChunk = addchunks[i];
sub->prefix = prefixes[i];
sub->subChunk = subchunks[i];
}
return NS_OK;
}
// Split the prefix arrays back up into their constituent parts for writing.
nsresult
HashStore::WriteAddPrefixes(nsIOutputStream* aOut)
{
nsTArray<uint32> chunks;
PRUint32 count = mAddPrefixes.Length();
chunks.SetCapacity(count);
for (uint32 i = 0; i < count; i++) {
chunks.AppendElement(mAddPrefixes[i].Chunk());
}
nsresult rv = DeflateWriteTArray(aOut, chunks);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::WriteSubPrefixes(nsIOutputStream* aOut)
{
nsTArray<uint32> addchunks;
nsTArray<uint32> subchunks;
nsTArray<Prefix> prefixes;
PRUint32 count = mSubPrefixes.Length();
addchunks.SetCapacity(count);
subchunks.SetCapacity(count);
prefixes.SetCapacity(count);
for (uint32 i = 0; i < count; i++) {
addchunks.AppendElement(mSubPrefixes[i].AddChunk());
prefixes.AppendElement(mSubPrefixes[i].PrefixHash());
subchunks.AppendElement(mSubPrefixes[i].Chunk());
}
nsresult rv = DeflateWriteTArray(aOut, addchunks);
NS_ENSURE_SUCCESS(rv, rv);
rv = DeflateWriteTArray(aOut, subchunks);
NS_ENSURE_SUCCESS(rv, rv);
// chunk-ordered prefixes are not compressible
rv = WriteTArray(aOut, prefixes);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::WriteFile()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
NS_ENSURE_SUCCESS(rv, rv);
// Need to close the inputstream here *before* rewriting its file.
// Windows will fail with an access violation if we don't.
if (mInputStream) {
rv = mInputStream->Close();
NS_ENSURE_SUCCESS(rv, rv);
}
nsCOMPtr<nsIOutputStream> out;
rv = NS_NewCheckSummedOutputStream(getter_AddRefs(out), storeFile,
PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE);
NS_ENSURE_SUCCESS(rv, rv);
PRUint32 written;
rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written);
NS_ENSURE_SUCCESS(rv, rv);
// Write chunk numbers...
rv = mAddChunks.Write(out);
NS_ENSURE_SUCCESS(rv, rv);
rv = mSubChunks.Write(out);
NS_ENSURE_SUCCESS(rv, rv);
// Write hashes..
rv = WriteAddPrefixes(out);
NS_ENSURE_SUCCESS(rv, rv);
rv = WriteSubPrefixes(out);
NS_ENSURE_SUCCESS(rv, rv);
rv = WriteTArray(out, mAddCompletes);
NS_ENSURE_SUCCESS(rv, rv);
rv = WriteTArray(out, mSubCompletes);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out, &rv);
NS_ENSURE_SUCCESS(rv, rv);
rv = safeOut->Finish();
NS_ENSURE_SUCCESS(rv, rv);
// Reopen the file now that we've rewritten it.
nsCOMPtr<nsIInputStream> origStream;
rv = NS_NewLocalFileInputStream(getter_AddRefs(origStream), storeFile,
PR_RDONLY);
NS_ENSURE_SUCCESS(rv, rv);
rv = NS_NewBufferedInputStream(getter_AddRefs(mInputStream), origStream,
BUFFER_SIZE);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
HashStore::FinishUpdate()
{
// Drop add/sub data, it's only used during updates.
mAddPrefixes.Clear();
mSubPrefixes.Clear();
mAddCompletes.Clear();
mSubCompletes.Clear();
return NS_OK;
}
template <class T>
static void
Erase(nsTArray<T>* array, T* iterStart, T* iterEnd)
{
uint32 start = iterStart - array->Elements();
uint32 count = iterEnd - iterStart;
if (count > 0) {
array->RemoveElementsAt(start, count);
}
}
// Find items matching between |aSubs| and |aAdds|, and remove them
// from both arrays. To minimize copies, the inputs are processed in
// parallel, so |aSubs| and |aAdds| must be compatibly ordered (by add
// chunk, then prefix; see EntrySort in ProcessSubs).
template<class TSub, class TAdd>
static void
KnockoutSubs(nsTArray<TSub>* aSubs, nsTArray<TAdd>* aAdds)
{
// Keep a pair of output iterators for writing kept items. Due to
// deletions, these may lag the main iterators. Using erase() on
// individual items would result in O(N^2) copies. Using a list
// would work around that, at double or triple the memory cost.
TAdd* addOut = aAdds->Elements();
TAdd* addIter = aAdds->Elements();
TSub* subOut = aSubs->Elements();
TSub* subIter = aSubs->Elements();
TAdd* addEnd = addIter + aAdds->Length();
TSub* subEnd = subIter + aSubs->Length();
while (addIter != addEnd && subIter != subEnd) {
// Compare() is defined on the add type, so the comparison keys on the add chunk.
int32 cmp = addIter->Compare(*subIter);
if (cmp > 0) {
// If |*sub_iter| < |*add_iter|, retain the sub.
*subOut = *subIter;
++subOut;
++subIter;
} else if (cmp < 0) {
// If |*add_iter| < |*sub_iter|, retain the add.
*addOut = *addIter;
++addOut;
++addIter;
} else {
// Drop equal items
++addIter;
++subIter;
}
}
Erase(aAdds, addOut, addIter);
Erase(aSubs, subOut, subIter);
}
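// Worked example (hypothetical data): with adds ordered as
// A = [(chunk 1, p1), (chunk 2, p2), (chunk 3, p3)] and subs
// S = [(add chunk 2, p2), (add chunk 4, p4)], the matching pair
// (2, p2) is dropped from both arrays, leaving
// A = [(1, p1), (3, p3)] and S = [(4, p4)].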
// Remove items in |aSubs| from |aFullHashes|. |aFullHashes| and
// |aSubs| must be ordered by their add-prefix component.
template <class T>
static void
RemoveMatchingPrefixes(const SubPrefixArray& aSubs, nsTArray<T>* aFullHashes)
{
// Where to store kept items.
T* out = aFullHashes->Elements();
T* hashIter = out;
T* hashEnd = aFullHashes->Elements() + aFullHashes->Length();
SubPrefix const * removeIter = aSubs.Elements();
SubPrefix const * removeEnd = aSubs.Elements() + aSubs.Length();
while (hashIter != hashEnd && removeIter != removeEnd) {
int32 cmp = removeIter->CompareAlt(*hashIter);
if (cmp > 0) {
// Keep items less than |*removeIter|.
*out = *hashIter;
++out;
++hashIter;
} else if (cmp < 0) {
// No hit for |*removeIter|, bump it forward.
++removeIter;
} else {
// Drop equal items, there may be multiple hits.
do {
++hashIter;
} while (hashIter != hashEnd &&
!(removeIter->CompareAlt(*hashIter) < 0));
++removeIter;
}
}
Erase(aFullHashes, out, hashIter);
}
nsresult
HashStore::ProcessSubs()
{
EntrySort(mAddPrefixes);
EntrySort(mSubPrefixes);
EntrySort(mAddCompletes);
EntrySort(mSubCompletes);
KnockoutSubs(&mSubPrefixes, &mAddPrefixes);
RemoveMatchingPrefixes(mSubPrefixes, &mAddCompletes);
RemoveMatchingPrefixes(mSubPrefixes, &mSubCompletes);
KnockoutSubs(&mSubCompletes, &mAddCompletes);
// Clean up temporary subs used for knocking out completes
ChunkSet dummyChunks;
dummyChunks.Set(0);
ExpireEntries(&mSubPrefixes, dummyChunks);
ExpireEntries(&mSubCompletes, dummyChunks);
mSubChunks.Remove(dummyChunks);
return NS_OK;
}
nsresult
HashStore::AugmentAdds(const nsTArray<PRUint32>& aPrefixes)
{
uint32 cnt = aPrefixes.Length();
if (cnt != mAddPrefixes.Length()) {
LOG(("Amount of prefixes in cache not consistent with store (%d vs %d)",
aPrefixes.Length(), mAddPrefixes.Length()));
return NS_ERROR_FAILURE;
}
for (uint32 i = 0; i < cnt; i++) {
mAddPrefixes[i].prefix.FromUint32(aPrefixes[i]);
}
return NS_OK;
}
}
}

View file

@ -0,0 +1,213 @@
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef HashStore_h__
#define HashStore_h__
#include "Entries.h"
#include "ChunkSet.h"
#include "nsString.h"
#include "nsTArray.h"
#include "nsIFile.h"
#include "nsIFileStreams.h"
#include "nsCOMPtr.h"
namespace mozilla {
namespace safebrowsing {
class TableUpdate {
public:
TableUpdate(const nsACString& aTable)
: mTable(aTable), mLocalUpdate(false) {}
const nsCString& TableName() const { return mTable; }
bool Empty() const {
return mAddChunks.Length() == 0 &&
mSubChunks.Length() == 0 &&
mAddExpirations.Length() == 0 &&
mSubExpirations.Length() == 0 &&
mAddPrefixes.Length() == 0 &&
mSubPrefixes.Length() == 0 &&
mAddCompletes.Length() == 0 &&
mSubCompletes.Length() == 0;
}
void NewAddChunk(PRUint32 aChunk) { mAddChunks.Set(aChunk); }
void NewSubChunk(PRUint32 aChunk) { mSubChunks.Set(aChunk); }
void NewAddExpiration(PRUint32 aChunk) { mAddExpirations.Set(aChunk); }
void NewSubExpiration(PRUint32 aChunk) { mSubExpirations.Set(aChunk); }
void NewAddPrefix(PRUint32 aAddChunk, const Prefix& aPrefix);
void NewSubPrefix(PRUint32 aAddChunk, const Prefix& aPrefix, PRUint32 aSubChunk);
void NewAddComplete(PRUint32 aChunk, const Completion& aCompletion);
void NewSubComplete(PRUint32 aAddChunk, const Completion& aCompletion,
PRUint32 aSubChunk);
void SetLocalUpdate() { mLocalUpdate = true; }
bool IsLocalUpdate() { return mLocalUpdate; }
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
ChunkSet& AddExpirations() { return mAddExpirations; }
ChunkSet& SubExpirations() { return mSubExpirations; }
AddPrefixArray& AddPrefixes() { return mAddPrefixes; }
SubPrefixArray& SubPrefixes() { return mSubPrefixes; }
AddCompleteArray& AddCompletes() { return mAddCompletes; }
SubCompleteArray& SubCompletes() { return mSubCompletes; }
private:
nsCString mTable;
// Update not from the remote server (no freshness)
bool mLocalUpdate;
ChunkSet mAddChunks;
ChunkSet mSubChunks;
ChunkSet mAddExpirations;
ChunkSet mSubExpirations;
AddPrefixArray mAddPrefixes;
SubPrefixArray mSubPrefixes;
AddCompleteArray mAddCompletes;
SubCompleteArray mSubCompletes;
};
class HashStore {
public:
HashStore(const nsACString& aTableName, nsIFile* aStoreFile);
~HashStore();
const nsCString& TableName() const { return mTableName; }
nsresult Open();
nsresult AugmentAdds(const nsTArray<PRUint32>& aPrefixes);
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
const AddPrefixArray& AddPrefixes() const { return mAddPrefixes; }
const AddCompleteArray& AddCompletes() const { return mAddCompletes; }
const SubPrefixArray& SubPrefixes() const { return mSubPrefixes; }
const SubCompleteArray& SubCompletes() const { return mSubCompletes; }
// =======
// Updates
// =======
// Begin the update process. Reads the store into memory.
nsresult BeginUpdate();
// Imports the data from a TableUpdate.
nsresult ApplyUpdate(TableUpdate &aUpdate);
// Process expired chunks
nsresult Expire();
// Rebuild the store, incorporating all the applied updates.
nsresult Rebuild();
// Write the current state of the store to disk.
// Calling this between ApplyUpdate() and Rebuild() will write an
// inconsistent store.
nsresult WriteFile();
// Drop memory used during the update process.
nsresult FinishUpdate();
// Force a read of the entire store into memory.
nsresult ReadEntireStore();
private:
static const int BUFFER_SIZE = 6 * 1024 * 1024;
void Clear();
nsresult Reset();
nsresult ReadHeader();
nsresult SanityCheck(nsIFile* aStoreFile);
nsresult CalculateChecksum(nsCAutoString& aChecksum, bool aChecksumPresent);
nsresult CheckChecksum(nsIFile* aStoreFile);
void UpdateHeader();
nsresult EnsureChunkNumbers();
nsresult ReadChunkNumbers();
nsresult ReadHashes();
nsresult ReadAddPrefixes();
nsresult ReadSubPrefixes();
nsresult WriteAddPrefixes(nsIOutputStream* aOut);
nsresult WriteSubPrefixes(nsIOutputStream* aOut);
nsresult ProcessSubs();
struct Header {
uint32 magic;
uint32 version;
uint32 numAddChunks;
uint32 numSubChunks;
uint32 numAddPrefixes;
uint32 numSubPrefixes;
uint32 numAddCompletes;
uint32 numSubCompletes;
};
Header mHeader;
nsCString mTableName;
nsCOMPtr<nsIFile> mStoreDirectory;
bool mInUpdate;
nsCOMPtr<nsIInputStream> mInputStream;
bool haveChunks;
ChunkSet mAddChunks;
ChunkSet mSubChunks;
ChunkSet mAddExpirations;
ChunkSet mSubExpirations;
AddPrefixArray mAddPrefixes;
AddCompleteArray mAddCompletes;
SubPrefixArray mSubPrefixes;
SubCompleteArray mSubCompletes;
};
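// Typical update lifecycle for a HashStore, following the method comments
// above (a sketch; error handling elided, aUpdate assumed to come from the
// protocol parser):
//
//   HashStore store(tableName, storeDir);
//   store.Open();
//   store.BeginUpdate();        // read the existing store into memory
//   store.ApplyUpdate(aUpdate); // import the parsed update
//   store.Rebuild();            // merge the applied updates
//   store.WriteFile();          // persist the new state
//   store.FinishUpdate();       // drop update-only memory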
}
}
#endif

View file

@ -0,0 +1,776 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "LookupCache.h"
#include "HashStore.h"
#include "nsISeekableStream.h"
#include "mozilla/Telemetry.h"
#include "prlog.h"
#include "prprf.h"
// This class is the main entry point for all real lookups; note that
// lookups are not performed against the actual HashStore. The latter
// solely exists to store the data needed to handle the updates from
// the protocol.
// This module has its own store, which holds the Completions, mostly
// caching lookups that have happened over the net.
// The prefixes are cached/checked by looking them up in the
// PrefixSet.
// Data format for the ".cache" files:
// uint32 magic Identify the file type
// uint32 version Version identifier for file format
// uint32 numCompletions Number of completions stored
// 0...numCompletions 256-bit Completions
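// For example (hypothetical values), a cache holding two completions is
// laid out as:
//   0x1231af3e  magic (LOOKUPCACHE_MAGIC)
//   0x00000001  version
//   0x00000002  numCompletions
//   32 bytes    completion #1
//   32 bytes    completion #2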
// Name of the lookupcomplete cache
#define CACHE_SUFFIX ".cache"
// Name of the persistent PrefixSet storage
#define PREFIXSET_SUFFIX ".pset"
// NSPR_LOG_MODULES=UrlClassifierDbService:5
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (false)
#endif
namespace mozilla {
namespace safebrowsing {
const uint32 LOOKUPCACHE_MAGIC = 0x1231af3e;
const uint32 CURRENT_VERSION = 1;
LookupCache::LookupCache(const nsACString& aTableName, nsIFile* aStoreDir)
: mPrimed(false)
, mTableName(aTableName)
, mStoreDirectory(aStoreDir)
{
}
nsresult
LookupCache::Init()
{
mPrefixSet = new nsUrlClassifierPrefixSet();
nsresult rv = mPrefixSet->Init(mTableName);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
LookupCache::~LookupCache()
{
}
nsresult
LookupCache::Open()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = NS_NewLocalFileInputStream(getter_AddRefs(mInputStream), storeFile,
PR_RDONLY);
if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
Reset();
return rv;
}
if (rv == NS_ERROR_FILE_NOT_FOUND) {
Clear();
UpdateHeader();
return NS_OK;
}
rv = ReadHeader();
NS_ENSURE_SUCCESS(rv, rv);
LOG(("ReadCompletions"));
rv = ReadCompletions();
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Loading PrefixSet"));
rv = LoadPrefixSet();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
LookupCache::Reset()
{
LOG(("LookupCache resetting"));
nsCOMPtr<nsIFile> storeFile;
nsCOMPtr<nsIFile> prefixsetFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = mStoreDirectory->Clone(getter_AddRefs(prefixsetFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = prefixsetFile->AppendNative(mTableName + NS_LITERAL_CSTRING(PREFIXSET_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->Remove(false);
NS_ENSURE_SUCCESS(rv, rv);
rv = prefixsetFile->Remove(false);
NS_ENSURE_SUCCESS(rv, rv);
Clear();
return NS_OK;
}
nsresult
LookupCache::Build(const AddPrefixArray& aAddPrefixes,
const AddCompleteArray& aAddCompletes)
{
mCompletions.Clear();
mCompletions.SetCapacity(aAddCompletes.Length());
for (uint32 i = 0; i < aAddCompletes.Length(); i++) {
mCompletions.AppendElement(aAddCompletes[i].CompleteHash());
}
mCompletions.Sort();
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_LC_COMPLETIONS,
static_cast<PRUint32>(mCompletions.Length()));
nsresult rv = ConstructPrefixSet(aAddPrefixes);
NS_ENSURE_SUCCESS(rv, rv);
mPrimed = true;
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_LC_PREFIXES,
static_cast<PRUint32>(aAddPrefixes.Length()));
return NS_OK;
}
#if defined(DEBUG) && defined(PR_LOGGING)
void
LookupCache::Dump()
{
if (!LOG_ENABLED())
return;
for (uint32 i = 0; i < mCompletions.Length(); i++) {
nsCAutoString str;
mCompletions[i].ToString(str);
LOG(("Completion: %s", str.get()));
}
}
#endif
nsresult
LookupCache::Has(const Completion& aCompletion,
const Completion& aHostkey,
const PRUint32 aHashKey,
bool* aHas, bool* aComplete,
Prefix* aOrigPrefix)
{
*aHas = *aComplete = false;
// check completion store first
if (mCompletions.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex) {
LOG(("Complete in %s", mTableName.get()));
*aComplete = true;
*aHas = true;
return NS_OK;
}
PRUint32 prefix = aCompletion.ToUint32();
PRUint32 hostkey = aHostkey.ToUint32();
PRUint32 codedkey;
nsresult rv = KeyedHash(prefix, hostkey, aHashKey, &codedkey);
NS_ENSURE_SUCCESS(rv, rv);
Prefix codedPrefix;
codedPrefix.FromUint32(codedkey);
*aOrigPrefix = codedPrefix;
bool ready = true;
bool found;
rv = mPrefixSet->Probe(codedkey, &ready, &found);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Probe in %s: %X, ready: %d found %d", mTableName.get(), prefix, ready, found));
if (found) {
*aHas = true;
}
return NS_OK;
}
nsresult
LookupCache::WriteFile()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIOutputStream> out;
rv = NS_NewSafeLocalFileOutputStream(getter_AddRefs(out), storeFile,
PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE);
NS_ENSURE_SUCCESS(rv, rv);
UpdateHeader();
LOG(("Writing %d completions", mHeader.numCompletions));
PRUint32 written;
rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written);
NS_ENSURE_SUCCESS(rv, rv);
rv = WriteTArray(out, mCompletions);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out);
rv = safeOut->Finish();
NS_ENSURE_SUCCESS(rv, rv);
rv = EnsureSizeConsistent();
NS_ENSURE_SUCCESS(rv, rv);
// Reopen the file now that we've rewritten it.
rv = NS_NewLocalFileInputStream(getter_AddRefs(mInputStream), storeFile,
PR_RDONLY);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIFile> psFile;
rv = mStoreDirectory->Clone(getter_AddRefs(psFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = psFile->AppendNative(mTableName + NS_LITERAL_CSTRING(PREFIXSET_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
rv = mPrefixSet->StoreToFile(psFile);
NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "failed to store the prefixset");
return NS_OK;
}
void
LookupCache::Clear()
{
mCompletions.Clear();
mPrefixSet->SetPrefixes(nsnull, 0);
mPrimed = false;
}
void
LookupCache::UpdateHeader()
{
mHeader.magic = LOOKUPCACHE_MAGIC;
mHeader.version = CURRENT_VERSION;
mHeader.numCompletions = mCompletions.Length();
}
nsresult
LookupCache::EnsureSizeConsistent()
{
nsCOMPtr<nsIFile> storeFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(CACHE_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
PRInt64 fileSize;
rv = storeFile->GetFileSize(&fileSize);
NS_ENSURE_SUCCESS(rv, rv);
if (fileSize < 0) {
return NS_ERROR_FAILURE;
}
PRInt64 expectedSize = sizeof(mHeader)
+ mHeader.numCompletions*sizeof(Completion);
if (expectedSize != fileSize) {
NS_WARNING("File length does not match. Probably corrupted.");
Reset();
return NS_ERROR_FILE_CORRUPTED;
}
return NS_OK;
}
nsresult
LookupCache::ReadHeader()
{
if (!mInputStream) {
Clear();
UpdateHeader();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(rv, rv);
void *buffer = &mHeader;
rv = NS_ReadInputStreamToBuffer(mInputStream,
&buffer,
sizeof(Header));
NS_ENSURE_SUCCESS(rv, rv);
if (mHeader.magic != LOOKUPCACHE_MAGIC || mHeader.version != CURRENT_VERSION) {
NS_WARNING("Unexpected header data in the store.");
Reset();
return NS_ERROR_FILE_CORRUPTED;
}
LOG(("%d completions present", mHeader.numCompletions));
rv = EnsureSizeConsistent();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
LookupCache::ReadCompletions()
{
if (!mHeader.numCompletions) {
mCompletions.Clear();
return NS_OK;
}
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, sizeof(Header));
NS_ENSURE_SUCCESS(rv, rv);
rv = ReadTArray(mInputStream, &mCompletions, mHeader.numCompletions);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Read %d completions", mCompletions.Length()));
return NS_OK;
}
/* static */ bool
LookupCache::IsCanonicalizedIP(const nsACString& aHost)
{
// The canonicalization process will have left IP addresses in dotted
// decimal with no surprises.
PRUint32 i1, i2, i3, i4;
char c;
if (PR_sscanf(PromiseFlatCString(aHost).get(), "%u.%u.%u.%u%c",
&i1, &i2, &i3, &i4, &c) == 4) {
return (i1 <= 0xFF && i2 <= 0xFF && i3 <= 0xFF && i4 <= 0xFF);
}
return false;
}
/* static */ nsresult
LookupCache::GetKey(const nsACString& aSpec,
Completion* aHash,
nsCOMPtr<nsICryptoHash>& aCryptoHash)
{
nsACString::const_iterator begin, end, iter;
aSpec.BeginReading(begin);
aSpec.EndReading(end);
iter = begin;
if (!FindCharInReadable('/', iter, end)) {
return NS_OK;
}
const nsCSubstring& host = Substring(begin, iter);
if (IsCanonicalizedIP(host)) {
nsCAutoString key;
key.Assign(host);
key.Append("/");
return aHash->FromPlaintext(key, aCryptoHash);
}
nsTArray<nsCString> hostComponents;
ParseString(PromiseFlatCString(host), '.', hostComponents);
if (hostComponents.Length() < 2)
return NS_ERROR_FAILURE;
PRInt32 last = PRInt32(hostComponents.Length()) - 1;
nsCAutoString lookupHost;
if (hostComponents.Length() > 2) {
lookupHost.Append(hostComponents[last - 2]);
lookupHost.Append(".");
}
lookupHost.Append(hostComponents[last - 1]);
lookupHost.Append(".");
lookupHost.Append(hostComponents[last]);
lookupHost.Append("/");
return aHash->FromPlaintext(lookupHost, aCryptoHash);
}
/* static */ nsresult
LookupCache::GetLookupFragments(const nsACString& aSpec,
nsTArray<nsCString>* aFragments)
{
aFragments->Clear();
nsACString::const_iterator begin, end, iter;
aSpec.BeginReading(begin);
aSpec.EndReading(end);
iter = begin;
if (!FindCharInReadable('/', iter, end)) {
return NS_OK;
}
const nsCSubstring& host = Substring(begin, iter++);
nsCAutoString path;
path.Assign(Substring(iter, end));
/**
* From the protocol doc:
* For the hostname, the client will try at most 5 different strings. They
* are:
* a) The exact hostname of the url
* b) The 4 hostnames formed by starting with the last 5 components and
* successively removing the leading component. The top-level component
* can be skipped. This is not done if the hostname is a numerical IP.
*/
nsTArray<nsCString> hosts;
hosts.AppendElement(host);
if (!IsCanonicalizedIP(host)) {
host.BeginReading(begin);
host.EndReading(end);
int numHostComponents = 0;
while (RFindInReadable(NS_LITERAL_CSTRING("."), begin, end) &&
numHostComponents < MAX_HOST_COMPONENTS) {
// don't bother checking toplevel domains
if (++numHostComponents >= 2) {
host.EndReading(iter);
hosts.AppendElement(Substring(end, iter));
}
end = begin;
host.BeginReading(begin);
}
}
/**
* From the protocol doc:
* For the path, the client will also try at most 6 different strings.
* They are:
* a) the exact path of the url, including query parameters
* b) the exact path of the url, without query parameters
* c) the 4 paths formed by starting at the root (/) and
* successively appending path components, including a trailing
* slash. This behavior should only extend up to the next-to-last
* path component, that is, a trailing slash should never be
* appended that was not present in the original url.
*/
nsTArray<nsCString> paths;
nsCAutoString pathToAdd;
path.BeginReading(begin);
path.EndReading(end);
iter = begin;
if (FindCharInReadable('?', iter, end)) {
pathToAdd = Substring(begin, iter);
paths.AppendElement(pathToAdd);
end = iter;
}
int numPathComponents = 1;
iter = begin;
while (FindCharInReadable('/', iter, end) &&
numPathComponents < MAX_PATH_COMPONENTS) {
iter++;
pathToAdd.Assign(Substring(begin, iter));
paths.AppendElement(pathToAdd);
numPathComponents++;
}
// If we haven't already done so, add the full path
if (!pathToAdd.Equals(path)) {
paths.AppendElement(path);
}
// Check an empty path (for whole-domain blacklist entries)
paths.AppendElement(EmptyCString());
for (PRUint32 hostIndex = 0; hostIndex < hosts.Length(); hostIndex++) {
for (PRUint32 pathIndex = 0; pathIndex < paths.Length(); pathIndex++) {
nsCString key;
key.Assign(hosts[hostIndex]);
key.Append('/');
key.Append(paths[pathIndex]);
LOG(("Chking %s", key.get()));
aFragments->AppendElement(key);
}
}
return NS_OK;
}
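// Worked example (hypothetical URL): for "a.b.c.d.e.f.g/1.html?x=1" the
// hosts tried are the exact host plus the suffixes "c.d.e.f.g", "d.e.f.g",
// "e.f.g" and "f.g"; the paths tried are "1.html?x=1", "1.html" and the
// empty path, so fragments such as "f.g/1.html" and "f.g/" are checked.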
/* static */ nsresult
LookupCache::GetHostKeys(const nsACString& aSpec,
nsTArray<nsCString>* aHostKeys)
{
nsACString::const_iterator begin, end, iter;
aSpec.BeginReading(begin);
aSpec.EndReading(end);
iter = begin;
if (!FindCharInReadable('/', iter, end)) {
return NS_OK;
}
const nsCSubstring& host = Substring(begin, iter);
if (IsCanonicalizedIP(host)) {
nsCString *key = aHostKeys->AppendElement();
if (!key)
return NS_ERROR_OUT_OF_MEMORY;
key->Assign(host);
key->Append("/");
return NS_OK;
}
nsTArray<nsCString> hostComponents;
ParseString(PromiseFlatCString(host), '.', hostComponents);
if (hostComponents.Length() < 2) {
// no host or toplevel host, this won't match anything in the db
return NS_OK;
}
// First check with two domain components
PRInt32 last = PRInt32(hostComponents.Length()) - 1;
nsCString *lookupHost = aHostKeys->AppendElement();
if (!lookupHost)
return NS_ERROR_OUT_OF_MEMORY;
lookupHost->Assign(hostComponents[last - 1]);
lookupHost->Append(".");
lookupHost->Append(hostComponents[last]);
lookupHost->Append("/");
// Now check with three domain components
if (hostComponents.Length() > 2) {
nsCString *lookupHost2 = aHostKeys->AppendElement();
if (!lookupHost2)
return NS_ERROR_OUT_OF_MEMORY;
lookupHost2->Assign(hostComponents[last - 2]);
lookupHost2->Append(".");
lookupHost2->Append(*lookupHost);
}
return NS_OK;
}
/* We have both a prefix and a domain. Drop the domain, but
hash the domain, the prefix and a random value together,
ensuring that any collisions happen at different points for
different users.
*/
/* static */ nsresult LookupCache::KeyedHash(PRUint32 aPref, PRUint32 aDomain,
PRUint32 aKey, PRUint32* aOut)
{
/* This is a reimplementation of MurmurHash3 32-bit
based on the public domain C++ sources.
http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
for nblocks = 2
*/
PRUint32 c1 = 0xCC9E2D51;
PRUint32 c2 = 0x1B873593;
PRUint32 c3 = 0xE6546B64;
PRUint32 c4 = 0x85EBCA6B;
PRUint32 c5 = 0xC2B2AE35;
PRUint32 h1 = aPref; // seed
PRUint32 k1;
PRUint32 karr[2];
karr[0] = aDomain;
karr[1] = aKey;
for (PRUint32 i = 0; i < 2; i++) {
k1 = karr[i];
k1 *= c1;
k1 = (k1 << 15) | (k1 >> (32-15));
k1 *= c2;
h1 ^= k1;
h1 = (h1 << 13) | (h1 >> (32-13));
h1 *= 5;
h1 += c3;
}
h1 ^= 2; // len
// fmix
h1 ^= h1 >> 16;
h1 *= c4;
h1 ^= h1 >> 13;
h1 *= c5;
h1 ^= h1 >> 16;
*aOut = h1;
return NS_OK;
}
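// Compiled-out sketch of the intended call pattern (mirrors Has() above;
// the hash key is the per-profile random value):
#if 0
PRUint32 codedkey;
nsresult rv = LookupCache::KeyedHash(prefix, hostkey, aHashKey, &codedkey);
NS_ENSURE_SUCCESS(rv, rv);
// codedkey, not the raw prefix, is what gets stored in and probed
// against the PrefixSet.
#endif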
bool LookupCache::IsPrimed()
{
return mPrimed;
}
nsresult
LookupCache::ConstructPrefixSet(const AddPrefixArray& aAddPrefixes)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_CONSTRUCT_TIME> timer;
nsTArray<PRUint32> array;
array.SetCapacity(aAddPrefixes.Length());
for (uint32 i = 0; i < aAddPrefixes.Length(); i++) {
array.AppendElement(aAddPrefixes[i].PrefixHash().ToUint32());
}
// clear old tree
if (array.IsEmpty()) {
// DB is empty, but put a sentinel to show that we looked
array.AppendElement(0);
}
// PrefixSet requires sorted order
array.Sort();
// construct new one, replace old entries
nsresult rv = mPrefixSet->SetPrefixes(array.Elements(), array.Length());
if (NS_FAILED(rv)) {
goto error_bailout;
}
#ifdef DEBUG
PRUint32 size;
size = mPrefixSet->SizeOfIncludingThis(moz_malloc_size_of);
LOG(("SB tree done, size = %d bytes\n", size));
#endif
mPrimed = true;
return NS_OK;
error_bailout:
// load an empty prefixset so the browser can work
nsAutoTArray<PRUint32, 1> sentinel;
sentinel.Clear();
sentinel.AppendElement(0);
mPrefixSet->SetPrefixes(sentinel.Elements(), sentinel.Length());
if (rv == NS_ERROR_OUT_OF_MEMORY) {
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_PS_OOM, 1);
}
return rv;
}
nsresult
LookupCache::LoadPrefixSet()
{
nsCOMPtr<nsIFile> psFile;
nsresult rv = mStoreDirectory->Clone(getter_AddRefs(psFile));
NS_ENSURE_SUCCESS(rv, rv);
rv = psFile->AppendNative(mTableName + NS_LITERAL_CSTRING(PREFIXSET_SUFFIX));
NS_ENSURE_SUCCESS(rv, rv);
bool exists;
rv = psFile->Exists(&exists);
NS_ENSURE_SUCCESS(rv, rv);
if (exists) {
LOG(("stored PrefixSet exists, loading from disk"));
rv = mPrefixSet->LoadFromFile(psFile);
}
if (!exists || NS_FAILED(rv)) {
LOG(("no (usable) stored PrefixSet found"));
} else {
mPrimed = true;
}
#ifdef DEBUG
if (mPrimed) {
PRUint32 size = mPrefixSet->SizeOfIncludingThis(moz_malloc_size_of);
LOG(("SB tree done, size = %d bytes\n", size));
}
#endif
return NS_OK;
}
nsresult
LookupCache::GetPrefixes(nsTArray<PRUint32>* aAddPrefixes)
{
if (!mPrimed) {
// This can happen if it's a new table, so it's not an error.
LOG(("GetPrefixes from empty LookupCache"));
return NS_OK;
}
PRUint32 cnt;
PRUint32 *arr;
nsresult rv = mPrefixSet->GetPrefixes(&cnt, &arr);
NS_ENSURE_SUCCESS(rv, rv);
if (!aAddPrefixes->AppendElements(arr, cnt))
return NS_ERROR_FAILURE;
nsMemory::Free(arr);
return NS_OK;
}
}
}

View file

@ -0,0 +1,186 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef LookupCache_h__
#define LookupCache_h__
#include "Entries.h"
#include "nsString.h"
#include "nsTArray.h"
#include "nsAutoPtr.h"
#include "nsCOMPtr.h"
#include "nsIFile.h"
#include "nsUrlClassifierPrefixSet.h"
#include "prlog.h"
namespace mozilla {
namespace safebrowsing {
#define MAX_HOST_COMPONENTS 5
#define MAX_PATH_COMPONENTS 4
class LookupResult {
public:
LookupResult() : mComplete(false), mNoise(false), mFresh(false), mProtocolConfirmed(false) {}
// The fragment that matched in the LookupCache
union {
Prefix prefix;
Completion complete;
} hash;
const Prefix &PrefixHash() { return hash.prefix; }
const Completion &CompleteHash() { return hash.complete; }
bool Confirmed() const { return (mComplete && mFresh) || mProtocolConfirmed; }
bool Complete() const { return mComplete; }
// True if we have a complete match for this hash in the table.
bool mComplete;
// True if this is a noise entry, i.e. an extra entry
// that is inserted to mask the true URL we are requesting
bool mNoise;
// Value of actual key looked up in the prefixset (coded with client key)
Prefix mCodedPrefix;
// True if we've updated this table recently enough.
bool mFresh;
bool mProtocolConfirmed;
nsCString mTableName;
};
typedef nsTArray<LookupResult> LookupResultArray;
struct CacheResult {
AddComplete entry;
nsCString table;
};
typedef nsTArray<CacheResult> CacheResultArray;
class LookupCache {
public:
// Check for a canonicalized IP address.
static bool IsCanonicalizedIP(const nsACString& aHost);
// take a lookup string (www.hostname.com/path/to/resource.html) and
// expand it into the set of fragments that should be searched for in an
// entry
static nsresult GetLookupFragments(const nsACString& aSpec,
nsTArray<nsCString>* aFragments);
// Similar to GetKey(), but if the domain contains three or more components,
// two keys will be returned:
// hostname.com/foo/bar -> [hostname.com]
// mail.hostname.com/foo/bar -> [hostname.com, mail.hostname.com]
// www.mail.hostname.com/foo/bar -> [hostname.com, mail.hostname.com]
static nsresult GetHostKeys(const nsACString& aSpec,
nsTArray<nsCString>* aHostKeys);
// Get the database key for a given URI. This is the top three
// domain components if they exist, otherwise the top two.
// hostname.com/foo/bar -> hostname.com
// mail.hostname.com/foo/bar -> mail.hostname.com
// www.mail.hostname.com/foo/bar -> mail.hostname.com
static nsresult GetKey(const nsACString& aSpec, Completion* aHash,
nsCOMPtr<nsICryptoHash>& aCryptoHash);
/* We have both a prefix and a domain. Drop the domain, but
hash the domain, the prefix and a random value together,
ensuring that any collisions happen at different points for
different users.
*/
static nsresult KeyedHash(PRUint32 aPref, PRUint32 aDomain,
PRUint32 aKey, PRUint32* aOut);
LookupCache(const nsACString& aTableName, nsIFile* aStoreFile);
~LookupCache();
const nsCString &TableName() const { return mTableName; }
nsresult Init();
nsresult Open();
nsresult Build(const AddPrefixArray& aAddPrefixes,
const AddCompleteArray& aAddCompletes);
nsresult GetPrefixes(nsTArray<PRUint32>* aAddPrefixes);
#if defined(DEBUG) && defined(PR_LOGGING)
void Dump();
#endif
nsresult WriteFile();
nsresult Has(const Completion& aCompletion,
const Completion& aHostkey,
PRUint32 aHashKey,
bool* aHas, bool* aComplete,
Prefix* aOrigPrefix);
bool IsPrimed();
private:
void Clear();
nsresult Reset();
void UpdateHeader();
nsresult ReadHeader();
nsresult EnsureSizeConsistent();
nsresult ReadCompletions();
// Load a previously stored PrefixSet from disk.
nsresult LoadPrefixSet();
// Construct a PrefixSet from the given known prefixes.
nsresult ConstructPrefixSet(const AddPrefixArray& aAddPrefixes);
struct Header {
uint32 magic;
uint32 version;
uint32 numCompletions;
};
Header mHeader;
bool mPrimed;
nsCString mTableName;
nsCOMPtr<nsIFile> mStoreDirectory;
nsCOMPtr<nsIInputStream> mInputStream;
CompletionArray mCompletions;
// Set of prefixes known to be in the database
nsRefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
};
}
}
#endif

View file

@ -59,11 +59,17 @@ XPIDLSRCS = \
$(NULL)
CPPSRCS = \
ChunkSet.cpp \
Classifier.cpp \
HashStore.cpp \
ProtocolParser.cpp \
LookupCache.cpp \
nsUrlClassifierDBService.cpp \
nsUrlClassifierStreamUpdater.cpp \
nsUrlClassifierUtils.cpp \
nsUrlClassifierPrefixSet.cpp \
nsUrlClassifierProxies.cpp \
nsCheckSummedOutputStream.cpp \
$(NULL)
LOCAL_INCLUDES = \

View file

@ -0,0 +1,777 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "ProtocolParser.h"
#include "LookupCache.h"
#include "nsIKeyModule.h"
#include "nsNetCID.h"
#include "prlog.h"
#include "prnetdb.h"
#include "prprf.h"
#include "nsUrlClassifierUtils.h"
// NSPR_LOG_MODULES=UrlClassifierDbService:5
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (PR_FALSE)
#endif
namespace mozilla {
namespace safebrowsing {
// Updates will fail if fed chunks larger than this
const uint32 MAX_CHUNK_SIZE = (1024 * 1024);
const uint32 DOMAIN_SIZE = 4;
// Parse one stringified range of chunks of the form "n" or "n-m" from a
// comma-separated list of chunks. Upon return, aBegin will point to the
// next range of chunks in the list.
static bool
ParseChunkRange(nsACString::const_iterator& aBegin,
const nsACString::const_iterator& aEnd,
PRUint32* aFirst, PRUint32* aLast)
{
nsACString::const_iterator iter = aBegin;
FindCharInReadable(',', iter, aEnd);
nsCAutoString element(Substring(aBegin, iter));
aBegin = iter;
if (aBegin != aEnd)
aBegin++;
PRUint32 numRead = PR_sscanf(element.get(), "%u-%u", aFirst, aLast);
if (numRead == 2) {
if (*aFirst > *aLast) {
PRUint32 tmp = *aFirst;
*aFirst = *aLast;
*aLast = tmp;
}
return true;
}
if (numRead == 1) {
*aLast = *aFirst;
return true;
}
return false;
}
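// Example (hypothetical input): parsing "1-3,7" yields first=1, last=3 on
// the first call and leaves aBegin at "7"; a second call yields
// first=7, last=7 and advances aBegin to aEnd.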
ProtocolParser::ProtocolParser(PRUint32 aHashKey)
: mState(PROTOCOL_STATE_CONTROL)
, mHashKey(aHashKey)
, mUpdateStatus(NS_OK)
, mUpdateWait(0)
, mResetRequested(false)
, mRekeyRequested(false)
{
}
ProtocolParser::~ProtocolParser()
{
CleanupUpdates();
}
nsresult
ProtocolParser::Init(nsICryptoHash* aHasher)
{
mCryptoHash = aHasher;
return NS_OK;
}
/**
* Initialize HMAC for the stream.
*
* If serverMAC is empty, the update stream will need to provide a
* server MAC.
*/
nsresult
ProtocolParser::InitHMAC(const nsACString& aClientKey,
const nsACString& aServerMAC)
{
mServerMAC = aServerMAC;
nsresult rv;
nsCOMPtr<nsIKeyObjectFactory> keyObjectFactory(
do_GetService("@mozilla.org/security/keyobjectfactory;1", &rv));
if (NS_FAILED(rv)) {
NS_WARNING("Failed to get nsIKeyObjectFactory service");
mUpdateStatus = rv;
return mUpdateStatus;
}
nsCOMPtr<nsIKeyObject> keyObject;
rv = keyObjectFactory->KeyFromString(nsIKeyObject::HMAC, aClientKey,
getter_AddRefs(keyObject));
if (NS_FAILED(rv)) {
NS_WARNING("Failed to create key object, maybe not FIPS compliant?");
mUpdateStatus = rv;
return mUpdateStatus;
}
mHMAC = do_CreateInstance(NS_CRYPTO_HMAC_CONTRACTID, &rv);
if (NS_FAILED(rv)) {
NS_WARNING("Failed to create nsICryptoHMAC instance");
mUpdateStatus = rv;
return mUpdateStatus;
}
rv = mHMAC->Init(nsICryptoHMAC::SHA1, keyObject);
if (NS_FAILED(rv)) {
NS_WARNING("Failed to initialize nsICryptoHMAC instance");
mUpdateStatus = rv;
return mUpdateStatus;
}
return NS_OK;
}
nsresult
ProtocolParser::FinishHMAC()
{
if (NS_FAILED(mUpdateStatus)) {
return mUpdateStatus;
}
if (mRekeyRequested) {
mUpdateStatus = NS_ERROR_FAILURE;
return mUpdateStatus;
}
if (!mHMAC) {
return NS_OK;
}
nsCAutoString clientMAC;
mHMAC->Finish(PR_TRUE, clientMAC);
if (clientMAC != mServerMAC) {
NS_WARNING("Invalid update MAC!");
LOG(("Invalid update MAC: expected %s, got %s",
clientMAC.get(), mServerMAC.get()));
mUpdateStatus = NS_ERROR_FAILURE;
}
return mUpdateStatus;
}
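// Sketch of the MAC flow implemented above, as driven by the caller
// (the client key comes from the update request):
//
//   parser.InitHMAC(clientKey, serverMAC); // serverMAC may be empty
//   parser.AppendStream(data);             // digests data as it arrives
//   parser.FinishHMAC();                   // compares client and server MACs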
void
ProtocolParser::SetCurrentTable(const nsACString& aTable)
{
mTableUpdate = GetTableUpdate(aTable);
}
nsresult
ProtocolParser::AppendStream(const nsACString& aData)
{
if (NS_FAILED(mUpdateStatus))
return mUpdateStatus;
nsresult rv;
// Digest the data if we have a server MAC.
if (mHMAC && !mServerMAC.IsEmpty()) {
rv = mHMAC->Update(reinterpret_cast<const PRUint8*>(aData.BeginReading()),
aData.Length());
if (NS_FAILED(rv)) {
mUpdateStatus = rv;
return rv;
}
}
mPending.Append(aData);
bool done = false;
while (!done) {
if (mState == PROTOCOL_STATE_CONTROL) {
rv = ProcessControl(&done);
} else if (mState == PROTOCOL_STATE_CHUNK) {
rv = ProcessChunk(&done);
} else {
NS_ERROR("Unexpected protocol state");
rv = NS_ERROR_FAILURE;
}
if (NS_FAILED(rv)) {
mUpdateStatus = rv;
return rv;
}
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessControl(bool* aDone)
{
nsresult rv;
nsCAutoString line;
*aDone = true;
while (NextLine(line)) {
//LOG(("Processing %s\n", line.get()));
if (line.EqualsLiteral("e:pleaserekey")) {
mRekeyRequested = true;
return NS_OK;
} else if (mHMAC && mServerMAC.IsEmpty()) {
rv = ProcessMAC(line);
NS_ENSURE_SUCCESS(rv, rv);
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("i:"))) {
SetCurrentTable(Substring(line, 2));
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("n:"))) {
if (PR_sscanf(line.get(), "n:%d", &mUpdateWait) != 1) {
LOG(("Error parsing n: '%s' (%d)", line.get(), mUpdateWait));
mUpdateWait = 0;
}
} else if (line.EqualsLiteral("r:pleasereset")) {
mResetRequested = true;
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("u:"))) {
rv = ProcessForward(line);
NS_ENSURE_SUCCESS(rv, rv);
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("a:")) ||
StringBeginsWith(line, NS_LITERAL_CSTRING("s:"))) {
rv = ProcessChunkControl(line);
NS_ENSURE_SUCCESS(rv, rv);
*aDone = false;
return NS_OK;
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("ad:")) ||
StringBeginsWith(line, NS_LITERAL_CSTRING("sd:"))) {
rv = ProcessExpirations(line);
NS_ENSURE_SUCCESS(rv, rv);
}
}
*aDone = true;
return NS_OK;
}
nsresult
ProtocolParser::ProcessMAC(const nsCString& aLine)
{
nsresult rv;
LOG(("line: %s", aLine.get()));
if (StringBeginsWith(aLine, NS_LITERAL_CSTRING("m:"))) {
mServerMAC = Substring(aLine, 2);
nsUrlClassifierUtils::UnUrlsafeBase64(mServerMAC);
// The remainder of the pending update wasn't digested, digest it now.
rv = mHMAC->Update(reinterpret_cast<const PRUint8*>(mPending.BeginReading()),
mPending.Length());
return rv;
}
LOG(("No MAC specified!"));
return NS_ERROR_FAILURE;
}
nsresult
ProtocolParser::ProcessExpirations(const nsCString& aLine)
{
if (!mTableUpdate) {
NS_WARNING("Got an expiration without a table.");
return NS_ERROR_FAILURE;
}
const nsCSubstring &list = Substring(aLine, 3);
nsACString::const_iterator begin, end;
list.BeginReading(begin);
list.EndReading(end);
while (begin != end) {
PRUint32 first, last;
if (ParseChunkRange(begin, end, &first, &last)) {
for (PRUint32 num = first; num <= last; num++) {
if (aLine[0] == 'a')
mTableUpdate->NewAddExpiration(num);
else
mTableUpdate->NewSubExpiration(num);
}
} else {
return NS_ERROR_FAILURE;
}
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessChunkControl(const nsCString& aLine)
{
if (!mTableUpdate) {
NS_WARNING("Got a chunk before getting a table.");
return NS_ERROR_FAILURE;
}
mState = PROTOCOL_STATE_CHUNK;
char command;
mChunkState.Clear();
if (PR_sscanf(aLine.get(),
"%c:%d:%d:%d",
&command,
&mChunkState.num, &mChunkState.hashSize, &mChunkState.length)
!= 4)
{
return NS_ERROR_FAILURE;
}
if (mChunkState.length > MAX_CHUNK_SIZE) {
return NS_ERROR_FAILURE;
}
if (!(mChunkState.hashSize == PREFIX_SIZE || mChunkState.hashSize == COMPLETE_SIZE)) {
NS_WARNING("Invalid hash size specified in update.");
return NS_ERROR_FAILURE;
}
mChunkState.type = (command == 'a') ? CHUNK_ADD : CHUNK_SUB;
if (mChunkState.type == CHUNK_ADD) {
mTableUpdate->NewAddChunk(mChunkState.num);
} else {
mTableUpdate->NewSubChunk(mChunkState.num);
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessForward(const nsCString& aLine)
{
const nsCSubstring &forward = Substring(aLine, 2);
if (mHMAC) {
// We're expecting MACs alongside any url forwards.
nsCSubstring::const_iterator begin, end, sepBegin, sepEnd;
forward.BeginReading(begin);
sepBegin = begin;
forward.EndReading(end);
sepEnd = end;
if (!RFindInReadable(NS_LITERAL_CSTRING(","), sepBegin, sepEnd)) {
NS_WARNING("No MAC specified for a redirect in a request that expects a MAC");
return NS_ERROR_FAILURE;
}
nsCString serverMAC(Substring(sepEnd, end));
nsUrlClassifierUtils::UnUrlsafeBase64(serverMAC);
return AddForward(Substring(begin, sepBegin), serverMAC);
}
return AddForward(forward, mServerMAC);
}
nsresult
ProtocolParser::AddForward(const nsACString& aUrl, const nsACString& aMac)
{
if (!mTableUpdate) {
NS_WARNING("Forward without a table name.");
return NS_ERROR_FAILURE;
}
ForwardedUpdate *forward = mForwards.AppendElement();
forward->table = mTableUpdate->TableName();
forward->url.Assign(aUrl);
forward->mac.Assign(aMac);
return NS_OK;
}
nsresult
ProtocolParser::ProcessChunk(bool* aDone)
{
if (!mTableUpdate) {
NS_WARNING("Processing chunk without an active table.");
return NS_ERROR_FAILURE;
}
NS_ASSERTION(mChunkState.num != 0, "Must have a chunk number.");
if (mPending.Length() < mChunkState.length) {
*aDone = true;
return NS_OK;
}
// Pull the chunk out of the pending stream data.
nsCAutoString chunk;
chunk.Assign(Substring(mPending, 0, mChunkState.length));
mPending = Substring(mPending, mChunkState.length);
*aDone = false;
mState = PROTOCOL_STATE_CONTROL;
//LOG(("Handling a %d-byte chunk", chunk.Length()));
if (StringEndsWith(mTableUpdate->TableName(), NS_LITERAL_CSTRING("-shavar"))) {
return ProcessShaChunk(chunk);
} else {
return ProcessPlaintextChunk(chunk);
}
}
/**
* Process a plaintext chunk (currently only used in unit tests).
*/
nsresult
ProtocolParser::ProcessPlaintextChunk(const nsACString& aChunk)
{
if (!mTableUpdate) {
NS_WARNING("Chunk received with no table.");
return NS_ERROR_FAILURE;
}
nsresult rv;
nsTArray<nsCString> lines;
ParseString(PromiseFlatCString(aChunk), '\n', lines);
// non-hashed tables need to be hashed
for (uint32 i = 0; i < lines.Length(); i++) {
nsCString& line = lines[i];
if (mChunkState.type == CHUNK_ADD) {
if (mChunkState.hashSize == COMPLETE_SIZE) {
Completion hash;
hash.FromPlaintext(line, mCryptoHash);
mTableUpdate->NewAddComplete(mChunkState.num, hash);
} else {
NS_ASSERTION(mChunkState.hashSize == 4, "Only 32- or 4-byte hashes can be used for add chunks.");
Completion hash;
Completion domHash;
Prefix newHash;
rv = LookupCache::GetKey(line, &domHash, mCryptoHash);
NS_ENSURE_SUCCESS(rv, rv);
hash.FromPlaintext(line, mCryptoHash);
PRUint32 codedHash;
rv = LookupCache::KeyedHash(hash.ToUint32(), domHash.ToUint32(), mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
newHash.FromUint32(codedHash);
mTableUpdate->NewAddPrefix(mChunkState.num, newHash);
}
} else {
nsCString::const_iterator begin, iter, end;
line.BeginReading(begin);
line.EndReading(end);
iter = begin;
uint32 addChunk;
if (!FindCharInReadable(':', iter, end) ||
PR_sscanf(lines[i].get(), "%d:", &addChunk) != 1) {
NS_WARNING("Received sub chunk without associated add chunk.");
return NS_ERROR_FAILURE;
}
iter++;
if (mChunkState.hashSize == COMPLETE_SIZE) {
Completion hash;
hash.FromPlaintext(Substring(iter, end), mCryptoHash);
mTableUpdate->NewSubComplete(addChunk, hash, mChunkState.num);
} else {
NS_ASSERTION(mChunkState.hashSize == 4, "Only 32- or 4-byte hashes can be used for sub chunks.");
Prefix hash;
Completion domHash;
Prefix newHash;
rv = LookupCache::GetKey(Substring(iter, end), &domHash, mCryptoHash);
NS_ENSURE_SUCCESS(rv, rv);
hash.FromPlaintext(Substring(iter, end), mCryptoHash);
PRUint32 codedHash;
rv = LookupCache::KeyedHash(hash.ToUint32(), domHash.ToUint32(), mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
newHash.FromUint32(codedHash);
mTableUpdate->NewSubPrefix(addChunk, newHash, mChunkState.num);
// Needed to knock out completes.
// Fake chunk number 0 will cause this sub to be removed on the next update.
mTableUpdate->NewSubPrefix(addChunk, hash, 0);
mTableUpdate->NewSubChunk(0);
}
}
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessShaChunk(const nsACString& aChunk)
{
PRUint32 start = 0;
while (start < aChunk.Length()) {
// First four bytes are the domain key.
Prefix domain;
domain.Assign(Substring(aChunk, start, DOMAIN_SIZE));
start += DOMAIN_SIZE;
// Then a count of entries.
uint8 numEntries = static_cast<uint8>(aChunk[start]);
start++;
nsresult rv;
if (mChunkState.type == CHUNK_ADD && mChunkState.hashSize == PREFIX_SIZE) {
rv = ProcessHostAdd(domain, numEntries, aChunk, &start);
} else if (mChunkState.type == CHUNK_ADD && mChunkState.hashSize == COMPLETE_SIZE) {
rv = ProcessHostAddComplete(numEntries, aChunk, &start);
} else if (mChunkState.type == CHUNK_SUB && mChunkState.hashSize == PREFIX_SIZE) {
rv = ProcessHostSub(domain, numEntries, aChunk, &start);
} else if (mChunkState.type == CHUNK_SUB && mChunkState.hashSize == COMPLETE_SIZE) {
rv = ProcessHostSubComplete(numEntries, aChunk, &start);
} else {
NS_WARNING("Unexpected chunk type/hash size!");
LOG(("Got an unexpected chunk type/hash size: %s:%d",
mChunkState.type == CHUNK_ADD ? "add" : "sub",
mChunkState.hashSize));
return NS_ERROR_FAILURE;
}
NS_ENSURE_SUCCESS(rv, rv);
}
return NS_OK;
}
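// For reference, the shavar chunk body handled above is a repetition of:
//   4 bytes   domain key (hash prefix of the hostkey)
//   1 byte    entry count N
//   N entries whose size depends on the chunk type:
//             add prefix:   4-byte prefix
//             sub prefix:   4-byte add chunk number + 4-byte prefix
//             add complete: 32-byte hash
//             sub complete: 32-byte hash + 4-byte add chunk number
// An entry count of 0 is special for prefix chunks: the domain key itself
// acts as the prefix (for subs it is followed by the 4-byte add chunk
// number); zero-entry complete chunks are skipped with a warning.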
nsresult
ProtocolParser::ProcessHostAdd(const Prefix& aDomain, PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32* aStart)
{
NS_ASSERTION(mChunkState.hashSize == PREFIX_SIZE,
"ProcessHostAdd should only be called for prefix hashes.");
PRUint32 codedHash;
PRUint32 domHash = aDomain.ToUint32();
if (aNumEntries == 0) {
nsresult rv = LookupCache::KeyedHash(domHash, domHash, mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
Prefix newHash;
newHash.FromUint32(codedHash);
mTableUpdate->NewAddPrefix(mChunkState.num, newHash);
return NS_OK;
}
if (*aStart + (PREFIX_SIZE * aNumEntries) > aChunk.Length()) {
NS_WARNING("Chunk is not long enough to contain the expected entries.");
return NS_ERROR_FAILURE;
}
for (uint8 i = 0; i < aNumEntries; i++) {
Prefix hash;
hash.Assign(Substring(aChunk, *aStart, PREFIX_SIZE));
nsresult rv = LookupCache::KeyedHash(domHash, hash.ToUint32(), mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
Prefix newHash;
newHash.FromUint32(codedHash);
mTableUpdate->NewAddPrefix(mChunkState.num, newHash);
*aStart += PREFIX_SIZE;
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessHostSub(const Prefix& aDomain, PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32 *aStart)
{
NS_ASSERTION(mChunkState.hashSize == PREFIX_SIZE,
"ProcessHostSub should only be called for prefix hashes.");
PRUint32 codedHash;
PRUint32 domHash = aDomain.ToUint32();
if (aNumEntries == 0) {
if ((*aStart) + 4 > aChunk.Length()) {
NS_WARNING("Received a zero-entry sub chunk without an associated add.");
return NS_ERROR_FAILURE;
}
const nsCSubstring& addChunkStr = Substring(aChunk, *aStart, 4);
*aStart += 4;
uint32 addChunk;
memcpy(&addChunk, addChunkStr.BeginReading(), 4);
addChunk = PR_ntohl(addChunk);
nsresult rv = LookupCache::KeyedHash(domHash, domHash, mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
Prefix newHash;
newHash.FromUint32(codedHash);
mTableUpdate->NewSubPrefix(addChunk, newHash, mChunkState.num);
// Needed to knock out completes.
// Fake chunk number 0 will cause this sub to be removed on the next update.
mTableUpdate->NewSubPrefix(addChunk, aDomain, 0);
mTableUpdate->NewSubChunk(0);
return NS_OK;
}
if (*aStart + ((PREFIX_SIZE + 4) * aNumEntries) > aChunk.Length()) {
NS_WARNING("Chunk is not long enough to contain the expected entries.");
return NS_ERROR_FAILURE;
}
for (uint8 i = 0; i < aNumEntries; i++) {
const nsCSubstring& addChunkStr = Substring(aChunk, *aStart, 4);
*aStart += 4;
uint32 addChunk;
memcpy(&addChunk, addChunkStr.BeginReading(), 4);
addChunk = PR_ntohl(addChunk);
Prefix prefix;
prefix.Assign(Substring(aChunk, *aStart, PREFIX_SIZE));
*aStart += PREFIX_SIZE;
nsresult rv = LookupCache::KeyedHash(prefix.ToUint32(), domHash, mHashKey, &codedHash);
NS_ENSURE_SUCCESS(rv, rv);
Prefix newHash;
newHash.FromUint32(codedHash);
mTableUpdate->NewSubPrefix(addChunk, newHash, mChunkState.num);
// Needed to knock out completes.
// Fake chunk number 0 will cause this sub to be removed on the next update.
mTableUpdate->NewSubPrefix(addChunk, prefix, 0);
mTableUpdate->NewSubChunk(0);
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessHostAddComplete(PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32* aStart)
{
NS_ASSERTION(mChunkState.hashSize == COMPLETE_SIZE,
"ProcessHostAddComplete should only be called for complete hashes.");
if (aNumEntries == 0) {
// Zero-entry complete chunks are unexpected; warn and skip them.
NS_WARNING("Expected > 0 entries for a 32-byte hash add.");
return NS_OK;
}
if (*aStart + (COMPLETE_SIZE * aNumEntries) > aChunk.Length()) {
NS_WARNING("Chunk is not long enough to contain the expected entries.");
return NS_ERROR_FAILURE;
}
for (uint8 i = 0; i < aNumEntries; i++) {
Completion hash;
hash.Assign(Substring(aChunk, *aStart, COMPLETE_SIZE));
mTableUpdate->NewAddComplete(mChunkState.num, hash);
*aStart += COMPLETE_SIZE;
}
return NS_OK;
}
nsresult
ProtocolParser::ProcessHostSubComplete(PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32* aStart)
{
NS_ASSERTION(mChunkState.hashSize == COMPLETE_SIZE,
"ProcessHostSubComplete should only be called for complete hashes.");
if (aNumEntries == 0) {
// Zero-entry complete chunks are unexpected; warn and skip them.
NS_WARNING("Expected > 0 entries for a 32-byte hash sub.");
return NS_OK;
}
if (*aStart + ((COMPLETE_SIZE + 4) * aNumEntries) > aChunk.Length()) {
NS_WARNING("Chunk is not long enough to contain the expected entries.");
return NS_ERROR_FAILURE;
}
for (PRUint8 i = 0; i < aNumEntries; i++) {
Completion hash;
hash.Assign(Substring(aChunk, *aStart, COMPLETE_SIZE));
*aStart += COMPLETE_SIZE;
const nsCSubstring& addChunkStr = Substring(aChunk, *aStart, 4);
*aStart += 4;
uint32 addChunk;
memcpy(&addChunk, addChunkStr.BeginReading(), 4);
addChunk = PR_ntohl(addChunk);
mTableUpdate->NewSubComplete(addChunk, hash, mChunkState.num);
}
return NS_OK;
}
bool
ProtocolParser::NextLine(nsACString& line)
{
int32 newline = mPending.FindChar('\n');
if (newline == kNotFound) {
return false;
}
line.Assign(Substring(mPending, 0, newline));
mPending = Substring(mPending, newline + 1);
return true;
}
void
ProtocolParser::CleanupUpdates()
{
for (uint32 i = 0; i < mTableUpdates.Length(); i++) {
delete mTableUpdates[i];
}
mTableUpdates.Clear();
}
TableUpdate *
ProtocolParser::GetTableUpdate(const nsACString& aTable)
{
for (uint32 i = 0; i < mTableUpdates.Length(); i++) {
if (aTable.Equals(mTableUpdates[i]->TableName())) {
return mTableUpdates[i];
}
}
// Updates are freed automatically on destruction. Ownership of an
// update can be transferred to DBServiceWorker, which passes it back
// to Classifier when applying the update; Classifier then frees it.
TableUpdate *update = new TableUpdate(aTable);
mTableUpdates.AppendElement(update);
return update;
}
}
}

View file

@ -0,0 +1,151 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Dave Camp <dcamp@mozilla.com>
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef ProtocolParser_h__
#define ProtocolParser_h__
#include "HashStore.h"
#include "nsICryptoHMAC.h"
namespace mozilla {
namespace safebrowsing {
/**
* Some helpers for parsing the SafeBrowsing update protocol.
*/
class ProtocolParser {
public:
struct ForwardedUpdate {
nsCString table;
nsCString url;
nsCString mac;
};
ProtocolParser(PRUint32 aHashKey);
~ProtocolParser();
nsresult Status() const { return mUpdateStatus; }
nsresult Init(nsICryptoHash* aHasher);
nsresult InitHMAC(const nsACString& aClientKey,
const nsACString& aServerMAC);
nsresult FinishHMAC();
void SetCurrentTable(const nsACString& aTable);
nsresult Begin();
nsresult AppendStream(const nsACString& aData);
// Forget the table updates that were created by this pass. It
// becomes the caller's responsibility to free them. This is shitty.
TableUpdate *GetTableUpdate(const nsACString& aTable);
void ForgetTableUpdates() { mTableUpdates.Clear(); }
nsTArray<TableUpdate*> &GetTableUpdates() { return mTableUpdates; }
// Update information.
const nsTArray<ForwardedUpdate> &Forwards() const { return mForwards; }
uint32 UpdateWait() { return mUpdateWait; }
bool ResetRequested() { return mResetRequested; }
bool RekeyRequested() { return mRekeyRequested; }
private:
nsresult ProcessControl(bool* aDone);
nsresult ProcessMAC(const nsCString& aLine);
nsresult ProcessExpirations(const nsCString& aLine);
nsresult ProcessChunkControl(const nsCString& aLine);
nsresult ProcessForward(const nsCString& aLine);
nsresult AddForward(const nsACString& aUrl, const nsACString& aMac);
nsresult ProcessChunk(bool* done);
nsresult ProcessPlaintextChunk(const nsACString& aChunk);
nsresult ProcessShaChunk(const nsACString& aChunk);
nsresult ProcessHostAdd(const Prefix& aDomain, PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32* aStart);
nsresult ProcessHostSub(const Prefix& aDomain, PRUint8 aNumEntries,
const nsACString& aChunk, PRUint32* aStart);
nsresult ProcessHostAddComplete(PRUint8 aNumEntries, const nsACString& aChunk,
PRUint32 *aStart);
nsresult ProcessHostSubComplete(PRUint8 numEntries, const nsACString& aChunk,
PRUint32* start);
bool NextLine(nsACString& aLine);
void CleanupUpdates();
enum ParserState {
PROTOCOL_STATE_CONTROL,
PROTOCOL_STATE_CHUNK
};
ParserState mState;
enum ChunkType {
CHUNK_ADD,
CHUNK_SUB
};
struct ChunkState {
ChunkType type;
uint32 num;
uint32 hashSize;
uint32 length;
void Clear() { num = 0; hashSize = 0; length = 0; }
};
ChunkState mChunkState;
PRUint32 mHashKey;
nsCOMPtr<nsICryptoHash> mCryptoHash;
nsresult mUpdateStatus;
nsCString mPending;
nsCOMPtr<nsICryptoHMAC> mHMAC;
nsCString mServerMAC;
uint32 mUpdateWait;
bool mResetRequested;
bool mRekeyRequested;
nsTArray<ForwardedUpdate> mForwards;
nsTArray<TableUpdate*> mTableUpdates;
TableUpdate *mTableUpdate;
};
}
}
#endif
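
ParserState above drives a two-state loop: control lines are consumed one at a time until a chunk header announces how many raw payload bytes follow, and the parser then waits for that many bytes before flipping back to control mode. A standalone sketch of that shape (simplified: the real chunk header is a colon-separated "a:NUM:HASHSIZE:LENGTH" line, here reduced to "a:LENGTH"; MiniParser is illustrative only):

#include <string>

enum MiniParserState { STATE_CONTROL, STATE_CHUNK };

struct MiniParser {
  MiniParserState state = STATE_CONTROL;
  std::string::size_type chunkLength = 0;
  std::string pending;

  void Append(const std::string& data) {
    pending += data;
    for (;;) {
      if (state == STATE_CONTROL) {
        std::string::size_type nl = pending.find('\n');
        if (nl == std::string::npos) return;        // need a full line
        std::string line = pending.substr(0, nl);
        pending.erase(0, nl + 1);
        if (line.compare(0, 2, "a:") == 0) {        // simplified add header
          chunkLength = std::stoul(line.substr(2)); // payload size follows
          state = STATE_CHUNK;
        }                                           // other control lines elided
      } else {
        if (pending.size() < chunkLength) return;   // wait for full payload
        ProcessChunk(pending.substr(0, chunkLength));
        pending.erase(0, chunkLength);
        state = STATE_CONTROL;
      }
    }
  }
  void ProcessChunk(const std::string&) {}          // stand-in
};
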

View File

@ -304,7 +304,7 @@ PROT_ListManager.prototype.maybeToggleUpdateChecking = function() {
*/
PROT_ListManager.prototype.startUpdateChecker = function() {
this.stopUpdateChecker();
// Schedule the first check for between 15 and 45 minutes.
var repeatingUpdateDelay = this.updateInterval / 2;
repeatingUpdateDelay += Math.floor(Math.random() * this.updateInterval);

View File

@ -0,0 +1,92 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code.
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "nsILocalFile.h"
#include "nsCRT.h"
#include "nsIFile.h"
#include "nsISupportsImpl.h"
#include "nsCheckSummedOutputStream.h"
////////////////////////////////////////////////////////////////////////////////
// nsCheckSummedOutputStream
NS_IMPL_ISUPPORTS_INHERITED3(nsCheckSummedOutputStream,
nsSafeFileOutputStream,
nsISafeOutputStream,
nsIOutputStream,
nsIFileOutputStream)
NS_IMETHODIMP
nsCheckSummedOutputStream::Init(nsIFile* file, PRInt32 ioFlags, PRInt32 perm,
PRInt32 behaviorFlags)
{
nsresult rv;
mHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
NS_ENSURE_SUCCESS(rv, rv);
rv = mHash->Init(nsICryptoHash::MD5);
NS_ENSURE_SUCCESS(rv, rv);
return nsSafeFileOutputStream::Init(file, ioFlags, perm, behaviorFlags);
}
NS_IMETHODIMP
nsCheckSummedOutputStream::Finish()
{
nsresult rv = mHash->Finish(false, mCheckSum);
NS_ENSURE_SUCCESS(rv, rv);
PRUint32 written;
rv = nsSafeFileOutputStream::Write(reinterpret_cast<const char*>(mCheckSum.BeginReading()),
mCheckSum.Length(), &written);
NS_ASSERTION(written == mCheckSum.Length(), "Error writing stream checksum");
NS_ENSURE_SUCCESS(rv, rv);
return nsSafeFileOutputStream::Finish();
}
NS_IMETHODIMP
nsCheckSummedOutputStream::Write(const char *buf, PRUint32 count, PRUint32 *result)
{
nsresult rv = mHash->Update(reinterpret_cast<const PRUint8*>(buf), count);
NS_ENSURE_SUCCESS(rv, rv);
return nsSafeFileOutputStream::Write(buf, count, result);
}
////////////////////////////////////////////////////////////////////////////////
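
The idea above in one standalone sketch: every write feeds a running digest as well as the file, and finishing appends the digest so a later reader can verify the payload it just consumed. The real class delegates to an MD5 via nsICryptoHash and to nsSafeFileOutputStream; a toy 32-bit sum and stdio stand in here, so this is a pattern sketch, not the shipped implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

class CheckSummedFile {
 public:
  explicit CheckSummedFile(const char* path)
      : mFile(std::fopen(path, "wb")), mSum(0) {}
  bool Write(const unsigned char* buf, std::size_t len) {
    for (std::size_t i = 0; i < len; i++) {
      mSum += buf[i];  // digest sees every payload byte, in order
    }
    return mFile && std::fwrite(buf, 1, len, mFile) == len;
  }
  bool Finish() {
    if (!mFile) return false;
    // Trailing checksum: mirrors nsCheckSummedOutputStream::Finish().
    bool ok = std::fwrite(&mSum, sizeof(mSum), 1, mFile) == 1;
    ok = (std::fclose(mFile) == 0) && ok;
    mFile = nullptr;
    return ok;
  }
 private:
  std::FILE* mFile;
  uint32_t mSum;
};
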

View File

@ -0,0 +1,86 @@
//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Url Classifier code.
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2011
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Gian-Carlo Pascutto <gpascutto@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef nsCheckSummedOutputStream_h__
#define nsCheckSummedOutputStream_h__
#include "nsILocalFile.h"
#include "nsIFile.h"
#include "nsIOutputStream.h"
#include "nsICryptoHash.h"
#include "nsNetCID.h"
#include "nsString.h"
#include "../../../netwerk/base/src/nsFileStreams.h"
#include "nsToolkitCompsCID.h"
class nsCheckSummedOutputStream : public nsSafeFileOutputStream
{
public:
NS_DECL_ISUPPORTS_INHERITED
// Size of MD5 hash in bytes
static const PRUint32 CHECKSUM_SIZE = 16;
nsCheckSummedOutputStream() {}
virtual ~nsCheckSummedOutputStream() { nsSafeFileOutputStream::Close(); }
NS_IMETHOD Finish();
NS_IMETHOD Write(const char *buf, PRUint32 count, PRUint32 *result);
NS_IMETHOD Init(nsIFile* file, PRInt32 ioFlags, PRInt32 perm, PRInt32 behaviorFlags);
protected:
nsCOMPtr<nsICryptoHash> mHash;
nsCAutoString mCheckSum;
};
// Returns a file output stream which can be QI'ed to nsIFileOutputStream.
inline nsresult
NS_NewCheckSummedOutputStream(nsIOutputStream **result,
nsIFile *file,
PRInt32 ioFlags = -1,
PRInt32 perm = -1,
PRInt32 behaviorFlags = 0)
{
nsCOMPtr<nsIFileOutputStream> out = new nsCheckSummedOutputStream();
nsresult rv = out->Init(file, ioFlags, perm, behaviorFlags);
if (NS_SUCCEEDED(rv))
NS_ADDREF(*result = out); // cannot use nsCOMPtr::swap
return rv;
}
#endif

View File

@ -40,10 +40,12 @@
%{C++
#include "nsTArray.h"
class nsUrlClassifierLookupResult;
#include "Entries.h"
#include "LookupCache.h"
%}
[ptr] native ResultArray(nsTArray<nsUrlClassifierLookupResult>);
[ptr] native ResultArray(nsTArray<mozilla::safebrowsing::LookupResult>);
[ptr] native CacheCompletionArray(nsTArray<mozilla::safebrowsing::CacheResult>);
[ptr] native PrefixArray(mozilla::safebrowsing::PrefixArray);
interface nsIUrlClassifierHashCompleter;
// Interface for JS function callbacks
@ -231,14 +233,14 @@ interface nsIUrlClassifierDBService : nsISupports
* Interface for the actual worker thread. Implementations of this need not
* be thread aware and just work on the database.
*/
[scriptable, uuid(2af84c09-269e-4fc2-b28f-af56717db118)]
[scriptable, uuid(0445be75-b114-43ea-89dc-aa16af26e77e)]
interface nsIUrlClassifierDBServiceWorker : nsIUrlClassifierDBService
{
// Provide a way to forcibly close the db connection.
void closeDb();
// Cache the results of a hash completion.
[noscript]void cacheCompletions(in ResultArray entries);
[noscript]void cacheCompletions(in CacheCompletionArray completions);
[noscript]void cacheMisses(in PrefixArray misses);
};
/**
@ -247,7 +249,7 @@ interface nsIUrlClassifierDBServiceWorker : nsIUrlClassifierDBService
* lookup to provide a set of possible results, which the main thread
* may need to expand using an nsIUrlClassifierCompleter.
*/
[uuid(f1dc83c6-ad43-4f0f-a809-fd43de7de8a4)]
[uuid(b903dc8f-dff1-42fe-894b-36e7a59bb801)]
interface nsIUrlClassifierLookupCallback : nsISupports
{
/**

View File

@ -39,28 +39,26 @@
#include "nsISupports.idl"
#include "nsIFile.idl"
interface nsIArray;
// Note that the PrefixSet name is historical and we do properly support
// duplicated values, so it's really a Prefix Trie.
// All methods are thread-safe.
[scriptable, uuid(519c8519-0f30-426b-bb7b-c400ba0318e2)]
[scriptable, uuid(b21b0fa1-20d2-422a-b2cc-b289c9325811)]
interface nsIUrlClassifierPrefixSet : nsISupports
{
// Initialize the PrefixSet. Give it a name for memory reporting.
void init(in ACString aName);
// Fills the PrefixSet with the given array of prefixes.
// Can send an empty array to clear the tree. A truly "empty tree"
// cannot be represented, so put a sentinel value if that is required.
// Requires the array to be sorted.
void setPrefixes([const, array, size_is(aLength)] in unsigned long aPrefixes,
in unsigned long aLength);
void getPrefixes(out unsigned long aCount,
[array, size_is(aCount), retval] out unsigned long aPrefixes);
// Do a lookup in the PrefixSet, return whether the value is present.
// If aReady is set, we will block until there are any entries.
// If not set, we will return in aReady whether we were ready or not.
boolean probe(in unsigned long aPrefix, in unsigned long aKey,
inout boolean aReady);
// Return the key that is used to randomize the collisions in the prefixes.
PRUint32 getKey();
boolean probe(in unsigned long aPrefix, inout boolean aReady);
boolean isEmpty();
void loadFromFile(in nsIFile aFile);
void storeToFile(in nsIFile aFile);

File diff not shown because of its size.

View File

@ -53,6 +53,8 @@
#include "nsICryptoHash.h"
#include "nsICryptoHMAC.h"
#include "LookupCache.h"
// The hash length for a domain key.
#define DOMAIN_LENGTH 4
@ -88,7 +90,8 @@ public:
bool GetCompleter(const nsACString& tableName,
nsIUrlClassifierHashCompleter** completer);
nsresult CacheCompletions(nsTArray<nsUrlClassifierLookupResult> *results);
nsresult CacheCompletions(mozilla::safebrowsing::CacheResultArray *results);
nsresult CacheMisses(mozilla::safebrowsing::PrefixArray *results);
static nsIThread* BackgroundThread();
@ -131,10 +134,6 @@ private:
// The list of tables that can use the default hash completer object.
nsTArray<nsCString> mGethashWhitelist;
// Set of prefixes known to be in the database
nsRefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
nsCOMPtr<nsICryptoHash> mHash;
// Thread that we do the updates on.
static nsIThread* gDbBackgroundThread;
};

View File

@ -71,7 +71,7 @@ static const PRLogModuleInfo *gUrlClassifierPrefixSetLog = nsnull;
class nsPrefixSetReporter : public nsIMemoryReporter
{
public:
nsPrefixSetReporter(nsUrlClassifierPrefixSet * aParent, const nsACString & aName);
nsPrefixSetReporter(nsUrlClassifierPrefixSet* aParent, const nsACString& aName);
virtual ~nsPrefixSetReporter() {};
NS_DECL_ISUPPORTS
@ -79,7 +79,7 @@ public:
private:
nsCString mPath;
nsUrlClassifierPrefixSet * mParent;
nsUrlClassifierPrefixSet* mParent;
};
NS_IMPL_THREADSAFE_ISUPPORTS1(nsPrefixSetReporter, nsIMemoryReporter)
@ -87,8 +87,8 @@ NS_IMPL_THREADSAFE_ISUPPORTS1(nsPrefixSetReporter, nsIMemoryReporter)
NS_MEMORY_REPORTER_MALLOC_SIZEOF_FUN(StoragePrefixSetMallocSizeOf,
"storage/prefixset")
nsPrefixSetReporter::nsPrefixSetReporter(nsUrlClassifierPrefixSet * aParent,
const nsACString & aName)
nsPrefixSetReporter::nsPrefixSetReporter(nsUrlClassifierPrefixSet* aParent,
const nsACString& aName)
: mParent(aParent)
{
mPath.Assign(NS_LITERAL_CSTRING("explicit/storage/prefixset"));
@ -99,42 +99,42 @@ nsPrefixSetReporter::nsPrefixSetReporter(nsUrlClassifierPrefixSet * aParent,
}
NS_IMETHODIMP
nsPrefixSetReporter::GetProcess(nsACString & aProcess)
nsPrefixSetReporter::GetProcess(nsACString& aProcess)
{
aProcess.Truncate();
return NS_OK;
}
NS_IMETHODIMP
nsPrefixSetReporter::GetPath(nsACString & aPath)
nsPrefixSetReporter::GetPath(nsACString& aPath)
{
aPath.Assign(mPath);
return NS_OK;
}
NS_IMETHODIMP
nsPrefixSetReporter::GetKind(PRInt32 * aKind)
nsPrefixSetReporter::GetKind(PRInt32* aKind)
{
*aKind = nsIMemoryReporter::KIND_HEAP;
return NS_OK;
}
NS_IMETHODIMP
nsPrefixSetReporter::GetUnits(PRInt32 * aUnits)
nsPrefixSetReporter::GetUnits(PRInt32* aUnits)
{
*aUnits = nsIMemoryReporter::UNITS_BYTES;
return NS_OK;
}
NS_IMETHODIMP
nsPrefixSetReporter::GetAmount(PRInt64 * aAmount)
nsPrefixSetReporter::GetAmount(PRInt64* aAmount)
{
*aAmount = mParent->SizeOfIncludingThis(StoragePrefixSetMallocSizeOf);
return NS_OK;
}
NS_IMETHODIMP
nsPrefixSetReporter::GetDescription(nsACString & aDescription)
nsPrefixSetReporter::GetDescription(nsACString& aDescription)
{
aDescription.Assign(NS_LITERAL_CSTRING("Memory used by a PrefixSet for "
"UrlClassifier, in bytes."));
@ -146,21 +146,21 @@ NS_IMPL_THREADSAFE_ISUPPORTS1(nsUrlClassifierPrefixSet, nsIUrlClassifierPrefixSe
nsUrlClassifierPrefixSet::nsUrlClassifierPrefixSet()
: mPrefixSetLock("mPrefixSetLock"),
mSetIsReady(mPrefixSetLock, "mSetIsReady"),
mHasPrefixes(false),
mRandomKey(0)
mHasPrefixes(false)
{
#if defined(PR_LOGGING)
if (!gUrlClassifierPrefixSetLog)
gUrlClassifierPrefixSetLog = PR_NewLogModule("UrlClassifierPrefixSet");
#endif
}
nsresult rv = InitKey();
if (NS_FAILED(rv)) {
LOG(("Failed to initialize PrefixSet"));
}
mReporter = new nsPrefixSetReporter(this, NS_LITERAL_CSTRING("all"));
NS_IMETHODIMP
nsUrlClassifierPrefixSet::Init(const nsACString& aName)
{
mReporter = new nsPrefixSetReporter(this, aName);
NS_RegisterMemoryReporter(mReporter);
return NS_OK;
}
nsUrlClassifierPrefixSet::~nsUrlClassifierPrefixSet()
@ -168,26 +168,8 @@ nsUrlClassifierPrefixSet::~nsUrlClassifierPrefixSet()
NS_UnregisterMemoryReporter(mReporter);
}
nsresult
nsUrlClassifierPrefixSet::InitKey()
{
nsCOMPtr<nsIRandomGenerator> rg =
do_GetService("@mozilla.org/security/random-generator;1");
NS_ENSURE_STATE(rg);
PRUint8 *temp;
nsresult rv = rg->GenerateRandomBytes(sizeof(mRandomKey), &temp);
NS_ENSURE_SUCCESS(rv, rv);
memcpy(&mRandomKey, temp, sizeof(mRandomKey));
NS_Free(temp);
LOG(("Initialized PrefixSet, key = %X", mRandomKey));
return NS_OK;
}
NS_IMETHODIMP
nsUrlClassifierPrefixSet::SetPrefixes(const PRUint32 * aArray, PRUint32 aLength)
nsUrlClassifierPrefixSet::SetPrefixes(const PRUint32* aArray, PRUint32 aLength)
{
if (aLength <= 0) {
MutexAutoLock lock(mPrefixSetLock);
@ -206,7 +188,7 @@ nsUrlClassifierPrefixSet::SetPrefixes(const PRUint32 * aArray, PRUint32 aLength)
}
nsresult
nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32 * prefixes, PRUint32 aLength)
nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32* aPrefixes, PRUint32 aLength)
{
if (aLength == 0) {
return NS_OK;
@ -214,7 +196,7 @@ nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32 * prefixes, PRUint32 aLen
#ifdef DEBUG
for (PRUint32 i = 1; i < aLength; i++) {
MOZ_ASSERT(prefixes[i] >= prefixes[i-1]);
MOZ_ASSERT(aPrefixes[i] >= aPrefixes[i-1]);
}
#endif
@ -222,7 +204,7 @@ nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32 * prefixes, PRUint32 aLen
FallibleTArray<PRUint32> newIndexStarts;
FallibleTArray<PRUint16> newDeltas;
if (!newIndexPrefixes.AppendElement(prefixes[0])) {
if (!newIndexPrefixes.AppendElement(aPrefixes[0])) {
return NS_ERROR_OUT_OF_MEMORY;
}
if (!newIndexStarts.AppendElement(newDeltas.Length())) {
@ -230,25 +212,25 @@ nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32 * prefixes, PRUint32 aLen
}
PRUint32 numOfDeltas = 0;
PRUint32 currentItem = prefixes[0];
PRUint32 currentItem = aPrefixes[0];
for (PRUint32 i = 1; i < aLength; i++) {
if ((numOfDeltas >= DELTAS_LIMIT) ||
(prefixes[i] - currentItem >= MAX_INDEX_DIFF)) {
(aPrefixes[i] - currentItem >= MAX_INDEX_DIFF)) {
if (!newIndexStarts.AppendElement(newDeltas.Length())) {
return NS_ERROR_OUT_OF_MEMORY;
}
if (!newIndexPrefixes.AppendElement(prefixes[i])) {
if (!newIndexPrefixes.AppendElement(aPrefixes[i])) {
return NS_ERROR_OUT_OF_MEMORY;
}
numOfDeltas = 0;
} else {
PRUint16 delta = prefixes[i] - currentItem;
PRUint16 delta = aPrefixes[i] - currentItem;
if (!newDeltas.AppendElement(delta)) {
return NS_ERROR_OUT_OF_MEMORY;
}
numOfDeltas++;
}
currentItem = prefixes[i];
currentItem = aPrefixes[i];
}
newIndexPrefixes.Compact();
@ -271,6 +253,53 @@ nsUrlClassifierPrefixSet::MakePrefixSet(const PRUint32 * prefixes, PRUint32 aLen
return NS_OK;
}
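
The hunk above is the heart of the compression: sorted 32-bit prefixes are chopped into runs, each run storing one full 32-bit index prefix plus 16-bit deltas to its successors, with a new run opened when a delta would overflow 16 bits or the run hits DELTAS_LIMIT. A standalone sketch under assumed constants (the real DELTAS_LIMIT and MAX_INDEX_DIFF values are defined outside this excerpt):

#include <cstddef>
#include <cstdint>
#include <vector>

const uint32_t kMaxIndexDiff = 1u << 16;  // assumed: a delta must fit 16 bits
const uint32_t kDeltasLimit = 100;        // assumed cap on run length

struct CompressedSet {
  std::vector<uint32_t> indexPrefixes;  // first prefix of each run
  std::vector<uint32_t> indexStarts;    // offset of each run into |deltas|
  std::vector<uint16_t> deltas;         // gaps between successive prefixes
};

CompressedSet Compress(const std::vector<uint32_t>& sorted) {
  CompressedSet out;
  if (sorted.empty()) return out;
  out.indexPrefixes.push_back(sorted[0]);
  out.indexStarts.push_back(0);
  uint32_t current = sorted[0];
  uint32_t runLength = 0;
  for (std::size_t i = 1; i < sorted.size(); i++) {
    if (runLength >= kDeltasLimit || sorted[i] - current >= kMaxIndexDiff) {
      out.indexStarts.push_back(out.deltas.size());  // open a new run
      out.indexPrefixes.push_back(sorted[i]);
      runLength = 0;
    } else {
      out.deltas.push_back(static_cast<uint16_t>(sorted[i] - current));
      runLength++;
    }
    current = sorted[i];
  }
  return out;
}
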
NS_IMETHODIMP
nsUrlClassifierPrefixSet::GetPrefixes(PRUint32* aCount,
PRUint32** aPrefixes)
{
NS_ENSURE_ARG_POINTER(aCount);
*aCount = 0;
NS_ENSURE_ARG_POINTER(aPrefixes);
*aPrefixes = nsnull;
nsTArray<PRUint32> prefixArray;
PRUint32 prefixLength = mIndexPrefixes.Length();
for (PRUint32 i = 0; i < prefixLength; i++) {
PRUint32 prefix = mIndexPrefixes[i];
PRUint32 start = mIndexStarts[i];
PRUint32 end = (i == (prefixLength - 1)) ? mDeltas.Length()
: mIndexStarts[i + 1];
prefixArray.AppendElement(prefix);
for (PRUint32 j = start; j < end; j++) {
prefix += mDeltas[j];
prefixArray.AppendElement(prefix);
}
}
NS_ASSERTION(mIndexStarts.Length() + mDeltas.Length() == prefixArray.Length(),
"Lengths are inconsistent");
PRUint32 itemCount = prefixArray.Length();
if (itemCount == 1 && prefixArray[0] == 0) {
/* sentinel for empty set */
prefixArray.Clear();
itemCount = 0;
}
PRUint32* retval = static_cast<PRUint32*>(nsMemory::Alloc(itemCount * sizeof(PRUint32)));
NS_ENSURE_TRUE(retval, NS_ERROR_OUT_OF_MEMORY);
for (PRUint32 i = 0; i < itemCount; i++) {
retval[i] = prefixArray[i];
}
*aCount = itemCount;
*aPrefixes = retval;
return NS_OK;
}
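
And the inverse, mirroring GetPrefixes() above: walk the runs, re-adding each delta to the run's index prefix. This continues the Compress sketch and reuses its CompressedSet type.

std::vector<uint32_t> Decompress(const CompressedSet& in) {
  std::vector<uint32_t> out;
  for (std::size_t i = 0; i < in.indexPrefixes.size(); i++) {
    uint32_t prefix = in.indexPrefixes[i];
    std::size_t end = (i + 1 < in.indexStarts.size())
                          ? in.indexStarts[i + 1]
                          : in.deltas.size();
    out.push_back(prefix);
    for (std::size_t j = in.indexStarts[i]; j < end; j++) {
      prefix += in.deltas[j];  // each delta advances the running prefix
      out.push_back(prefix);
    }
  }
  return out;
}
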
PRUint32 nsUrlClassifierPrefixSet::BinSearch(PRUint32 start,
PRUint32 end,
PRUint32 target)
@ -290,7 +319,7 @@ PRUint32 nsUrlClassifierPrefixSet::BinSearch(PRUint32 start,
}
nsresult
nsUrlClassifierPrefixSet::Contains(PRUint32 aPrefix, bool * aFound)
nsUrlClassifierPrefixSet::Contains(PRUint32 aPrefix, bool* aFound)
{
mPrefixSetLock.AssertCurrentThreadOwns();
@ -366,32 +395,13 @@ nsUrlClassifierPrefixSet::IsEmpty(bool * aEmpty)
}
NS_IMETHODIMP
nsUrlClassifierPrefixSet::GetKey(PRUint32 * aKey)
{
MutexAutoLock lock(mPrefixSetLock);
*aKey = mRandomKey;
return NS_OK;
}
NS_IMETHODIMP
nsUrlClassifierPrefixSet::Probe(PRUint32 aPrefix, PRUint32 aKey,
nsUrlClassifierPrefixSet::Probe(PRUint32 aPrefix,
bool* aReady, bool* aFound)
{
MutexAutoLock lock(mPrefixSetLock);
*aFound = false;
// We might have raced here with a LoadPrefixSet call,
// loading a saved PrefixSet with another key than the one used to probe us.
// This must occur exactly between the GetKey call and the Probe call.
// This could cause a false negative immediately after browser start.
// Claim we are still busy loading instead.
if (aKey != mRandomKey) {
LOG(("Potential race condition detected, avoiding"));
*aReady = false;
return NS_OK;
}
// check whether we are opportunistically probing or should wait
if (*aReady) {
// we should block until we are ready
@ -415,7 +425,7 @@ nsUrlClassifierPrefixSet::Probe(PRUint32 aPrefix, PRUint32 aKey,
}
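
The two probe modes described by the IDL comment, as a standalone sketch (std threading primitives stand in for Mozilla's Mutex/CondVar, and Contains() is a stub for the real binary search): a caller that passes ready == true is willing to block until the set has loaded, while an opportunistic caller gets an immediate answer plus the readiness flag.

#include <condition_variable>
#include <cstdint>
#include <mutex>

struct ProbeSketch {
  std::mutex lock;
  std::condition_variable setIsReady;
  bool hasPrefixes = false;

  bool Contains(uint32_t) { return false; }  // stand-in for the real lookup

  bool Probe(uint32_t prefix, bool* ready) {
    std::unique_lock<std::mutex> guard(lock);
    if (*ready) {
      // Caller opted to block until SetPrefixes() has run.
      setIsReady.wait(guard, [this] { return hasPrefixes; });
    } else {
      *ready = hasPrefixes;            // opportunistic: report readiness
      if (!hasPrefixes) return false;  // no answer yet
    }
    return Contains(prefix);
  }
};
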
nsresult
nsUrlClassifierPrefixSet::LoadFromFd(AutoFDClose & fileFd)
nsUrlClassifierPrefixSet::LoadFromFd(AutoFDClose& fileFd)
{
PRUint32 magic;
PRInt32 read;
@ -427,8 +437,6 @@ nsUrlClassifierPrefixSet::LoadFromFd(AutoFDClose & fileFd)
PRUint32 indexSize;
PRUint32 deltaSize;
read = PR_Read(fileFd, &mRandomKey, sizeof(PRUint32));
NS_ENSURE_TRUE(read == sizeof(PRUint32), NS_ERROR_FILE_CORRUPTED);
read = PR_Read(fileFd, &indexSize, sizeof(PRUint32));
NS_ENSURE_TRUE(read == sizeof(PRUint32), NS_ERROR_FILE_CORRUPTED);
read = PR_Read(fileFd, &deltaSize, sizeof(PRUint32));
@ -481,8 +489,10 @@ nsUrlClassifierPrefixSet::LoadFromFd(AutoFDClose & fileFd)
}
NS_IMETHODIMP
nsUrlClassifierPrefixSet::LoadFromFile(nsIFile * aFile)
nsUrlClassifierPrefixSet::LoadFromFile(nsIFile* aFile)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_FILELOAD_TIME> timer;
nsresult rv;
nsCOMPtr<nsILocalFile> file(do_QueryInterface(aFile, &rv));
NS_ENSURE_SUCCESS(rv, rv);
@ -495,7 +505,7 @@ nsUrlClassifierPrefixSet::LoadFromFile(nsIFile * aFile)
}
nsresult
nsUrlClassifierPrefixSet::StoreToFd(AutoFDClose & fileFd)
nsUrlClassifierPrefixSet::StoreToFd(AutoFDClose& fileFd)
{
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_FALLOCATE_TIME> timer;
@ -511,9 +521,6 @@ nsUrlClassifierPrefixSet::StoreToFd(AutoFDClose & fileFd)
written = PR_Write(fileFd, &magic, sizeof(PRUint32));
NS_ENSURE_TRUE(written > 0, NS_ERROR_FAILURE);
written = PR_Write(fileFd, &mRandomKey, sizeof(PRUint32));
NS_ENSURE_TRUE(written > 0, NS_ERROR_FAILURE);
PRUint32 indexSize = mIndexStarts.Length();
PRUint32 deltaSize = mDeltas.Length();
written = PR_Write(fileFd, &indexSize, sizeof(PRUint32));
@ -536,7 +543,7 @@ nsUrlClassifierPrefixSet::StoreToFd(AutoFDClose & fileFd)
}
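
Read together, the Load/Store hunks imply a simple on-disk header for the PrefixSet; the array payloads follow it but sit outside the quoted hunks:

// PrefixSet file layout implied by LoadFromFd/StoreToFd above:
//   PRUint32 magic       // format marker; mismatch => NS_ERROR_FILE_CORRUPTED
//   PRUint32 indexSize   // element count of mIndexPrefixes / mIndexStarts
//   PRUint32 deltaSize   // element count of mDeltas
//   ...                  // the arrays themselves (not shown in this diff)
// The random key that used to sit between magic and indexSize is gone:
// probes no longer take a key, so nothing key-dependent needs persisting.
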
NS_IMETHODIMP
nsUrlClassifierPrefixSet::StoreToFile(nsIFile * aFile)
nsUrlClassifierPrefixSet::StoreToFile(nsIFile* aFile)
{
if (!mHasPrefixes) {
LOG(("Attempt to serialize empty PrefixSet"));

View File

@ -44,6 +44,7 @@
#include "nsISupportsUtils.h"
#include "nsID.h"
#include "nsIFile.h"
#include "nsIMutableArray.h"
#include "nsIUrlClassifierPrefixSet.h"
#include "nsIMemoryReporter.h"
#include "nsToolkitCompsCID.h"
@ -59,12 +60,13 @@ public:
nsUrlClassifierPrefixSet();
virtual ~nsUrlClassifierPrefixSet();
NS_IMETHOD Init(const nsACString& aName);
NS_IMETHOD SetPrefixes(const PRUint32* aArray, PRUint32 aLength);
NS_IMETHOD Probe(PRUint32 aPrefix, PRUint32 aKey, bool* aReady, bool* aFound);
NS_IMETHOD IsEmpty(bool * aEmpty);
NS_IMETHOD GetPrefixes(PRUint32* aCount, PRUint32** aPrefixes);
NS_IMETHOD Probe(PRUint32 aPrefix, bool* aReady, bool* aFound);
NS_IMETHOD IsEmpty(bool* aEmpty);
NS_IMETHOD LoadFromFile(nsIFile* aFile);
NS_IMETHOD StoreToFile(nsIFile* aFile);
NS_IMETHOD GetKey(PRUint32* aKey);
NS_DECL_ISUPPORTS
@ -84,15 +86,12 @@ protected:
nsresult Contains(PRUint32 aPrefix, bool* aFound);
nsresult MakePrefixSet(const PRUint32* aArray, PRUint32 aLength);
PRUint32 BinSearch(PRUint32 start, PRUint32 end, PRUint32 target);
nsresult LoadFromFd(mozilla::AutoFDClose & fileFd);
nsresult StoreToFd(mozilla::AutoFDClose & fileFd);
nsresult InitKey();
nsresult LoadFromFd(mozilla::AutoFDClose& fileFd);
nsresult StoreToFd(mozilla::AutoFDClose& fileFd);
// boolean indicating whether |setPrefixes| has been
// called with a non-empty array.
bool mHasPrefixes;
// key used to randomize hash collisions
PRUint32 mRandomKey;
// the prefix for each index.
FallibleTArray<PRUint32> mIndexPrefixes;
// the value corresponds to the beginning of the run
@ -100,7 +99,6 @@ protected:
FallibleTArray<PRUint32> mIndexStarts;
// array containing deltas from indices.
FallibleTArray<PRUint16> mDeltas;
};
#endif

View File

@ -183,7 +183,7 @@ UrlClassifierDBServiceWorkerProxy::CloseDb()
}
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::CacheCompletions(nsTArray<nsUrlClassifierLookupResult>* aEntries)
UrlClassifierDBServiceWorkerProxy::CacheCompletions(CacheResultArray* aEntries)
{
nsCOMPtr<nsIRunnable> r = new CacheCompletionsRunnable(mTarget, aEntries);
return DispatchToWorkerThread(r);
@ -196,12 +196,27 @@ UrlClassifierDBServiceWorkerProxy::CacheCompletionsRunnable::Run()
return NS_OK;
}
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::CacheMisses(PrefixArray* aEntries)
{
nsCOMPtr<nsIRunnable> r = new CacheMissesRunnable(mTarget, aEntries);
return DispatchToWorkerThread(r);
}
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::CacheMissesRunnable::Run()
{
mTarget->CacheMisses(mEntries);
return NS_OK;
}
NS_IMPL_THREADSAFE_ISUPPORTS1(UrlClassifierLookupCallbackProxy,
nsIUrlClassifierLookupCallback)
NS_IMETHODIMP
UrlClassifierLookupCallbackProxy::LookupComplete
(nsTArray<nsUrlClassifierLookupResult>* aResults)
(LookupResultArray* aResults)
{
nsCOMPtr<nsIRunnable> r = new LookupCompleteRunnable(mTarget, aResults);
return NS_DispatchToMainThread(r);

View File

@ -40,6 +40,9 @@
#include "nsIUrlClassifierDBService.h"
#include "nsThreadUtils.h"
#include "LookupCache.h"
using namespace mozilla::safebrowsing;
/**
* Thread proxy from the main thread to the worker thread.
@ -150,7 +153,7 @@ public:
{
public:
CacheCompletionsRunnable(nsIUrlClassifierDBServiceWorker* aTarget,
nsTArray<nsUrlClassifierLookupResult>* aEntries)
CacheResultArray *aEntries)
: mTarget(aTarget)
, mEntries(aEntries)
{ }
@ -159,7 +162,23 @@ public:
private:
nsCOMPtr<nsIUrlClassifierDBServiceWorker> mTarget;
nsTArray<nsUrlClassifierLookupResult>* mEntries;
CacheResultArray *mEntries;
};
class CacheMissesRunnable : public nsRunnable
{
public:
CacheMissesRunnable(nsIUrlClassifierDBServiceWorker* aTarget,
PrefixArray *aEntries)
: mTarget(aTarget)
, mEntries(aEntries)
{ }
NS_DECL_NSIRUNNABLE
private:
nsCOMPtr<nsIUrlClassifierDBServiceWorker> mTarget;
PrefixArray *mEntries;
};
private:
@ -182,7 +201,7 @@ public:
{
public:
LookupCompleteRunnable(nsIUrlClassifierLookupCallback* aTarget,
nsTArray<nsUrlClassifierLookupResult>* aResults)
LookupResultArray *aResults)
: mTarget(aTarget)
, mResults(aResults)
{ }
@ -191,7 +210,7 @@ public:
private:
nsCOMPtr<nsIUrlClassifierLookupCallback> mTarget;
nsTArray<nsUrlClassifierLookupResult>* mResults;
LookupResultArray* mResults;
};
private:

View File

@ -171,11 +171,16 @@ nsUrlClassifierStreamUpdater::FetchUpdate(const nsACString & aUpdateUrl,
const nsACString & aStreamTable,
const nsACString & aServerMAC)
{
LOG(("(pre) Fetching update from %s\n", PromiseFlatCString(aUpdateUrl).get()));
nsCOMPtr<nsIURI> uri;
nsresult rv = NS_NewURI(getter_AddRefs(uri), aUpdateUrl);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Fetching update from %s\n", PromiseFlatCString(aUpdateUrl).get()));
nsCAutoString urlSpec;
uri->GetAsciiSpec(urlSpec);
LOG(("(post) Fetching update from %s\n", urlSpec.get()));
return FetchUpdate(uri, aRequestBody, aStreamTable, aServerMAC);
}
@ -240,6 +245,11 @@ nsUrlClassifierStreamUpdater::DownloadUpdates(
mIsUpdating = true;
*_retval = true;
nsCAutoString urlSpec;
mUpdateUrl->GetAsciiSpec(urlSpec);
LOG(("FetchUpdate: %s", urlSpec.get()));
//LOG(("requestBody: %s", aRequestBody.get()));
return FetchUpdate(mUpdateUrl, aRequestBody, EmptyCString(), EmptyCString());
}

View File

@ -24,14 +24,26 @@ prefBranch.setIntPref("urlclassifier.gethashnoise", 0);
prefBranch.setBoolPref("browser.safebrowsing.malware.enabled", true);
prefBranch.setBoolPref("browser.safebrowsing.enabled", true);
function cleanUp() {
function delFile(name) {
try {
// Delete the named file from the profile directory, if present
var file = dirSvc.get('ProfLD', Ci.nsIFile);
file.append("urlclassifier3.sqlite");
file.append(name);
if (file.exists())
file.remove(false);
} catch (e) {}
} catch(e) {
}
}
function cleanUp() {
delFile("classifier.hashkey");
delFile("urlclassifier3.sqlite");
delFile("safebrowsing/test-phish-simple.sbstore");
delFile("safebrowsing/test-malware-simple.sbstore");
delFile("safebrowsing/test-phish-simple.cache");
delFile("safebrowsing/test-malware-simple.cache");
delFile("safebrowsing/test-phish-simple.pset");
delFile("safebrowsing/test-malware-simple.pset");
}
var dbservice = Cc["@mozilla.org/url-classifier/dbservice;1"].getService(Ci.nsIUrlClassifierDBService);
@ -276,11 +288,10 @@ function runNextTest()
dbservice.resetDatabase();
dbservice.setHashCompleter('test-phish-simple', null);
dumpn("running " + gTests[gNextTest]);
dump("running " + gTests[gNextTest]);
gTests[gNextTest++]();
let test = gTests[gNextTest++];
dump("running " + test.name + "\n");
test();
}
function runTests(tests)

View File

@ -55,6 +55,7 @@ function testSimpleSub()
"chunkType" : "s",
"urls": subUrls }]);
var assertions = {
"tableData" : "test-phish-simple;a:1:s:50",
"urlsExist" : [ "bar.com/b" ],
@ -361,7 +362,8 @@ function testExpireLists() {
{ "chunkType" : "sd:1-3,5" }]);
var assertions = {
"tableData" : "test-phish-simple;"
// "tableData" : "test-phish-simple;"
"tableData": ""
};
doTest([addUpdate, subUpdate, expireUpdate], assertions);
@ -479,10 +481,7 @@ function run_test()
testSubPartiallyMatches2,
testSubsDifferentChunks,
testSubsDifferentChunksSameHostId,
testExpireLists,
testDuplicateAddChunks,
testExpireWholeSub,
testPreventWholeSub,
testExpireLists
]);
}

View File

@ -1,195 +0,0 @@
//* -*- Mode: Javascript; tab-width: 8; indent-tabs-mode: nil; js-indent-level: 2 -*- *
// Test an add of two urls to a fresh database
function testCleanHostKeys() {
var addUrls = [ "foo.com/a" ];
var update = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : addUrls
}]);
doStreamUpdate(update, function() {
var ios = Components.classes["@mozilla.org/network/io-service;1"].
getService(Components.interfaces.nsIIOService);
// Check with a clean host key
var uri = ios.newURI("http://bar.com/a", null, null);
// Use the nsIURIClassifier interface (the
// nsIUrlClassifierDBService will always queue a lookup,
// nsIURIClassifier won't if the host key is known to be clean.
var classifier = dbservice.QueryInterface(Ci.nsIURIClassifier);
var result = classifier.classify(uri, function(errorCode) {
var result2 = classifier.classify(uri, function() {
do_throw("shouldn't get a callback");
});
// second call shouldn't result in a callback.
do_check_eq(result2, false);
do_throw("shouldn't get a callback");
});
// The first classifier call will not result in a callback
do_check_eq(result, false);
runNextTest();
}, updateError);
}
// Make sure that an update properly clears the host key cache
function testUpdate() {
var ios = Components.classes["@mozilla.org/network/io-service;1"].
getService(Components.interfaces.nsIIOService);
// Must put something in the PrefixSet
var preUrls = [ "foo.com/b" ];
var preUpdate = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : preUrls
}]);
doStreamUpdate(preUpdate, function() {
// First lookup won't happen...
var uri = ios.newURI("http://foo.com/a", null, null);
// Use the nsIURIClassifier interface (the
// nsIUrlClassifierDBService will always queue a lookup,
// nsIURIClassifier won't if the host key is known to be clean.
var classifier = dbservice.QueryInterface(Ci.nsIURIClassifier);
var result = classifier.classify(uri, function(errorCode) {
// shouldn't arrive here
do_check_eq(errorCode, Cr.NS_OK);
do_throw("shouldn't get a callback");
});
do_check_eq(result, false);
// Now add the url to the db...
var addUrls = [ "foo.com/a" ];
var update = buildPhishingUpdate(
[
{ "chunkNum" : 2,
"urls" : addUrls
}]);
doStreamUpdate(update, function() {
var result2 = classifier.classify(uri, function(errorCode) {
do_check_neq(errorCode, Cr.NS_OK);
runNextTest();
});
// second call should result in a callback.
do_check_eq(result2, true);
}, updateError);
}, updateError);
}
function testResetFullCache() {
// Must put something in the PrefixSet
var preUrls = [ "zaz.com/b" ];
var preUpdate = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : preUrls
}]);
doStreamUpdate(preUpdate, function() {
// First do enough queries to fill up the clean hostkey cache
var ios = Components.classes["@mozilla.org/network/io-service;1"].
getService(Components.interfaces.nsIIOService);
// Use the nsIURIClassifier interface (the
// nsIUrlClassifierDBService will always queue a lookup,
// nsIURIClassifier won't if the host key is known to be clean.
var classifier = dbservice.QueryInterface(Ci.nsIURIClassifier);
var uris1 = [
"www.foo.com/",
"www.bar.com/",
"www.blah.com/",
"www.site.com/",
"www.example.com/",
"www.test.com/",
"www.malware.com/",
"www.phishing.com/",
"www.clean.com/" ];
var uris2 = [];
var runSecondLookup = function() {
if (uris2.length == 0) {
runNextTest();
return;
}
var spec = uris2.pop();
var uri = ios.newURI("http://" + spec, null, null);
var result = classifier.classify(uri, function(errorCode) {
});
runSecondLookup();
// now look up a few more times.
}
var runInitialLookup = function() {
if (uris1.length == 0) {
// We're done filling up the cache. Run an update to flush it,
// then start lookup up again.
var addUrls = [ "notgoingtocheck.com/a" ];
var update = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : addUrls
}]);
doStreamUpdate(update, function() {
runSecondLookup();
}, updateError);
return;
}
var spec = uris1.pop();
uris2.push(spec);
var uri = ios.newURI("http://" + spec, null, null);
var result = classifier.classify(uri, function(errorCode) {
});
runInitialLookup();
// None of these will generate a callback
do_check_eq(result, false);
if (!result) {
doNextTest();
}
}
// XXX bug 457790: dbservice.resetDatabase() doesn't have a way to
// wait to make sure it has been applied. Until this is added, we'll
// just use a timeout.
var t = new Timer(3000, runInitialLookup);
}, updateError);
}
function testBug475436() {
var addUrls = [ "foo.com/a", "www.foo.com/" ];
var update = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : addUrls
}]);
var assertions = {
"tableData" : "test-phish-simple;a:1",
"urlsExist" : ["foo.com/a", "foo.com/a" ]
};
doUpdateTest([update], assertions, runNextTest, updateError);
}
function run_test()
{
runTests([
// XXX: We need to run testUpdate first, because of a
// race condition (bug 457790) calling dbservice.classify()
// directly after dbservice.resetDatabase().
testUpdate,
testCleanHostKeys,
testResetFullCache,
testBug475436
]);
}
do_test_pending();

View File

@ -461,7 +461,8 @@ function testWrongTable()
"tableData" : "test-phish-simple;a:1",
// The urls were added as phishing urls, but the completer is claiming
// that they are malware urls, and we trust the completer in this case.
"malwareUrlsExist" : addUrls,
// The result will be discarded, so we can only check for non-existence.
"urlsDontExist" : addUrls,
// Make sure the completer was actually queried.
"completerQueried" : [completer, addUrls]
};
@ -470,57 +471,14 @@ function testWrongTable()
function() {
// Give the dbservice a chance to (not) cache the result.
var timer = new Timer(3000, function() {
// The dbservice shouldn't have cached this result,
// so this completer should be queried.
var newCompleter = installCompleter('test-malware-simple', [[1, addUrls]], []);
// The above installCompleter installs the
// completer for test-malware-simple, we want it
// to be used for test-phish-simple too.
dbservice.setHashCompleter("test-phish-simple",
// The miss earlier will have caused a miss to be cached.
// Resetting the completer does not count as an update,
// so we will not be probed again.
var newCompleter = installCompleter('test-malware-simple', [[1, addUrls]], []);
dbservice.setHashCompleter("test-phish-simple",
newCompleter);
var assertions = {
"malwareUrlsExist" : addUrls,
"completerQueried" : [newCompleter, addUrls]
};
checkAssertions(assertions, runNextTest);
});
}, updateError);
}
function testWrongChunk()
{
var addUrls = [ "foo.com/a" ];
var update = buildPhishingUpdate(
[
{ "chunkNum" : 1,
"urls" : addUrls
}],
4);
var completer = installCompleter('test-phish-simple',
[[2, // wrong chunk number
addUrls]], []);
var assertions = {
"tableData" : "test-phish-simple;a:1",
"urlsExist" : addUrls,
// Make sure the completer was actually queried.
"completerQueried" : [completer, addUrls]
};
doUpdateTest([update], assertions,
function() {
// Give the dbservice a chance to (not) cache the result.
var timer = new Timer(3000, function() {
// The dbservice shouldn't have cached this result,
// so this completer should be queried.
var newCompleter = installCompleter('test-phish-simple', [[2, addUrls]], []);
var assertions = {
"urlsExist" : addUrls,
"completerQueried" : [newCompleter, addUrls]
"urlsDontExist" : addUrls
};
checkAssertions(assertions, runNextTest);
});
@ -818,7 +776,6 @@ function run_test()
testMixedSizesDifferentDomains,
testInvalidHashSize,
testWrongTable,
testWrongChunk,
testCachedResults,
testCachedResultsWithSub,
testCachedResultsWithExpire,
@ -826,7 +783,7 @@ function run_test()
testStaleList,
testStaleListEmpty,
testErrorList,
testErrorListIndependent,
testErrorListIndependent
]);
}

View File

@ -1,7 +1,9 @@
// newPset: returns an empty nsIUrlClassifierPrefixSet.
function newPset() {
return Cc["@mozilla.org/url-classifier/prefixset;1"]
.createInstance(Ci.nsIUrlClassifierPrefixSet);
let pset = Cc["@mozilla.org/url-classifier/prefixset;1"]
.createInstance(Ci.nsIUrlClassifierPrefixSet);
pset.init("all");
return pset;
}
// arrContains: returns true if |arr| contains the element |target|. Uses binary
@ -28,10 +30,22 @@ function arrContains(arr, target) {
return (!(i < 0 || i >= arr.length) && arr[i] == target);
}
// checkContents: Check whether the PrefixSet pset contains
// the prefixes in the passed array.
function checkContents(pset, prefixes) {
var outcount = {}, outset = {};
outset = pset.getPrefixes(outcount);
let inset = prefixes;
do_check_eq(inset.length, outset.length);
inset.sort(function(x,y) x - y);
for (let i = 0; i < inset.length; i++) {
do_check_eq(inset[i], outset[i]);
}
}
function wrappedProbe(pset, prefix) {
let key = pset.getKey();
let dummy = {};
return pset.probe(prefix, key, dummy);
return pset.probe(prefix, dummy);
};
// doRandomLookups: we use this to test for false membership with random input
@ -74,6 +88,9 @@ function testBasicPset() {
do_check_true(wrappedProbe(pset, 1593203));
do_check_false(wrappedProbe(pset, 999));
do_check_false(wrappedProbe(pset, 0));
checkContents(pset, prefixes);
}
function testDuplicates() {
@ -88,6 +105,9 @@ function testDuplicates() {
do_check_true(wrappedProbe(pset, 9));
do_check_false(wrappedProbe(pset, 4));
do_check_false(wrappedProbe(pset, 8));
checkContents(pset, prefixes);
}
function testSimplePset() {
@ -97,6 +117,9 @@ function testSimplePset() {
doRandomLookups(pset, prefixes, 100);
doExpectedLookups(pset, prefixes, 1);
checkContents(pset, prefixes);
}
function testReSetPrefixes() {
@ -113,6 +136,9 @@ function testReSetPrefixes() {
for (let i = 0; i < prefixes.length; i++) {
do_check_false(wrappedProbe(pset, prefixes[i]));
}
checkContents(pset, secondPrefixes);
}
function testLargeSet() {
@ -131,6 +157,9 @@ function testLargeSet() {
doExpectedLookups(pset, arr, 1);
doRandomLookups(pset, arr, 1000);
checkContents(pset, arr);
}
function testTinySet() {
@ -141,10 +170,12 @@ function testTinySet() {
do_check_true(wrappedProbe(pset, 1));
do_check_false(wrappedProbe(pset, 100000));
checkContents(pset, prefixes);
prefixes = [];
pset.setPrefixes(prefixes, prefixes.length);
do_check_false(wrappedProbe(pset, 1));
checkContents(pset, prefixes);
}
let tests = [testBasicPset,

View File

@ -80,8 +80,6 @@ function testSimpleForward() {
// Make sure that a nested forward (a forward within a forward) causes
// the update to fail.
function testNestedForward() {
testFillDb(); // Make sure the db isn't empty
var add1Urls = [ "foo.com/a", "bar.com/c" ];
var add2Urls = [ "foo.com/b" ];
@ -203,8 +201,6 @@ function testValidMAC() {
// Test a simple update with an invalid message authentication code.
function testInvalidMAC() {
testFillDb(); // Make sure the db isn't empty
var addUrls = [ "foo.com/a", "foo.com/b", "bar.com/c" ];
var update = buildPhishingUpdate(
[
@ -224,8 +220,6 @@ function testInvalidMAC() {
// Test a simple update without a message authentication code, when it is
// expecting one.
function testNoMAC() {
testFillDb(); // Make sure the db isn't empty
var addUrls = [ "foo.com/a", "foo.com/b", "bar.com/c" ];
var update = buildPhishingUpdate(
[
@ -282,8 +276,6 @@ function testValidForwardMAC() {
// Test an update with a valid message authentication code, but with
// invalid MACs on the forwards.
function testInvalidForwardMAC() {
testFillDb(); // Make sure the db isn't empty
var add1Urls = [ "foo.com/a", "bar.com/c" ];
var add2Urls = [ "foo.com/b" ];
var add3Urls = [ "bar.com/d" ];
@ -323,8 +315,6 @@ function testInvalidForwardMAC() {
// Test an update with a valid message authentication code, but no MAC
// specified for sub-urls.
function testNoForwardMAC() {
testFillDb(); // Make sure the db isn't empty
var add1Urls = [ "foo.com/a", "bar.com/c" ];
var add2Urls = [ "foo.com/b" ];
var add3Urls = [ "bar.com/d" ];
@ -391,8 +381,6 @@ gAssertions.gotRekey = function(data, cb)
// Tests a rekey request.
function testRekey() {
testFillDb();
var addUrls = [ "foo.com/a", "foo.com/b", "bar.com/c" ];
var update = buildPhishingUpdate(
[
@ -457,6 +445,9 @@ function run_test()
testInvalidUrlForward,
testErrorUrlForward,
testMultipleTables,
testReset,
// XXX: we're currently "once MAC, always MAC",
// so any test not using a MAC must go above
testValidMAC,
testInvalidMAC,
testNoMAC,
@ -464,7 +455,6 @@ function run_test()
testInvalidForwardMAC,
testNoForwardMAC,
testRekey,
testReset,
]);
}

View File

@ -4,7 +4,6 @@ tail = tail_urlclassifier.js
[test_addsub.js]
[test_backoff.js]
[test_cleankeycache.js]
[test_dbservice.js]
[test_hashcompleter.js]
[test_partial.js]