Bug 904607: Add protocol parser for -digest256 lists (r=gcp).

This commit is contained in:
Monica Chew 2013-09-06 17:12:33 -07:00
Родитель 0e792ce8b7
Коммит 383e668837
21 изменённых файлов: 379 добавлений и 59 удалений

Просмотреть файл

@ -34,7 +34,7 @@ interface nsIURIClassifierCallback : nsISupports
interface nsIURIClassifier : nsISupports
{
/**
* Classify a Principal using it's URI, appId and InBrowserElement state.
* Classify a Principal using its URI.
*
* @param aPrincipal
* The principal that should be checked by the URI classifier.

Просмотреть файл

@ -15,9 +15,10 @@ namespace mozilla {
namespace safebrowsing {
/**
* Store the chunks as an array of uint32_t.
* XXX: We should optimize this further to compress the
* many consecutive numbers.
* Store the chunk numbers as an array of uint32_t. We need chunk numbers in
* order to ask for incremental updates from the server.
* XXX: We should optimize this further to compress the many consecutive
* numbers.
*/
class ChunkSet {
public:

Просмотреть файл

@ -199,7 +199,9 @@ Classifier::Check(const nsACString& aSpec, LookupResultArray& aResults)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_CHECK_TIME> timer;
// Get the set of fragments to look up.
// Get the set of fragments based on the url. This is necessary because we
// only look up at most 5 URLs per aSpec, even if aSpec has more than 5
// components.
nsTArray<nsCString> fragments;
nsresult rv = LookupCache::GetLookupFragments(aSpec, &fragments);
NS_ENSURE_SUCCESS(rv, rv);
@ -226,15 +228,16 @@ Classifier::Check(const nsACString& aSpec, LookupResultArray& aResults)
Completion hostKey;
rv = LookupCache::GetKey(fragments[i], &hostKey, mCryptoHash);
if (NS_FAILED(rv)) {
// Local host on the network
// Local host on the network.
continue;
}
#if DEBUG && defined(PR_LOGGING)
if (LOG_ENABLED()) {
nsAutoCString checking;
lookupHash.ToString(checking);
LOG(("Checking %s (%X)", checking.get(), lookupHash.ToUint32()));
lookupHash.ToHexString(checking);
LOG(("Checking fragment %s, hash %s (%X)", fragments[i].get(),
checking.get(), lookupHash.ToUint32()));
}
#endif
for (uint32_t i = 0; i < cacheArray.Length(); i++) {
@ -542,8 +545,7 @@ nsresult
Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
const nsACString& aTable)
{
LOG(("Classifier::ApplyTableUpdates(%s)",
PromiseFlatCString(aTable).get()));
LOG(("Classifier::ApplyTableUpdates(%s)", PromiseFlatCString(aTable).get()));
nsAutoPtr<HashStore> store(new HashStore(aTable, mStoreDirectory));
@ -567,6 +569,7 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
}
if (!validupdates) {
// This can happen if the update was only valid for one table.
return NS_OK;
}

Просмотреть файл

@ -3,6 +3,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// This header file defines the storage types of the actual safebrowsing
// chunk data, which may be either 32-bit hashes or complete 256-bit hashes.
// Chunk numbers are represented in ChunkSet.h.
#ifndef SBEntries_h__
#define SBEntries_h__
@ -21,6 +25,7 @@ namespace safebrowsing {
#define PREFIX_SIZE 4
#define COMPLETE_SIZE 32
// This is the struct that contains 4-byte hash prefixes.
template <uint32_t S, class Comparator>
struct SafebrowsingHash
{
@ -82,6 +87,19 @@ struct SafebrowsingHash
PL_Base64Encode((char*)buf, sHashSize, aStr.BeginWriting());
aStr.BeginWriting()[len] = '\0';
}
// Render the hash as an uppercase hexadecimal string (used for logging).
// Iterate over sHashSize bytes rather than a hard-coded 32 so that the
// 4-byte Prefix instantiation does not read past the end of buf.
void ToHexString(nsACString& aStr) const {
  static const char* const lut = "0123456789ABCDEF";
  aStr.SetCapacity(2 * sHashSize);
  for (size_t i = 0; i < sHashSize; ++i) {
    // Widen through uint8_t so the shift below is well-defined for
    // byte values >= 0x80.
    const uint8_t c = static_cast<uint8_t>(buf[i]);
    aStr.Append(lut[(c >> 4) & 0x0F]);
    aStr.Append(lut[c & 0x0F]);
  }
}
#endif
uint32_t ToUint32() const {
return *((uint32_t*)buf);
@ -105,6 +123,7 @@ public:
}
}
};
// Use this for 4-byte hashes
typedef SafebrowsingHash<PREFIX_SIZE, PrefixComparator> Prefix;
typedef nsTArray<Prefix> PrefixArray;
@ -114,15 +133,19 @@ public:
return memcmp(a, b, COMPLETE_SIZE);
}
};
// Use this for 32-byte hashes
typedef SafebrowsingHash<COMPLETE_SIZE, CompletionComparator> Completion;
typedef nsTArray<Completion> CompletionArray;
struct AddPrefix {
// The truncated hash.
Prefix prefix;
// The chunk number to which it belongs.
uint32_t addChunk;
AddPrefix() : addChunk(0) {}
// Returns the chunk number.
uint32_t Chunk() const { return addChunk; }
const Prefix &PrefixHash() const { return prefix; }
@ -137,21 +160,20 @@ struct AddPrefix {
};
struct AddComplete {
union {
Prefix prefix;
Completion complete;
} hash;
Completion complete;
uint32_t addChunk;
AddComplete() : addChunk(0) {}
uint32_t Chunk() const { return addChunk; }
const Prefix &PrefixHash() const { return hash.prefix; }
const Completion &CompleteHash() const { return hash.complete; }
// The 4-byte prefix of the sha256 hash.
uint32_t ToUint32() const { return complete.ToUint32(); }
// The 32-byte sha256 hash.
const Completion &CompleteHash() const { return complete; }
template<class T>
int Compare(const T& other) const {
int cmp = hash.complete.Compare(other.CompleteHash());
int cmp = complete.Compare(other.CompleteHash());
if (cmp != 0) {
return cmp;
}
@ -160,8 +182,11 @@ struct AddComplete {
};
struct SubPrefix {
// The hash to subtract.
Prefix prefix;
// The chunk number of the add chunk to which the hash belonged.
uint32_t addChunk;
// The chunk number of this sub chunk.
uint32_t subChunk;
SubPrefix(): addChunk(0), subChunk(0) {}
@ -171,6 +196,7 @@ struct SubPrefix {
const Prefix &PrefixHash() const { return prefix; }
template<class T>
// Returns 0 if and only if the chunks are the same in every way.
int Compare(const T& aOther) const {
int cmp = prefix.Compare(aOther.PrefixHash());
if (cmp != 0)
@ -182,7 +208,9 @@ struct SubPrefix {
template<class T>
int CompareAlt(const T& aOther) const {
int cmp = prefix.Compare(aOther.PrefixHash());
Prefix other;
other.FromUint32(aOther.ToUint32());
int cmp = prefix.Compare(other);
if (cmp != 0)
return cmp;
return addChunk - aOther.addChunk;
@ -190,10 +218,7 @@ struct SubPrefix {
};
struct SubComplete {
union {
Prefix prefix;
Completion complete;
} hash;
Completion complete;
uint32_t addChunk;
uint32_t subChunk;
@ -201,11 +226,12 @@ struct SubComplete {
uint32_t Chunk() const { return subChunk; }
uint32_t AddChunk() const { return addChunk; }
const Prefix &PrefixHash() const { return hash.prefix; }
const Completion &CompleteHash() const { return hash.complete; }
const Completion &CompleteHash() const { return complete; }
// The 4-byte prefix of the sha256 hash.
uint32_t ToUint32() const { return complete.ToUint32(); }
int Compare(const SubComplete& aOther) const {
int cmp = hash.complete.Compare(aOther.hash.complete);
int cmp = complete.Compare(aOther.complete);
if (cmp != 0)
return cmp;
if (addChunk != aOther.addChunk)

Просмотреть файл

@ -146,7 +146,7 @@ TableUpdate::NewAddComplete(uint32_t aAddChunk, const Completion& aHash)
{
AddComplete *add = mAddCompletes.AppendElement();
add->addChunk = aAddChunk;
add->hash.complete = aHash;
add->complete = aHash;
}
void
@ -154,7 +154,7 @@ TableUpdate::NewSubComplete(uint32_t aAddChunk, const Completion& aHash, uint32_
{
SubComplete *sub = mSubCompletes.AppendElement();
sub->addChunk = aAddChunk;
sub->hash.complete = aHash;
sub->complete = aHash;
sub->subChunk = aSubChunk;
}
@ -323,6 +323,8 @@ HashStore::CalculateChecksum(nsAutoCString& aChecksum,
// Size of MD5 hash in bytes
const uint32_t CHECKSUM_SIZE = 16;
// MD5 is not a secure hash function, but since this is a filesystem integrity
// check, this usage is ok.
rv = hash->Init(nsICryptoHash::MD5);
NS_ENSURE_SUCCESS(rv, rv);
@ -362,9 +364,7 @@ HashStore::UpdateHeader()
nsresult
HashStore::ReadChunkNumbers()
{
if (!mInputStream) {
return NS_OK;
}
NS_ENSURE_STATE(mInputStream);
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET,
@ -385,6 +385,8 @@ nsresult
HashStore::ReadHashes()
{
if (!mInputStream) {
// BeginUpdate has been called but Open hasn't initialized mInputStream,
// because the existing HashStore is empty.
return NS_OK;
}
@ -819,14 +821,14 @@ HashStore::WriteFile()
rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written);
NS_ENSURE_SUCCESS(rv, rv);
// Write chunk numbers...
// Write chunk numbers.
rv = mAddChunks.Write(out);
NS_ENSURE_SUCCESS(rv, rv);
rv = mSubChunks.Write(out);
NS_ENSURE_SUCCESS(rv, rv);
// Write hashes..
// Write hashes.
rv = WriteAddPrefixes(out);
NS_ENSURE_SUCCESS(rv, rv);
@ -1002,7 +1004,7 @@ HashStore::ProcessSubs()
// Remove any remaining subbed prefixes from both addprefixes
// and addcompletes.
KnockoutSubs(&mSubPrefixes, &mAddPrefixes);
KnockoutSubs(&mSubPrefixes, &mAddPrefixes);
KnockoutSubs(&mSubCompletes, &mAddCompletes);
// Remove any remaining subprefixes referring to addchunks that

Просмотреть файл

@ -17,6 +17,9 @@
namespace mozilla {
namespace safebrowsing {
// A table update is built from a single update chunk from the server. As the
// protocol parser processes each chunk, it constructs a table update with the
// new hashes.
class TableUpdate {
public:
TableUpdate(const nsACString& aTable)
@ -34,6 +37,8 @@ public:
mSubCompletes.Length() == 0;
}
// Throughout, uint32_t aChunk refers only to the chunk number. Chunk data is
// stored in the Prefix structures.
void NewAddChunk(uint32_t aChunk) { mAddChunks.Set(aChunk); }
void NewSubChunk(uint32_t aChunk) { mSubChunks.Set(aChunk); }
@ -42,6 +47,7 @@ public:
void NewAddPrefix(uint32_t aAddChunk, const Prefix& aPrefix);
void NewSubPrefix(uint32_t aAddChunk, const Prefix& aPrefix, uint32_t aSubChunk);
void NewAddComplete(uint32_t aChunk, const Completion& aCompletion);
void NewSubComplete(uint32_t aAddChunk, const Completion& aCompletion,
uint32_t aSubChunk);
@ -51,9 +57,11 @@ public:
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
// Expirations for chunks.
ChunkSet& AddExpirations() { return mAddExpirations; }
ChunkSet& SubExpirations() { return mSubExpirations; }
// Hashes associated with this chunk.
AddPrefixArray& AddPrefixes() { return mAddPrefixes; }
SubPrefixArray& SubPrefixes() { return mSubPrefixes; }
AddCompleteArray& AddCompletes() { return mAddCompletes; }
@ -64,16 +72,22 @@ private:
// Update not from the remote server (no freshness)
bool mLocalUpdate;
// The list of chunk numbers that we have for each of the type of chunks.
ChunkSet mAddChunks;
ChunkSet mSubChunks;
ChunkSet mAddExpirations;
ChunkSet mSubExpirations;
// 4-byte sha256 prefixes.
AddPrefixArray mAddPrefixes;
SubPrefixArray mSubPrefixes;
// 32-byte hashes.
AddCompleteArray mAddCompletes;
SubCompleteArray mSubCompletes;
};
// There is one hash store per table.
class HashStore {
public:
HashStore(const nsACString& aTableName, nsIFile* aStoreFile);
@ -82,6 +96,11 @@ public:
const nsCString& TableName() const { return mTableName; }
nsresult Open();
// Add Prefixes are stored partly in the PrefixSet (contains the
// Prefix data organized for fast lookup/low RAM usage) and partly in the
// HashStore (Add Chunk numbers - only used for updates, slow retrieval).
// AugmentAdds function joins the separate datasets into one complete
// prefixes+chunknumbers dataset.
nsresult AugmentAdds(const nsTArray<uint32_t>& aPrefixes);
ChunkSet& AddChunks() { return mAddChunks; }
@ -126,6 +145,7 @@ private:
nsresult ReadChunkNumbers();
nsresult ReadHashes();
nsresult ReadAddPrefixes();
nsresult ReadSubPrefixes();
@ -134,6 +154,8 @@ private:
nsresult ProcessSubs();
// This is used for checking that the database is correct and for figuring out
// the number of chunks, etc. to read from disk on restart.
struct Header {
uint32_t magic;
uint32_t version;
@ -147,6 +169,8 @@ private:
Header mHeader;
// The name of the table (must end in -shavar, -digest256, or -simple for
// unittesting).
nsCString mTableName;
nsCOMPtr<nsIFile> mStoreDirectory;
@ -154,19 +178,23 @@ private:
nsCOMPtr<nsIInputStream> mInputStream;
// Chunk numbers, stored as uint32_t arrays.
ChunkSet mAddChunks;
ChunkSet mSubChunks;
ChunkSet mAddExpirations;
ChunkSet mSubExpirations;
// Chunk data for shavar tables. See Entries.h for format.
AddPrefixArray mAddPrefixes;
AddCompleteArray mAddCompletes;
SubPrefixArray mSubPrefixes;
// See bug 806422 for background. We must be able to distinguish between
// updates from the completion server and updates from the regular server.
AddCompleteArray mAddCompletes;
SubCompleteArray mSubCompletes;
};
}
}
#endif

Просмотреть файл

@ -515,7 +515,7 @@ LookupCache::GetLookupFragments(const nsACString& aSpec,
key.Assign(hosts[hostIndex]);
key.Append('/');
key.Append(paths[pathIndex]);
LOG(("Chking %s", key.get()));
LOG(("Checking fragment %s", key.get()));
aFragments->AppendElement(key);
}

Просмотреть файл

@ -222,6 +222,7 @@ ProtocolParser::ProcessControl(bool* aDone)
rv = ProcessMAC(line);
NS_ENSURE_SUCCESS(rv, rv);
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("i:"))) {
// Set the table name from the table header line.
SetCurrentTable(Substring(line, 2));
} else if (StringBeginsWith(line, NS_LITERAL_CSTRING("n:"))) {
if (PR_sscanf(line.get(), "n:%d", &mUpdateWait) != 1) {
@ -330,12 +331,30 @@ ProtocolParser::ProcessChunkControl(const nsCString& aLine)
return NS_ERROR_FAILURE;
}
mChunkState.type = (command == 'a') ? CHUNK_ADD : CHUNK_SUB;
if (mChunkState.type == CHUNK_ADD) {
mTableUpdate->NewAddChunk(mChunkState.num);
} else {
mTableUpdate->NewSubChunk(mChunkState.num);
if (StringEndsWith(mTableUpdate->TableName(),
NS_LITERAL_CSTRING("-shavar")) ||
StringEndsWith(mTableUpdate->TableName(),
NS_LITERAL_CSTRING("-simple"))) {
// Accommodate test tables ending in -simple for now.
mChunkState.type = (command == 'a') ? CHUNK_ADD : CHUNK_SUB;
} else if (StringEndsWith(mTableUpdate->TableName(),
NS_LITERAL_CSTRING("-digest256"))) {
LOG(("Processing digest256 data"));
mChunkState.type = (command == 'a') ? CHUNK_ADD_DIGEST : CHUNK_SUB_DIGEST;
}
switch (mChunkState.type) {
case CHUNK_ADD:
mTableUpdate->NewAddChunk(mChunkState.num);
break;
case CHUNK_SUB:
mTableUpdate->NewSubChunk(mChunkState.num);
break;
case CHUNK_ADD_DIGEST:
mTableUpdate->NewAddChunk(mChunkState.num);
break;
case CHUNK_SUB_DIGEST:
mTableUpdate->NewSubChunk(mChunkState.num);
break;
}
return NS_OK;
@ -406,11 +425,15 @@ ProtocolParser::ProcessChunk(bool* aDone)
mState = PROTOCOL_STATE_CONTROL;
//LOG(("Handling a %d-byte chunk", chunk.Length()));
if (StringEndsWith(mTableUpdate->TableName(), NS_LITERAL_CSTRING("-shavar"))) {
if (StringEndsWith(mTableUpdate->TableName(),
NS_LITERAL_CSTRING("-shavar"))) {
return ProcessShaChunk(chunk);
} else {
return ProcessPlaintextChunk(chunk);
}
if (StringEndsWith(mTableUpdate->TableName(),
NS_LITERAL_CSTRING("-digest256"))) {
return ProcessDigestChunk(chunk);
}
return ProcessPlaintextChunk(chunk);
}
/**
@ -507,6 +530,61 @@ ProtocolParser::ProcessShaChunk(const nsACString& aChunk)
return NS_OK;
}
// Dispatch a -digest256 chunk to the add or sub handler, based on the chunk
// type recorded when the chunk control line was parsed.
nsresult
ProtocolParser::ProcessDigestChunk(const nsACString& aChunk)
{
  switch (mChunkState.type) {
    case CHUNK_ADD_DIGEST:
      return ProcessDigestAdd(aChunk);
    case CHUNK_SUB_DIGEST:
      return ProcessDigestSub(aChunk);
    default:
      // Digest chunks must be adds or subs; anything else is unexpected here.
      return NS_ERROR_UNEXPECTED;
  }
}
// Parse an add chunk from a -digest256 table and record each hash in the
// current TableUpdate. The ABNF format for add chunks is (HASH)+, where HASH
// is a full 32-byte sha256 digest; unlike shavar chunks there are no prefix
// sizes or per-host entry counts.
nsresult
ProtocolParser::ProcessDigestAdd(const nsACString& aChunk)
{
  // Fixed: the assertion checks divisibility by COMPLETE_SIZE (32), but the
  // message previously claimed "divisible by 4".
  MOZ_ASSERT(aChunk.Length() % COMPLETE_SIZE == 0,
             "Chunk length in bytes must be divisible by 32");
  uint32_t start = 0;
  while (start < aChunk.Length()) {
    Completion hash;
    hash.Assign(Substring(aChunk, start, COMPLETE_SIZE));
    start += COMPLETE_SIZE;
    mTableUpdate->NewAddComplete(mChunkState.num, hash);
  }
  return NS_OK;
}
// Parse a sub chunk from a -digest256 table. The ABNF format for sub chunks
// is (ADDCHUNKNUM HASH)+, where ADDCHUNKNUM is a 4-byte chunk number in
// network byte order and HASH is a full 32-byte sha256 digest.
nsresult
ProtocolParser::ProcessDigestSub(const nsACString& aChunk)
{
  MOZ_ASSERT(aChunk.Length() % 36 == 0,
             "Chunk length in bytes must be divisible by 36");
  for (uint32_t offset = 0; offset < aChunk.Length(); ) {
    // Decode the add chunk number this hash belonged to (network order).
    uint32_t addChunk;
    const nsCSubstring& addChunkStr = Substring(aChunk, offset, 4);
    memcpy(&addChunk, addChunkStr.BeginReading(), 4);
    addChunk = PR_ntohl(addChunk);
    offset += 4;
    // Decode the full-length hash that is being subtracted.
    Completion hash;
    hash.Assign(Substring(aChunk, offset, COMPLETE_SIZE));
    offset += COMPLETE_SIZE;
    mTableUpdate->NewSubComplete(addChunk, hash, mChunkState.num);
  }
  return NS_OK;
}
nsresult
ProtocolParser::ProcessHostAdd(const Prefix& aDomain, uint8_t aNumEntries,
const nsACString& aChunk, uint32_t* aStart)
@ -590,6 +668,7 @@ ProtocolParser::ProcessHostAddComplete(uint8_t aNumEntries,
if (aNumEntries == 0) {
// The protocol permits an add with zero 32-byte hash entries; warn and
// treat it as a no-op rather than failing the update.
NS_WARNING("Expected > 0 entries for a 32-byte hash add.");
return NS_OK;
}
@ -684,5 +763,5 @@ ProtocolParser::GetTableUpdate(const nsACString& aTable)
return update;
}
}
}
} // namespace safebrowsing
} // namespace mozilla

Просмотреть файл

@ -59,6 +59,7 @@ private:
nsresult ProcessForward(const nsCString& aLine);
nsresult AddForward(const nsACString& aUrl, const nsACString& aMac);
nsresult ProcessChunk(bool* done);
// Remove this, it's only used for testing
nsresult ProcessPlaintextChunk(const nsACString& aChunk);
nsresult ProcessShaChunk(const nsACString& aChunk);
nsresult ProcessHostAdd(const Prefix& aDomain, uint8_t aNumEntries,
@ -69,6 +70,12 @@ private:
uint32_t *aStart);
nsresult ProcessHostSubComplete(uint8_t numEntries, const nsACString& aChunk,
uint32_t* start);
// Digest chunks are very similar to shavar chunks, except digest chunks
// always contain the full hash, so there is no need for chunk data to
// contain prefix sizes.
nsresult ProcessDigestChunk(const nsACString& aChunk);
nsresult ProcessDigestAdd(const nsACString& aChunk);
nsresult ProcessDigestSub(const nsACString& aChunk);
bool NextLine(nsACString& aLine);
void CleanupUpdates();
@ -80,8 +87,13 @@ private:
ParserState mState;
enum ChunkType {
// Types for shavar tables.
CHUNK_ADD,
CHUNK_SUB
CHUNK_SUB,
// Types for digest256 tables. digest256 tables differ in format from
// shavar tables since they only contain complete hashes.
CHUNK_ADD_DIGEST,
CHUNK_SUB_DIGEST
};
struct ChunkState {
@ -106,7 +118,9 @@ private:
bool mRekeyRequested;
nsTArray<ForwardedUpdate> mForwards;
// Keep track of updates to apply before passing them to the DBServiceWorkers.
nsTArray<TableUpdate*> mTableUpdates;
// Updates to apply to the current table being parsed.
TableUpdate *mTableUpdate;
};

Просмотреть файл

@ -3,6 +3,7 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This is the only implementation of nsIUrlListManager.
// A class that manages lists, namely white and black lists for
// phishing or malware protection. The ListManager knows how to fetch,
// update, and store lists.

Просмотреть файл

@ -79,9 +79,9 @@ interface nsIUrlClassifierUpdateObserver : nsISupports {
interface nsIUrlClassifierDBService : nsISupports
{
/**
* Looks up a key in the database.
* Looks up a URI in the database.
*
* @param key: The principal containing the information to search.
* @param principal: The principal containing the URI to search.
* @param c: The callback will be called with a comma-separated list
* of tables to which the key belongs.
*/

Просмотреть файл

@ -43,9 +43,11 @@ interface nsIUrlClassifierHashCompleterCallback : nsISupports
/**
* Clients updating the url-classifier database have the option of sending
* partial (32-bit) hashes of URL fragments to be blacklisted. If the
* url-classifier encounters one of these truncated hashes, it will ask
* an nsIUrlClassifierCompleter instance to asynchronously provide the
* complete hash, along with some associated metadata.
* url-classifier encounters one of these truncated hashes, it will ask an
* nsIUrlClassifierCompleter instance to asynchronously provide the complete
* hash, along with some associated metadata.
 * NOTE: This interface appears to be used only for testing and is a
 * candidate for removal.
*/
[scriptable, uuid(ade9b72b-3562-44f5-aba6-e63246be53ae)]
interface nsIUrlClassifierHashCompleter : nsISupports

Просмотреть файл

@ -23,6 +23,7 @@ interface nsIUrlClassifierStreamUpdater : nsISupports
* Try to download updates from updateUrl. Only one instance of this
* runs at a time, so we return false if another instance is already
* running.
* This is used in nsIUrlListManager as well as in testing.
* @param aRequestTables Comma-separated list of tables included in this
* update.
* @param aRequestBody The body for the request.

Просмотреть файл

@ -155,6 +155,7 @@ private:
nsCOMPtr<nsICryptoHash> mCryptoHash;
nsAutoPtr<Classifier> mClassifier;
// The class that actually parses the update chunks.
nsAutoPtr<ProtocolParser> mProtocolParser;
// Directory where to store the SB databases.
@ -458,6 +459,7 @@ nsUrlClassifierDBServiceWorker::BeginUpdate(nsIUrlClassifierUpdateObserver *obse
return NS_OK;
}
// Called from the stream updater.
NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::BeginStream(const nsACString &table,
const nsACString &serverMAC)
@ -539,6 +541,7 @@ nsUrlClassifierDBServiceWorker::UpdateStream(const nsACString& chunk)
HandlePendingLookups();
// Feed the chunk to the parser.
return mProtocolParser->AppendStream(chunk);
}
@ -719,9 +722,9 @@ nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
if (activeTable) {
TableUpdate * tu = pParse->GetTableUpdate(resultsPtr->ElementAt(i).table);
LOG(("CacheCompletion Addchunk %d hash %X", resultsPtr->ElementAt(i).entry.addChunk,
resultsPtr->ElementAt(i).entry.hash.prefix));
resultsPtr->ElementAt(i).entry.ToUint32()));
tu->NewAddComplete(resultsPtr->ElementAt(i).entry.addChunk,
resultsPtr->ElementAt(i).entry.hash.complete);
resultsPtr->ElementAt(i).entry.complete);
tu->NewAddChunk(resultsPtr->ElementAt(i).entry.addChunk);
tu->SetLocalUpdate();
updates.AppendElement(tu);
@ -919,7 +922,7 @@ nsUrlClassifierLookupCallback::Completion(const nsACString& completeHash,
if (verified) {
CacheResult result;
result.entry.addChunk = chunkId;
result.entry.hash.complete = hash;
result.entry.complete = hash;
result.table = tableName;
// OK if this fails, we just won't cache the item.
@ -1300,6 +1303,7 @@ nsUrlClassifierDBService::LookupURI(nsIPrincipal* aPrincipal,
rv = mWorker->QueueLookup(key, proxyCallback);
NS_ENSURE_SUCCESS(rv, rv);
// This seems to just call HandlePendingLookups.
return mWorkerProxy->Lookup(nullptr, nullptr);
}

Просмотреть файл

@ -70,7 +70,8 @@ private:
// Disallow copy constructor
nsUrlClassifierDBService(nsUrlClassifierDBService&);
nsresult LookupURI(nsIPrincipal* aPrincipal, nsIUrlClassifierCallback* c,
nsresult LookupURI(nsIPrincipal* aPrincipal,
nsIUrlClassifierCallback* c,
bool forceCheck, bool *didCheck);
// Close db connection and join the background thread if it exists.

Просмотреть файл

@ -27,6 +27,8 @@ static const PRLogModuleInfo *gUrlClassifierStreamUpdaterLog = nullptr;
#endif
// This class does absolutely nothing, except pass requests onto the DBService.
///////////////////////////////////////////////////////////////////////////////
// nsIUrlClassiferStreamUpdater implementation
// Handles creating/running the stream listener
@ -107,6 +109,7 @@ nsUrlClassifierStreamUpdater::FetchUpdate(nsIURI *aUpdateUrl,
mBeganStream = false;
// If aRequestBody is empty, construct it for the test.
if (!aRequestBody.IsEmpty()) {
rv = AddRequestBody(aRequestBody);
NS_ENSURE_SUCCESS(rv, rv);
@ -114,6 +117,7 @@ nsUrlClassifierStreamUpdater::FetchUpdate(nsIURI *aUpdateUrl,
// Set the appropriate content type for file/data URIs, for unit testing
// purposes.
// This is only used for testing and should be deleted.
bool match;
if ((NS_SUCCEEDED(aUpdateUrl->SchemeIs("file", &match)) && match) ||
(NS_SUCCEEDED(aUpdateUrl->SchemeIs("data", &match)) && match)) {
@ -214,8 +218,9 @@ nsUrlClassifierStreamUpdater::DownloadUpdates(
mUpdateUrl->GetAsciiSpec(urlSpec);
LOG(("FetchUpdate: %s", urlSpec.get()));
//LOG(("requestBody: %s", aRequestBody.get()));
//LOG(("requestBody: %s", aRequestBody.Data()));
LOG(("Calling into FetchUpdate"));
return FetchUpdate(mUpdateUrl, aRequestBody, EmptyCString(), EmptyCString());
}
@ -238,6 +243,9 @@ nsUrlClassifierStreamUpdater::UpdateUrlRequested(const nsACString &aUrl,
StringBeginsWith(aUrl, NS_LITERAL_CSTRING("file:"))) {
update->mUrl = aUrl;
} else {
// This must be fixed when bug 783047 is fixed. However, for unittesting
// update urls to localhost should use http, not https (otherwise the
// connection will fail silently, since there will be no cert available).
update->mUrl = NS_LITERAL_CSTRING("http://") + aUrl;
}
update->mTable = aTable;
@ -418,6 +426,7 @@ nsUrlClassifierStreamUpdater::OnStartRequest(nsIRequest *request,
uint32_t requestStatus;
rv = httpChannel->GetResponseStatus(&requestStatus);
LOG(("HTTP request returned failure code: %d.", requestStatus));
NS_ENSURE_SUCCESS(rv, rv);
strStatus.AppendInt(requestStatus);
@ -462,7 +471,6 @@ nsUrlClassifierStreamUpdater::OnDataAvailable(nsIRequest *request,
NS_ENSURE_SUCCESS(rv, rv);
//LOG(("Chunk (%d): %s\n\n", chunk.Length(), chunk.get()));
rv = mDBService->UpdateStream(chunk);
NS_ENSURE_SUCCESS(rv, rv);

Просмотреть файл

@ -52,15 +52,18 @@ private:
nsresult AddRequestBody(const nsACString &aRequestBody);
// Fetches an update for a single table.
nsresult FetchUpdate(nsIURI *aURI,
const nsACString &aRequestBody,
const nsACString &aTable,
const nsACString &aServerMAC);
// Dumb wrapper so we don't have to create URIs.
nsresult FetchUpdate(const nsACString &aURI,
const nsACString &aRequestBody,
const nsACString &aTable,
const nsACString &aServerMAC);
// Fetches the next table, from mPendingUpdates.
nsresult FetchNext();
bool mIsUpdating;

Двоичные данные
toolkit/components/url-classifier/tests/unit/data/digest1.chunk Normal file

Двоичный файл не отображается.

Просмотреть файл

@ -0,0 +1,2 @@
a:5:32:32
<EFBFBD><EFBFBD>_H<EFBFBD>^<5E>a<EFBFBD>7<EFBFBD><37>]<5D>=#<23>nm<6E><6D><EFBFBD><EFBFBD>n<EFBFBD><6E>o<EFBFBD><6F>Q<EFBFBD>

Просмотреть файл

@ -0,0 +1,144 @@
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "NetUtil",
"resource://gre/modules/NetUtil.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "Promise",
"resource://gre/modules/Promise.jsm");
// Global test server for serving safebrowsing updates.
let gHttpServ = null;
// Global nsIUrlClassifierDBService
let gDbService = Cc["@mozilla.org/url-classifier/dbservice;1"]
.getService(Ci.nsIUrlClassifierDBService);
// Security manager for creating nsIPrincipals from URIs
let gSecMan = Cc["@mozilla.org/scriptsecuritymanager;1"]
.getService(Ci.nsIScriptSecurityManager);
// A map of tables to arrays of update redirect urls.
let gTables = {};
// Construct an update from a file.
// Read the entire contents of a test data file into a (binary) string.
function readFileToString(aFilename) {
  let file = do_get_file(aFilename);
  let fileStream = Cc["@mozilla.org/network/file-input-stream;1"]
    .createInstance(Ci.nsIFileInputStream);
  fileStream.init(file, -1, 0, 0);
  return NetUtil.readInputStreamToString(fileStream, fileStream.available());
}
// Registers a table for which to serve update chunks. Returns a promise that
// resolves when that chunk has been downloaded.
// Register a chunk file to be served as an update for aTable by the mock
// server. Returns a promise that resolves (with the chunk contents) once
// the chunk has been downloaded.
function registerTableUpdate(aTable, aFilename) {
  let deferred = Promise.defer();

  // First update registered for this table: create its redirect list.
  if (!(aTable in gTables)) {
    gTables[aTable] = [];
  }

  // Chunks are numbered in registration order, starting at 1.
  let chunkNumber = gTables[aTable].length + 1;
  let redirectPath = "/" + aTable + "-" + chunkNumber;

  // Remember the redirect url so processUpdateRequest can hand it back to
  // the updater when it asks for this table.
  gTables[aTable].push("localhost:4444" + redirectPath);

  gHttpServ.registerPathHandler(redirectPath, function(request, response) {
    do_print("Mock safebrowsing server handling request for " + redirectPath);
    let contents = readFileToString(aFilename);
    response.setHeader("Content-Type",
                       "application/vnd.google.safebrowsing-update", false);
    response.setStatusLine(request.httpVersion, 200, "OK");
    response.bodyOutputStream.write(contents, contents.length);
    deferred.resolve(contents);
  });
  return deferred.promise;
}
// Construct a response with redirect urls.
// Build the response body for an update request: an "n:" timeout line
// followed, for each registered table, by an "i:" line and one "u:"
// redirect line per registered chunk.
function processUpdateRequest() {
  let response = "n:1000\n";
  for (let table in gTables) {
    response += "i:" + table + "\n";
    gTables[table].forEach(function(redirectUrl) {
      response += "u:" + redirectUrl + "\n";
    });
  }
  do_print("Returning update response: " + response);
  return response;
}
// Set up our test server to handle update requests.
// Entry point: start the mock safebrowsing update server, then run the
// registered tests.
function run_test() {
  gHttpServ = new HttpServer();
  gHttpServ.registerDirectory("/", do_get_cwd());

  // The updater POSTs the tables it wants to /downloads; reply with the
  // redirect urls collected by registerTableUpdate.
  gHttpServ.registerPathHandler("/downloads", function(request, response) {
    // Drain the request body (the table list) before responding.
    let requestBody = NetUtil.readInputStreamToString(
      request.bodyInputStream, request.bodyInputStream.available());
    let blob = processUpdateRequest();
    response.setHeader("Content-Type",
                       "application/vnd.google.safebrowsing-update", false);
    response.setStatusLine(request.httpVersion, 200, "OK");
    response.bodyOutputStream.write(blob, blob.length);
  });

  gHttpServ.start(4444);
  run_next_test();
}
// Helper: build an nsIURI from a string spec.
function createURI(s) {
  return Cc["@mozilla.org/network/io-service;1"]
           .getService(Ci.nsIIOService)
           .newURI(s, null, null);
}
// Just throw if we ever get an update or download error.
// Shared failure callback: any download or update error fails the test run.
function handleError(aEvent) {
  do_throw("We didn't download or update correctly: " + aEvent);
}
// End-to-end: download two digest256 chunks from the mock server and check
// that the update completes with the n:1000 timeout from the response.
add_test(function test_update() {
let streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
streamUpdater.updateUrl = "http://localhost:4444/downloads";
// Load up some update chunks for the safebrowsing server to serve.
// Registration order matters: registerTableUpdate numbers chunks in the
// order they are registered.
registerTableUpdate("goog-downloadwhite-digest256", "data/digest1.chunk");
registerTableUpdate("goog-downloadwhite-digest256", "data/digest2.chunk");
// Download some updates, and don't continue until the downloads are done.
function updateSuccess(aEvent) {
// Timeout of n:1000 is constructed in processUpdateRequest above and
// passed back in the callback in nsIUrlClassifierStreamUpdater on success.
do_check_eq("1000", aEvent);
do_print("All data processed");
run_next_test();
}
streamUpdater.downloadUpdates(
"goog-downloadwhite-digest256",
"goog-downloadwhite-digest256;\n", "",
updateSuccess, handleError, handleError);
});
// A URI that is on no list should come back with an empty table list.
add_test(function test_url_not_whitelisted() {
  let principal = gSecMan.getNoAppCodebasePrincipal(
    createURI("http://example.com"));
  gDbService.lookup(principal, function handleEvent(aEvent) {
    // This URI is not on any lists.
    do_check_eq("", aEvent);
    run_next_test();
  });
});
// After the update, whitelisted.com should be found on the digest256 list.
add_test(function test_url_whitelisted() {
  // Hash of "whitelisted.com/" (canonicalized URL) is:
  // 93CA5F48E15E9861CD37C2D95DB43D23CC6E6DE5C3F8FA6E8BE66F97CC518907
  let principal = gSecMan.getNoAppCodebasePrincipal(
    createURI("http://whitelisted.com"));
  gDbService.lookup(principal, function handleEvent(aEvent) {
    do_check_eq("goog-downloadwhite-digest256", aEvent);
    run_next_test();
  });
});

Просмотреть файл

@ -11,3 +11,4 @@ skip-if = os == "mac" || os == "linux"
[test_partial.js]
[test_prefixset.js]
[test_streamupdater.js]
[test_digest256.js]