--- a/toolkit/components/url-classifier/Classifier.cpp
+++ b/toolkit/components/url-classifier/Classifier.cpp
@@ -439,17 +439,16 @@ Classifier::TableRequest(nsACString& aRe
// Update the TableRequest result in-memory cache.
mTableRequestResult = aResult;
mIsTableRequestResultOutdated = false;
}
nsresult
Classifier::Check(const nsACString& aSpec,
const nsACString& aTables,
- uint32_t aFreshnessGuarantee,
LookupResultArray& aResults)
{
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_CHECK_TIME> timer;
// Get the set of fragments based on the url. This is necessary because we
// only look up at most 5 URLs per aSpec, even if aSpec has more than 5
// components.
nsTArray<nsCString> fragments;
@@ -491,21 +490,20 @@ Classifier::Check(const nsACString& aSpe
nsAutoCString checking;
lookupHash.ToHexString(checking);
LOG(("Checking fragment %s, hash %s (%X)", fragments[i].get(),
checking.get(), lookupHash.ToUint32()));
}
for (uint32_t i = 0; i < cacheArray.Length(); i++) {
LookupCache *cache = cacheArray[i];
- bool has, fromCache, confirmed;
+ bool has, confirmed;
uint32_t matchLength;
- rv = cache->Has(lookupHash, mTableFreshness, aFreshnessGuarantee,
- &has, &matchLength, &confirmed, &fromCache);
+ rv = cache->Has(lookupHash, &has, &matchLength, &confirmed);
NS_ENSURE_SUCCESS(rv, rv);
if (has) {
LookupResult *result = aResults.AppendElement();
if (!result)
return NS_ERROR_OUT_OF_MEMORY;
LOG(("Found a result in %s: %s",
@@ -1235,17 +1233,17 @@ Classifier::UpdateHashStore(nsTArray<Tab
// Read the part of the store that is (only) in the cache
LookupCacheV2* lookupCache =
LookupCache::Cast<LookupCacheV2>(GetLookupCacheForUpdate(store.TableName()));
if (!lookupCache) {
return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND;
}
// Clear cache when update
- lookupCache->ClearCache();
+ lookupCache->InvalidateExpiredCacheEntries();
FallibleTArray<uint32_t> AddPrefixHashes;
rv = lookupCache->GetPrefixes(AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
rv = store.AugmentAdds(AddPrefixHashes);
NS_ENSURE_SUCCESS(rv, rv);
AddPrefixHashes.Clear();
@@ -1331,17 +1329,17 @@ Classifier::UpdateTableV4(nsTArray<Table
LookupCache::Cast<LookupCacheV4>(GetLookupCacheForUpdate(aTable));
if (!lookupCache) {
return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND;
}
// Remove cache entries whose negative cache time is expired when update.
// We don't check if positive cache time is expired here because we want to
// keep the eviction rule simple when doing an update.
- lookupCache->InvalidateExpiredCacheEntry();
+ lookupCache->InvalidateExpiredCacheEntries();
nsresult rv = NS_OK;
// If there are multiple updates for the same table, prefixes1 & prefixes2
// will act as input and output in turn to reduce memory copy overhead.
PrefixStringMap prefixes1, prefixes2;
PrefixStringMap* input = &prefixes1;
PrefixStringMap* output = &prefixes2;
@@ -1426,17 +1424,18 @@ Classifier::UpdateCache(TableUpdate* aUp
LookupCache *lookupCache = GetLookupCache(table);
if (!lookupCache) {
return NS_ERROR_FAILURE;
}
auto lookupV2 = LookupCache::Cast<LookupCacheV2>(lookupCache);
if (lookupV2) {
auto updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
- lookupV2->AddCompletionsToCache(updateV2->AddCompletes());
+ lookupV2->AddGethashResultToCache(updateV2->AddCompletes(),
+ updateV2->MissPrefixes());
} else {
auto lookupV4 = LookupCache::Cast<LookupCacheV4>(lookupCache);
if (!lookupV4) {
return NS_ERROR_FAILURE;
}
auto updateV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
lookupV4->AddFullHashResponseToCache(updateV4->FullHashResponse());
--- a/toolkit/components/url-classifier/Classifier.h
+++ b/toolkit/components/url-classifier/Classifier.h
@@ -55,17 +55,16 @@ public:
*/
nsresult ActiveTables(nsTArray<nsCString>& aTables);
/**
* Check a URL against the specified tables.
*/
nsresult Check(const nsACString& aSpec,
const nsACString& tables,
- uint32_t aFreshnessGuarantee,
LookupResultArray& aResults);
/**
* Asynchronously apply updates to the in-use databases. When the
* update is complete, the caller can be notified by |aCallback|, which
* will occur on the caller thread. Note that the ownership of
* |aUpdates| will be transferred. This design is inherited from the
* previous sync update function (ApplyUpdates) which has been removed.
--- a/toolkit/components/url-classifier/Entries.h
+++ b/toolkit/components/url-classifier/Entries.h
@@ -257,16 +257,17 @@ struct SubComplete {
return subChunk - aOther.subChunk;
}
};
typedef FallibleTArray<AddPrefix> AddPrefixArray;
typedef FallibleTArray<AddComplete> AddCompleteArray;
typedef FallibleTArray<SubPrefix> SubPrefixArray;
typedef FallibleTArray<SubComplete> SubCompleteArray;
+typedef FallibleTArray<Prefix> MissPrefixArray;
/**
* Compares chunks by their add chunk, then their prefix.
*/
template<class T>
class EntryCompare {
public:
typedef T elem_type;
--- a/toolkit/components/url-classifier/HashStore.cpp
+++ b/toolkit/components/url-classifier/HashStore.cpp
@@ -144,16 +144,24 @@ TableUpdateV2::NewSubComplete(uint32_t a
SubComplete *sub = mSubCompletes.AppendElement(fallible);
if (!sub) return NS_ERROR_OUT_OF_MEMORY;
sub->addChunk = aAddChunk;
sub->complete = aHash;
sub->subChunk = aSubChunk;
return NS_OK;
}
+nsresult
+TableUpdateV2::NewMissPrefix(const Prefix& aPrefix)
+{
+ Prefix *prefix = mMissPrefixes.AppendElement(aPrefix, fallible);
+ if (!prefix) return NS_ERROR_OUT_OF_MEMORY;
+ return NS_OK;
+}
+
void
TableUpdateV4::NewPrefixes(int32_t aSize, std::string& aPrefixes)
{
NS_ENSURE_TRUE_VOID(aPrefixes.size() % aSize == 0);
NS_ENSURE_TRUE_VOID(!mPrefixesMap.Get(aSize));
if (LOG_ENABLED() && 4 == aSize) {
int numOfPrefixes = aPrefixes.size() / 4;
--- a/toolkit/components/url-classifier/HashStore.h
+++ b/toolkit/components/url-classifier/HashStore.h
@@ -60,17 +60,18 @@ public:
bool Empty() const override {
return mAddChunks.Length() == 0 &&
mSubChunks.Length() == 0 &&
mAddExpirations.Length() == 0 &&
mSubExpirations.Length() == 0 &&
mAddPrefixes.Length() == 0 &&
mSubPrefixes.Length() == 0 &&
mAddCompletes.Length() == 0 &&
- mSubCompletes.Length() == 0;
+ mSubCompletes.Length() == 0 &&
+ mMissPrefixes.Length() == 0;
}
// Throughout, uint32_t aChunk refers only to the chunk number. Chunk data is
// stored in the Prefix structures.
MOZ_MUST_USE nsresult NewAddChunk(uint32_t aChunk) {
return mAddChunks.Set(aChunk);
};
MOZ_MUST_USE nsresult NewSubChunk(uint32_t aChunk) {
@@ -86,44 +87,51 @@ public:
MOZ_MUST_USE nsresult NewSubPrefix(uint32_t aAddChunk,
const Prefix& aPrefix,
uint32_t aSubChunk);
MOZ_MUST_USE nsresult NewAddComplete(uint32_t aChunk,
const Completion& aCompletion);
MOZ_MUST_USE nsresult NewSubComplete(uint32_t aAddChunk,
const Completion& aCompletion,
uint32_t aSubChunk);
+ MOZ_MUST_USE nsresult NewMissPrefix(const Prefix& aPrefix);
ChunkSet& AddChunks() { return mAddChunks; }
ChunkSet& SubChunks() { return mSubChunks; }
// Expirations for chunks.
ChunkSet& AddExpirations() { return mAddExpirations; }
ChunkSet& SubExpirations() { return mSubExpirations; }
// Hashes associated with this chunk.
AddPrefixArray& AddPrefixes() { return mAddPrefixes; }
SubPrefixArray& SubPrefixes() { return mSubPrefixes; }
AddCompleteArray& AddCompletes() { return mAddCompletes; }
SubCompleteArray& SubCompletes() { return mSubCompletes; }
+ // Entries that cannot be completed.
+ MissPrefixArray& MissPrefixes() { return mMissPrefixes; }
+
// For downcasting.
static const int TAG = 2;
private:
// The list of chunk numbers that we have for each of the type of chunks.
ChunkSet mAddChunks;
ChunkSet mSubChunks;
ChunkSet mAddExpirations;
ChunkSet mSubExpirations;
// 4-byte sha256 prefixes.
- AddPrefixArray mAddPrefixes;
- SubPrefixArray mSubPrefixes;
+ AddPrefixArray mAddPrefixes;
+ SubPrefixArray mSubPrefixes;
+
+ // This is only used by gethash so don't add this to Header.
+ MissPrefixArray mMissPrefixes;
// 32-byte hashes.
AddCompleteArray mAddCompletes;
SubCompleteArray mSubCompletes;
virtual int Tag() const override { return TAG; }
};
--- a/toolkit/components/url-classifier/LookupCache.cpp
+++ b/toolkit/components/url-classifier/LookupCache.cpp
@@ -27,16 +27,18 @@
// on startup.
// mGetHashCache contains 32-byte completions which were
// returned from the gethash server. They are not serialized,
// only cached until the next update.
// Name of the persistent PrefixSet storage
#define PREFIXSET_SUFFIX ".pset"
+#define V2_CACHE_DURATION_SEC (15 * 60)
+
// MOZ_LOG=UrlClassifierDbService:5
extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
#define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args)
#define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug)
namespace mozilla {
namespace safebrowsing {
@@ -111,16 +113,103 @@ LookupCache::WriteFile()
NS_ENSURE_SUCCESS(rv, rv);
rv = StoreToFile(psFile);
NS_WARNING_ASSERTION(NS_SUCCEEDED(rv), "failed to store the prefixset");
return NS_OK;
}
+nsresult
+LookupCache::CheckCache(const Completion& aCompletion,
+ bool* aHas,
+ bool* aConfirmed)
+{
+ // Shouldn't call this function if prefix is not in the database.
+ MOZ_ASSERT(*aHas);
+
+ *aConfirmed = false;
+
+ uint32_t prefix = aCompletion.ToUint32();
+
+ CachedFullHashResponse* fullHashResponse = mCache.Get(prefix);
+ if (!fullHashResponse) {
+ return NS_OK;
+ }
+
+ int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
+ int64_t expiryTimeSec;
+
+ FullHashExpiryCache& fullHashes = fullHashResponse->fullHashes;
+ nsDependentCSubstring completion(
+ reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
+
+ // Check if we can find the fullhash in positive cache
+ if (fullHashes.Get(completion, &expiryTimeSec)) {
+ if (nowSec <= expiryTimeSec) {
+ // Url is NOT safe.
+ *aConfirmed = true;
+ LOG(("Found a valid fullhash in the positive cache"));
+ } else {
+ // Trigger a gethash request in this case (aConfirmed is false).
+ LOG(("Found an expired fullhash in the positive cache"));
+
+ // Remove fullhash entry from the cache when the negative cache
+ // is also expired because whether or not the fullhash is cached
+ // locally, we will need to consult the server next time we
+ // lookup this hash. We may as well remove it from our cache.
+ if (fullHashResponse->negativeCacheExpirySec < expiryTimeSec) {
+ fullHashes.Remove(completion);
+ if (fullHashes.Count() == 0 &&
+ fullHashResponse->negativeCacheExpirySec < nowSec) {
+ mCache.Remove(prefix);
+ }
+ }
+ }
+ return NS_OK;
+ }
+
+ // Check negative cache.
+ if (fullHashResponse->negativeCacheExpirySec >= nowSec) {
+ // Url is safe.
+ LOG(("Found a valid prefix in the negative cache"));
+ *aHas = false;
+ } else {
+ LOG(("Found an expired prefix in the negative cache"));
+ if (fullHashes.Count() == 0) {
+ mCache.Remove(prefix);
+ }
+ }
+
+ return NS_OK;
+}
+
+// This function removes cache entries whose negative cache time has expired.
+// It is possible that a cache entry whose positive cache time has not yet
+// expired is still removed by calling this API. Right now we call
+// this on every update.
+void
+LookupCache::InvalidateExpiredCacheEntries()
+{
+ int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
+
+ for (auto iter = mCache.Iter(); !iter.Done(); iter.Next()) {
+ CachedFullHashResponse* response = iter.Data();
+ if (response->negativeCacheExpirySec < nowSec) {
+ iter.Remove();
+ }
+ }
+}
+
+void
+LookupCache::ClearCache()
+{
+ mCache.Clear();
+}
+
void
LookupCache::ClearAll()
{
ClearCache();
ClearPrefixes();
mPrimed = false;
}
@@ -330,16 +419,68 @@ LookupCache::LoadPrefixSet()
uint32_t size = SizeOfPrefixSet();
LOG(("SB tree done, size = %d bytes\n", size));
}
#endif
return NS_OK;
}
+#if defined(DEBUG)
+static
+void CStringToHexString(const nsACString& aIn, nsACString& aOut)
+{
+ static const char* const lut = "0123456789ABCDEF";
+ // 32 bytes is the longest hash
+ size_t len = COMPLETE_SIZE;
+
+ aOut.SetCapacity(2 * len);
+ for (size_t i = 0; i < aIn.Length(); ++i) {
+ const char c = static_cast<const char>(aIn[i]);
+ aOut.Append(lut[(c >> 4) & 0x0F]);
+ aOut.Append(lut[c & 15]);
+ }
+}
+
+static
+nsCString GetFormattedTimeString(int64_t aCurTimeSec)
+{
+ PRExplodedTime pret;
+ PR_ExplodeTime(aCurTimeSec * PR_USEC_PER_SEC, PR_GMTParameters, &pret);
+
+ return nsPrintfCString(
+ "%04d-%02d-%02d %02d:%02d:%02d UTC",
+ pret.tm_year, pret.tm_month + 1, pret.tm_mday,
+ pret.tm_hour, pret.tm_min, pret.tm_sec);
+}
+
+void
+LookupCache::DumpCache()
+{
+ if (!LOG_ENABLED()) {
+ return;
+ }
+
+ for (auto iter = mCache.ConstIter(); !iter.Done(); iter.Next()) {
+ CachedFullHashResponse* response = iter.Data();
+ LOG(("Caches prefix: %X, Expire time: %s",
+ iter.Key(),
+ GetFormattedTimeString(response->negativeCacheExpirySec).get()));
+
+ FullHashExpiryCache& fullHashes = response->fullHashes;
+ for (auto iter2 = fullHashes.ConstIter(); !iter2.Done(); iter2.Next()) {
+ nsAutoCString strFullhash;
+ CStringToHexString(iter2.Key(), strFullhash);
+ LOG((" - %s, Expire time: %s", strFullhash.get(),
+ GetFormattedTimeString(iter2.Data()).get()));
+ }
+ }
+}
+#endif
+
nsresult
LookupCacheV2::Init()
{
mPrefixSet = new nsUrlClassifierPrefixSet();
nsresult rv = mPrefixSet->Init(mTableName);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
@@ -362,55 +503,48 @@ void
LookupCacheV2::ClearAll()
{
LookupCache::ClearAll();
mUpdateCompletions.Clear();
}
nsresult
LookupCacheV2::Has(const Completion& aCompletion,
- const TableFreshnessMap& aTableFreshness,
- uint32_t aFreshnessGuarantee,
- bool* aHas, uint32_t* aMatchLength,
- bool* aConfirmed, bool* aFromCache)
+ bool* aHas,
+ uint32_t* aMatchLength,
+ bool* aConfirmed)
{
- *aHas = *aConfirmed = *aFromCache = false;
+ *aHas = *aConfirmed = false;
*aMatchLength = 0;
uint32_t prefix = aCompletion.ToUint32();
bool found;
nsresult rv = mPrefixSet->Contains(prefix, &found);
NS_ENSURE_SUCCESS(rv, rv);
- LOG(("Probe in %s: %X, found %d", mTableName.get(), prefix, found));
-
if (found) {
*aHas = true;
*aMatchLength = PREFIX_SIZE;
- }
-
- if ((mGetHashCache.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex) ||
- (mUpdateCompletions.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex)) {
- LOG(("Complete in %s", mTableName.get()));
- *aFromCache = true;
+ } else if (mUpdateCompletions.BinaryIndexOf(aCompletion) !=
+ nsTArray<Completion>::NoIndex) {
+ // The completion is found in the database, so confirm the result
*aHas = true;
*aMatchLength = COMPLETE_SIZE;
-
- int64_t ageSec; // in seconds
- if (aTableFreshness.Get(mTableName, &ageSec)) {
- int64_t nowSec = (PR_Now() / PR_USEC_PER_SEC);
- MOZ_ASSERT(ageSec <= nowSec);
-
- // Considered completion as unsafe if its table is up-to-date.
- *aConfirmed = (nowSec - ageSec) < aFreshnessGuarantee;
- }
+ *aConfirmed = true;
}
- return NS_OK;
+ if (*aHas && !(*aConfirmed)) {
+ rv = CheckCache(aCompletion, aHas, aConfirmed);
+ }
+
+ LOG(("Probe in %s: %X, has %d, confirmed %d",
+ mTableName.get(), prefix, *aHas, *aConfirmed));
+
+ return rv;
}
bool
LookupCacheV2::IsEmpty()
{
bool isEmpty;
mPrefixSet->IsEmpty(&isEmpty);
return isEmpty;
@@ -447,27 +581,45 @@ LookupCacheV2::GetPrefixes(FallibleTArra
if (!mPrimed) {
// This can happen if its a new table, so no error.
LOG(("GetPrefixes from empty LookupCache"));
return NS_OK;
}
return mPrefixSet->GetPrefixesNative(aAddPrefixes);
}
-nsresult
-LookupCacheV2::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
+void
+LookupCacheV2::AddGethashResultToCache(AddCompleteArray& aAddCompletes,
+ MissPrefixArray& aMissPrefixes,
+ int64_t aExpirySec)
{
- for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
- if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
- mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
+ int64_t defaultExpirySec = PR_Now() / PR_USEC_PER_SEC + V2_CACHE_DURATION_SEC;
+ if (aExpirySec != 0) {
+ defaultExpirySec = aExpirySec;
+ }
+
+ for (const AddComplete& add : aAddCompletes) {
+ nsDependentCSubstring fullhash(
+ reinterpret_cast<const char*>(add.CompleteHash().buf), COMPLETE_SIZE);
+
+ CachedFullHashResponse* response = mCache.LookupOrAdd(add.ToUint32());
+ // Set negative cache expiry to the same value as positive cache
+ // expiry when the gethash request returns a complete match.
+ if (response->negativeCacheExpirySec == 0) {
+ response->negativeCacheExpirySec = defaultExpirySec;
}
+ FullHashExpiryCache& fullHashes = response->fullHashes;
+ fullHashes.Put(fullhash, defaultExpirySec);
}
- mGetHashCache.Sort();
- return NS_OK;
+ for (const Prefix& prefix : aMissPrefixes) {
+ CachedFullHashResponse* response = mCache.LookupOrAdd(prefix.ToUint32());
+
+ response->negativeCacheExpirySec = defaultExpirySec;
+ }
}
nsresult
LookupCacheV2::ReadCompletions()
{
HashStore store(mTableName, mProvider, mRootStoreDirectory);
nsresult rv = store.Open();
@@ -478,22 +630,16 @@ LookupCacheV2::ReadCompletions()
const AddCompleteArray& addComplete = store.AddCompletes();
for (uint32_t i = 0; i < addComplete.Length(); i++) {
mUpdateCompletions.AppendElement(addComplete[i].complete);
}
return NS_OK;
}
-void
-LookupCacheV2::ClearCache()
-{
- mGetHashCache.Clear();
-}
-
nsresult
LookupCacheV2::ClearPrefixes()
{
return mPrefixSet->SetPrefixes(nullptr, 0);
}
nsresult
LookupCacheV2::StoreToFile(nsIFile* aFile)
@@ -564,31 +710,16 @@ LookupCacheV2::ConstructPrefixSet(AddPre
#endif
mPrimed = true;
return NS_OK;
}
#if defined(DEBUG)
-
-void
-LookupCacheV2::DumpCache()
-{
- if (!LOG_ENABLED()) {
- return;
- }
-
- for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
- nsAutoCString str;
- mGetHashCache[i].ToHexString(str);
- LOG(("Caches: %s", str.get()));
- }
-}
-
void
LookupCacheV2::DumpCompletions()
{
if (!LOG_ENABLED())
return;
for (uint32_t i = 0; i < mUpdateCompletions.Length(); i++) {
nsAutoCString str;
--- a/toolkit/components/url-classifier/LookupCache.h
+++ b/toolkit/components/url-classifier/LookupCache.h
@@ -104,24 +104,35 @@ public:
Prefix prefix;
};
class CacheResultV2 final : public CacheResult
{
public:
static const int VER;
+ // True when 'prefix' in CacheResult indicates a prefix that
+ // cannot be completed.
+ bool miss = false;
+
+ // 'completion' and 'addChunk' are used when 'miss' field is false.
Completion completion;
uint32_t addChunk;
bool operator==(const CacheResultV2& aOther) const {
- return table == aOther.table &&
- prefix == aOther.prefix &&
- completion == aOther.completion &&
- addChunk == aOther.addChunk;
+ if (table != aOther.table ||
+ prefix != aOther.prefix ||
+ miss != aOther.miss) {
+ return false;
+ }
+
+ if (miss) {
+ return true;
+ }
+ return completion == aOther.completion && addChunk == aOther.addChunk;
}
bool findCompletion(const Completion& aCompletion) const override {
return completion == aCompletion;
}
virtual int Ver() const override { return VER; }
};
@@ -179,93 +190,107 @@ public:
// be moved away when a backup is made.
nsresult UpdateRootDirHandle(nsIFile* aRootStoreDirectory);
// Write data stored in lookup cache to disk.
nsresult WriteFile();
bool IsPrimed() const { return mPrimed; };
+ // Called during update to clear expired cache entries.
+ void InvalidateExpiredCacheEntries();
+
+ // Clear completions retrieved from gethash request.
+ void ClearCache();
+
+ // Check if a completion can be found in the cache.
+ // Currently this is only used by test cases.
+ bool IsInCache(uint32_t key) { return mCache.Get(key); };
+
+#if DEBUG
+ void DumpCache();
+#endif
+
virtual nsresult Open();
virtual nsresult Init() = 0;
virtual nsresult ClearPrefixes() = 0;
virtual nsresult Has(const Completion& aCompletion,
- const TableFreshnessMap& aTableFreshness,
- uint32_t aFreshnessGuarantee,
- bool* aHas, uint32_t* aMatchLength,
- bool* aConfirmed, bool* aFromCache) = 0;
-
- // Clear completions retrieved from gethash request.
- virtual void ClearCache() = 0;
+ bool* aHas,
+ uint32_t* aMatchLength,
+ bool* aConfirmed) = 0;
virtual bool IsEmpty() = 0;
virtual void ClearAll();
-#if DEBUG
- virtual void DumpCache() = 0;
-#endif
-
template<typename T>
static T* Cast(LookupCache* aThat) {
return ((aThat && T::VER == aThat->Ver()) ? reinterpret_cast<T*>(aThat) : nullptr);
}
private:
nsresult LoadPrefixSet();
virtual nsresult StoreToFile(nsIFile* aFile) = 0;
virtual nsresult LoadFromFile(nsIFile* aFile) = 0;
virtual size_t SizeOfPrefixSet() = 0;
virtual int Ver() const = 0;
protected:
+ // Check the completion in the positive cache and the prefix in the negative cache.
+ // 'aHas' and 'aConfirmed' are output parameters.
+ nsresult CheckCache(const Completion& aCompletion,
+ bool* aHas,
+ bool* aConfirmed);
+
bool mPrimed;
nsCString mTableName;
nsCString mProvider;
nsCOMPtr<nsIFile> mRootStoreDirectory;
nsCOMPtr<nsIFile> mStoreDirectory;
// For gtest to inspect private members.
friend class PerProviderDirectoryTestUtils;
+
+ // Cache gethash result.
+ FullHashResponseMap mCache;
};
class LookupCacheV2 final : public LookupCache
{
public:
explicit LookupCacheV2(const nsACString& aTableName,
const nsACString& aProvider,
nsIFile* aStoreFile)
: LookupCache(aTableName, aProvider, aStoreFile) {}
~LookupCacheV2() {}
virtual nsresult Init() override;
virtual nsresult Open() override;
- virtual void ClearCache() override;
virtual void ClearAll() override;
virtual nsresult Has(const Completion& aCompletion,
- const TableFreshnessMap& aTableFreshness,
- uint32_t aFreshnessGuarantee,
- bool* aHas, uint32_t* aMatchLength,
- bool* aConfirmed, bool* aFromCache) override;
+ bool* aHas,
+ uint32_t* aMatchLength,
+ bool* aConfirmed) override;
virtual bool IsEmpty() override;
nsresult Build(AddPrefixArray& aAddPrefixes,
AddCompleteArray& aAddCompletes);
nsresult GetPrefixes(FallibleTArray<uint32_t>& aAddPrefixes);
// This will Clear() the passed arrays when done.
- nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
+ // 'aExpirySec' is used by test cases to configure the expiry time.
+ void AddGethashResultToCache(AddCompleteArray& aAddCompletes,
+ MissPrefixArray& aMissPrefixes,
+ int64_t aExpirySec = 0);
#if DEBUG
- virtual void DumpCache() override;
-
void DumpCompletions();
#endif
static const int VER;
protected:
nsresult ReadCompletions();
@@ -281,17 +306,14 @@ private:
// This will Clear() aAddPrefixes when done.
nsresult ConstructPrefixSet(AddPrefixArray& aAddPrefixes);
// Full length hashes obtained in update request
CompletionArray mUpdateCompletions;
// Set of prefixes known to be in the database
RefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
-
- // Full length hashes obtained in gethash request
- CompletionArray mGetHashCache;
};
} // namespace safebrowsing
} // namespace mozilla
#endif
--- a/toolkit/components/url-classifier/LookupCacheV4.cpp
+++ b/toolkit/components/url-classifier/LookupCacheV4.cpp
@@ -75,22 +75,21 @@ LookupCacheV4::Init()
nsresult rv = mVLPrefixSet->Init(mTableName);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
LookupCacheV4::Has(const Completion& aCompletion,
- const TableFreshnessMap& aTableFreshness,
- uint32_t aFreshnessGuarantee,
- bool* aHas, uint32_t* aMatchLength,
- bool* aConfirmed, bool* aFromCache)
+ bool* aHas,
+ uint32_t* aMatchLength,
+ bool* aConfirmed)
{
- *aHas = *aConfirmed = *aFromCache = false;
+ *aHas = *aConfirmed = false;
*aMatchLength = 0;
uint32_t length = 0;
nsDependentCSubstring fullhash;
fullhash.Rebind((const char *)aCompletion.buf, COMPLETE_SIZE);
nsresult rv = mVLPrefixSet->Matches(fullhash, &length);
NS_ENSURE_SUCCESS(rv, rv);
@@ -106,73 +105,19 @@ LookupCacheV4::Has(const Completion& aCo
prefix, *aHas, length == COMPLETE_SIZE));
}
// Check if fullhash match any prefix in the local database
if (!(*aHas)) {
return NS_OK;
}
- // We always send 4-bytes for completion(Bug 1323953) so the prefix used to
- // lookup for cache should be 4-bytes(uint32_t) too.
- uint32_t prefix = aCompletion.ToUint32();
-
- // Check if prefix can be found in cache.
- CachedFullHashResponse* fullHashResponse = mCache.Get(prefix);
- if (!fullHashResponse) {
- return NS_OK;
- }
-
- *aFromCache = true;
-
- int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
- int64_t expiryTime;
-
- FullHashExpiryCache& fullHashes = fullHashResponse->fullHashes;
- nsDependentCSubstring completion(
- reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
-
- // Check if we can find the fullhash in positive cache
- if (fullHashes.Get(completion, &expiryTime)) {
- if (nowSec <= expiryTime) {
- // Url is NOT safe.
- *aConfirmed = true;
- LOG(("Found a valid fullhash in the positive cache"));
- } else {
- // Trigger a gethash request in this case(aConfirmed is false).
- LOG(("Found an expired fullhash in the positive cache"));
-
- // Remove fullhash entry from the cache when the negative cache
- // is also expired because whether or not the fullhash is cached
- // locally, we will need to consult the server next time we
- // lookup this hash. We may as well remove it from our cache.
- if (fullHashResponse->negativeCacheExpirySec < expiryTime) {
- fullHashes.Remove(completion);
- if (fullHashes.Count() == 0 &&
- fullHashResponse->negativeCacheExpirySec < nowSec) {
- mCache.Remove(prefix);
- }
- }
- }
- return NS_OK;
- }
-
- // Check negative cache.
- if (fullHashResponse->negativeCacheExpirySec >= nowSec) {
- // Url is safe.
- LOG(("Found a valid prefix in the negative cache"));
- *aHas = false;
- } else {
- LOG(("Found an expired prefix in the negative cache"));
- if (fullHashes.Count() == 0) {
- mCache.Remove(prefix);
- }
- }
-
- return NS_OK;
+ // Even though V4 supports variable-length prefix, we always send 4-bytes for
+ // completion (Bug 1323953). This means cached prefix length is also 4-bytes.
+ return CheckCache(aCompletion, aHas, aConfirmed);
}
bool
LookupCacheV4::IsEmpty()
{
bool isEmpty;
mVLPrefixSet->IsEmpty(&isEmpty);
return isEmpty;
@@ -598,91 +543,16 @@ LookupCacheV4::LoadMetadata(nsACString&
if (NS_FAILED(rv)) {
LOG(("Failed to read checksum."));
return rv;
}
return rv;
}
-void
-LookupCacheV4::ClearCache()
-{
- mCache.Clear();
-}
-
-// This function remove cache entries whose negative cache time is expired.
-// It is possible that a cache entry whose positive cache time is not yet
-// expired but still being removed after calling this API. Right now we call
-// this on every update.
-void
-LookupCacheV4::InvalidateExpiredCacheEntry()
-{
- int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
-
- for (auto iter = mCache.Iter(); !iter.Done(); iter.Next()) {
- CachedFullHashResponse* response = iter.Data();
- if (response->negativeCacheExpirySec < nowSec) {
- iter.Remove();
- }
- }
-}
-
-#if defined(DEBUG)
-static
-void CStringToHexString(const nsACString& aIn, nsACString& aOut)
-{
- static const char* const lut = "0123456789ABCDEF";
- // 32 bytes is the longest hash
- size_t len = COMPLETE_SIZE;
-
- aOut.SetCapacity(2 * len);
- for (size_t i = 0; i < aIn.Length(); ++i) {
- const char c = static_cast<const char>(aIn[i]);
- aOut.Append(lut[(c >> 4) & 0x0F]);
- aOut.Append(lut[c & 15]);
- }
-}
-
-static
-nsCString GetFormattedTimeString(int64_t aCurTimeSec)
-{
- PRExplodedTime pret;
- PR_ExplodeTime(aCurTimeSec * PR_USEC_PER_SEC, PR_GMTParameters, &pret);
-
- return nsPrintfCString(
- "%04d-%02d-%02d %02d:%02d:%02d UTC",
- pret.tm_year, pret.tm_month + 1, pret.tm_mday,
- pret.tm_hour, pret.tm_min, pret.tm_sec);
-}
-
-void
-LookupCacheV4::DumpCache()
-{
- if (!LOG_ENABLED()) {
- return;
- }
-
- for (auto iter = mCache.ConstIter(); !iter.Done(); iter.Next()) {
- CachedFullHashResponse* response = iter.Data();
- LOG(("Caches prefix: %X, Expire time: %s",
- iter.Key(),
- GetFormattedTimeString(response->negativeCacheExpirySec).get()));
-
- FullHashExpiryCache& fullHashes = response->fullHashes;
- for (auto iter2 = fullHashes.ConstIter(); !iter2.Done(); iter2.Next()) {
- nsAutoCString strFullhash;
- CStringToHexString(iter2.Key(), strFullhash);
- LOG((" - %s, Expire time: %s", strFullhash.get(),
- GetFormattedTimeString(iter2.Data()).get()));
- }
- }
-}
-#endif
-
VLPrefixSet::VLPrefixSet(const PrefixStringMap& aMap)
: mCount(0)
{
for (auto iter = aMap.ConstIter(); !iter.Done(); iter.Next()) {
uint32_t size = iter.Key();
mMap.Put(size, new PrefixString(*iter.Data(), size));
mCount += iter.Data()->Length() / size;
}
--- a/toolkit/components/url-classifier/LookupCacheV4.h
+++ b/toolkit/components/url-classifier/LookupCacheV4.h
@@ -20,26 +20,19 @@ public:
explicit LookupCacheV4(const nsACString& aTableName,
const nsACString& aProvider,
nsIFile* aStoreFile)
: LookupCache(aTableName, aProvider, aStoreFile) {}
~LookupCacheV4() {}
virtual nsresult Init() override;
virtual nsresult Has(const Completion& aCompletion,
- const TableFreshnessMap& aTableFreshness,
- uint32_t aFreshnessGuarantee,
- bool* aHas, uint32_t* aMatchLength,
- bool* aConfirmed, bool* aFromCache) override;
-
- virtual void ClearCache() override;
-
-#if DEBUG
- virtual void DumpCache() override;
-#endif
+ bool* aHas,
+ uint32_t* aMatchLength,
+ bool* aConfirmed) override;
virtual bool IsEmpty() override;
nsresult Build(PrefixStringMap& aPrefixMap);
nsresult GetPrefixes(PrefixStringMap& aPrefixMap);
nsresult GetFixedLengthPrefixes(FallibleTArray<uint32_t>& aPrefixes);
@@ -48,33 +41,29 @@ public:
PrefixStringMap& aInputMap,
PrefixStringMap& aOutputMap);
nsresult AddFullHashResponseToCache(const FullHashResponseMap& aResponseMap);
nsresult WriteMetadata(TableUpdateV4* aTableUpdate);
nsresult LoadMetadata(nsACString& aState, nsACString& aChecksum);
- void InvalidateExpiredCacheEntry();
-
static const int VER;
protected:
virtual nsresult ClearPrefixes() override;
virtual nsresult StoreToFile(nsIFile* aFile) override;
virtual nsresult LoadFromFile(nsIFile* aFile) override;
virtual size_t SizeOfPrefixSet() override;
private:
virtual int Ver() const override { return VER; }
nsresult InitCrypto(nsCOMPtr<nsICryptoHash>& aCrypto);
nsresult VerifyChecksum(const nsACString& aChecksum);
RefPtr<VariableLengthPrefixSet> mVLPrefixSet;
-
- FullHashResponseMap mCache;
};
} // namespace safebrowsing
} // namespace mozilla
#endif
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
@@ -192,17 +192,17 @@ nsUrlClassifierDBServiceWorker::DoLocalL
}
// Bail if we haven't been initialized on the background thread.
if (!mClassifier) {
return NS_ERROR_NOT_AVAILABLE;
}
// We ignore failures from Check because we'd rather return the
// results that were found than fail.
- mClassifier->Check(spec, tables, gFreshnessGuarantee, *results);
+ mClassifier->Check(spec, tables, *results);
LOG(("Found %" PRIuSIZE " results.", results->Length()));
return NS_OK;
}
static nsresult
ProcessLookupResults(LookupResultArray* results, nsTArray<nsCString>& tables)
{
@@ -260,44 +260,34 @@ nsUrlClassifierDBServiceWorker::DoLookup
if (LOG_ENABLED()) {
PRIntervalTime clockEnd = PR_IntervalNow();
LOG(("query took %dms\n",
PR_IntervalToMilliseconds(clockEnd - clockStart)));
}
- nsAutoPtr<LookupResultArray> completes(new LookupResultArray());
-
for (uint32_t i = 0; i < results->Length(); i++) {
const LookupResult& lookupResult = results->ElementAt(i);
- // mMissCache should only be used in V2.
- if (!lookupResult.mProtocolV2 ||
- !mMissCache.Contains(lookupResult.hash.fixedLengthPrefix)) {
- completes->AppendElement(lookupResult);
- }
- }
-
- for (uint32_t i = 0; i < completes->Length(); i++) {
- if (!completes->ElementAt(i).Confirmed() &&
- mDBService->CanComplete(completes->ElementAt(i).mTableName)) {
+ if (!lookupResult.Confirmed() &&
+ mDBService->CanComplete(lookupResult.mTableName)) {
// We're going to be doing a gethash request, add some extra entries.
// Note that we cannot pass the first two by reference, because we
// add to completes, whicah can cause completes to reallocate and move.
- AddNoise(completes->ElementAt(i).hash.fixedLengthPrefix,
- completes->ElementAt(i).mTableName,
- mGethashNoise, *completes);
+ AddNoise(lookupResult.hash.fixedLengthPrefix,
+ lookupResult.mTableName,
+ mGethashNoise, *results);
break;
}
}
// At this point ownership of 'results' is handed to the callback.
- c->LookupComplete(completes.forget());
+ c->LookupComplete(results.forget());
return NS_OK;
}
nsresult
nsUrlClassifierDBServiceWorker::HandlePendingLookups()
{
if (gShuttingDownThread) {
@@ -678,18 +668,16 @@ nsUrlClassifierDBServiceWorker::NotifyUp
}
// Do not record telemetry for testing tables.
if (!provider.Equals(TESTING_TABLE_PROVIDER_NAME)) {
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_UPDATE_ERROR, provider,
NS_ERROR_GET_CODE(updateStatus));
}
- mMissCache.Clear();
-
// Null out mUpdateObserver before notifying so that BeginUpdate()
// becomes available prior to callback.
nsCOMPtr<nsIUrlClassifierUpdateObserver> updateObserver = nullptr;
updateObserver.swap(mUpdateObserver);
if (NS_SUCCEEDED(mUpdateStatus)) {
LOG(("Notifying success: %d", mUpdateWaitSec));
updateObserver->UpdateSuccess(mUpdateWaitSec);
@@ -859,17 +847,17 @@ nsUrlClassifierDBServiceWorker::CacheCom
for (uint32_t table = 0; table < tables.Length(); table++) {
if (tables[table].Equals(result->table)) {
activeTable = true;
break;
}
}
if (activeTable) {
nsAutoPtr<ProtocolParser> pParse;
- pParse = resultsPtr->ElementAt(i)->Ver() == CacheResult::V2 ?
+ pParse = result->Ver() == CacheResult::V2 ?
static_cast<ProtocolParser*>(new ProtocolParserV2()) :
static_cast<ProtocolParser*>(new ProtocolParserProtobuf());
TableUpdate* tu = pParse->GetTableUpdate(result->table);
rv = CacheResultToTableUpdate(result, tu);
if (NS_FAILED(rv)) {
// We can bail without leaking here because ForgetTableUpdates
@@ -892,25 +880,28 @@ nsresult
nsUrlClassifierDBServiceWorker::CacheResultToTableUpdate(CacheResult* aCacheResult,
TableUpdate* aUpdate)
{
auto tuV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
if (tuV2) {
auto result = CacheResult::Cast<CacheResultV2>(aCacheResult);
MOZ_ASSERT(result);
- LOG(("CacheCompletion hash %X, Addchunk %d", result->completion.ToUint32(),
- result->addChunk));
+ if (result->miss) {
+ return tuV2->NewMissPrefix(result->prefix);
+ } else {
+ LOG(("CacheCompletion hash %X, Addchunk %d", result->completion.ToUint32(),
+ result->addChunk));
- nsresult rv = tuV2->NewAddComplete(result->addChunk, result->completion);
- if (NS_FAILED(rv)) {
- return rv;
+ nsresult rv = tuV2->NewAddComplete(result->addChunk, result->completion);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ return tuV2->NewAddChunk(result->addChunk);
}
- rv = tuV2->NewAddChunk(result->addChunk);
- return rv;
}
auto tuV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
if (tuV4) {
auto result = CacheResult::Cast<CacheResultV4>(aCacheResult);
MOZ_ASSERT(result);
if (LOG_ENABLED()) {
@@ -927,31 +918,16 @@ nsUrlClassifierDBServiceWorker::CacheRes
return NS_OK;
}
// tableUpdate object should be either V2 or V4.
return NS_ERROR_FAILURE;
}
nsresult
-nsUrlClassifierDBServiceWorker::CacheMisses(PrefixArray *results)
-{
- LOG(("nsUrlClassifierDBServiceWorker::CacheMisses [%p] %" PRIuSIZE,
- this, results->Length()));
-
- // Ownership is transferred in to us
- nsAutoPtr<PrefixArray> resultsPtr(results);
-
- for (uint32_t i = 0; i < resultsPtr->Length(); i++) {
- mMissCache.AppendElement(resultsPtr->ElementAt(i));
- }
- return NS_OK;
-}
-
-nsresult
nsUrlClassifierDBServiceWorker::OpenDb()
{
if (gShuttingDownThread) {
return NS_ERROR_ABORT;
}
MOZ_ASSERT(!NS_IsMainThread(), "Must initialize DB on background thread");
// Connection already open, don't do anything.
@@ -1049,16 +1025,17 @@ public:
, mCallback(c)
{}
private:
~nsUrlClassifierLookupCallback();
nsresult HandleResults();
nsresult ProcessComplete(CacheResult* aCacheResult);
+ nsresult CacheMisses();
RefPtr<nsUrlClassifierDBService> mDBService;
nsAutoPtr<LookupResultArray> mResults;
// Completed results to send back to the worker for caching.
nsAutoPtr<CacheResultArray> mCacheResults;
uint32_t mPendingCompletions;
@@ -1390,32 +1367,19 @@ nsUrlClassifierLookupCallback::HandleRes
}
}
if (matchResult != MatchResult::eTelemetryDisabled) {
Telemetry::Accumulate(Telemetry::URLCLASSIFIER_MATCH_RESULT,
MatchResultToUint(matchResult));
}
- // TODO: Bug 1333328, Refactor cache miss mechanism for v2.
// Some parts of this gethash request generated no hits at all.
- // Prefixes must have been removed from the database since our last update.
- // Save the prefixes we checked to prevent repeated requests
- // until the next update.
- nsAutoPtr<PrefixArray> cacheMisses(new PrefixArray());
- if (cacheMisses) {
- for (uint32_t i = 0; i < mResults->Length(); i++) {
- LookupResult &result = mResults->ElementAt(i);
- if (result.mProtocolV2 && !result.Confirmed() && !result.mNoise) {
- cacheMisses->AppendElement(result.hash.fixedLengthPrefix);
- }
- }
- // Hands ownership of the miss array back to the worker thread.
- mDBService->CacheMisses(cacheMisses.forget());
- }
+ // Save the prefixes we checked to prevent repeated requests.
+ CacheMisses();
if (mCacheResults) {
// This hands ownership of the cache results array back to the worker
// thread.
mDBService->CacheCompletions(mCacheResults.forget());
}
nsAutoCString tableStr;
@@ -1423,16 +1387,44 @@ nsUrlClassifierLookupCallback::HandleRes
if (i != 0)
tableStr.Append(',');
tableStr.Append(tables[i]);
}
return mCallback->HandleEvent(tableStr);
}
+nsresult
+nsUrlClassifierLookupCallback::CacheMisses()
+{
+ for (uint32_t i = 0; i < mResults->Length(); i++) {
+ const LookupResult &result = mResults->ElementAt(i);
+ // Skip V4 because cache information is already included in the
+ // fullhash response so we don't need to manually add it here.
+ if (!result.mProtocolV2 || result.Confirmed() || result.mNoise) {
+ continue;
+ }
+
+ if (!mCacheResults) {
+ mCacheResults = new CacheResultArray();
+ if (!mCacheResults) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ auto cacheResult = new CacheResultV2;
+
+ cacheResult->table = result.mTableName;
+ cacheResult->prefix = result.hash.fixedLengthPrefix;
+ cacheResult->miss = true;
+ mCacheResults->AppendElement(cacheResult);
+ }
+ return NS_OK;
+}
+
struct Provider {
nsCString name;
uint8_t priority;
};
// Order matters
// Provider which is not included in this table has the lowest priority 0
static const Provider kBuiltInProviders[] = {
@@ -2252,24 +2244,16 @@ nsUrlClassifierDBService::ReloadDatabase
nsresult
nsUrlClassifierDBService::CacheCompletions(CacheResultArray *results)
{
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
return mWorkerProxy->CacheCompletions(results);
}
-nsresult
-nsUrlClassifierDBService::CacheMisses(PrefixArray *results)
-{
- NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
-
- return mWorkerProxy->CacheMisses(results);
-}
-
bool
nsUrlClassifierDBService::CanComplete(const nsACString &aTableName)
{
return mGethashTables.Contains(aTableName) &&
!mDisallowCompletionsTables.Contains(aTableName);
}
bool
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.h
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.h
@@ -99,17 +99,16 @@ public:
NS_DECL_NSIURLCLASSIFIERDBSERVICE
NS_DECL_NSIURICLASSIFIER
NS_DECL_NSIOBSERVER
bool CanComplete(const nsACString &tableName);
bool GetCompleter(const nsACString& tableName,
nsIUrlClassifierHashCompleter** completer);
nsresult CacheCompletions(mozilla::safebrowsing::CacheResultArray *results);
- nsresult CacheMisses(mozilla::safebrowsing::PrefixArray *results);
static nsIThread* BackgroundThread();
static bool ShutdownHasStarted();
private:
const nsTArray<nsCString> kObservedPrefs = {
@@ -213,17 +212,16 @@ public:
// Open the DB connection
nsresult GCC_MANGLING_WORKAROUND OpenDb();
// Provide a way to forcibly close the db connection.
nsresult GCC_MANGLING_WORKAROUND CloseDb();
nsresult CacheCompletions(CacheResultArray * aEntries);
- nsresult CacheMisses(PrefixArray * aEntries);
// Used to probe the state of the worker thread. When the update begins,
// mUpdateObserver will be set. When the update finished, mUpdateObserver
// will be nulled out in NotifyUpdateObserver.
bool IsBusyUpdating() const { return !!mUpdateObserver; }
// Delegate Classifier to disable async update. If there is an
// ongoing update on the update thread, we will be blocked until
@@ -274,20 +272,16 @@ private:
RefPtr<nsUrlClassifierDBService> mDBService;
// XXX: maybe an array of autoptrs. Or maybe a class specifically
// storing a series of updates.
nsTArray<mozilla::safebrowsing::TableUpdate*> mTableUpdates;
uint32_t mUpdateWaitSec;
- // Entries that cannot be completed. We expect them to die at
- // the next update
- PrefixArray mMissCache;
-
// Stores the last results that triggered a table update.
nsAutoPtr<CacheResultArray> mLastResults;
nsresult mUpdateStatus;
nsTArray<nsCString> mUpdateTables;
nsCOMPtr<nsIUrlClassifierUpdateObserver> mUpdateObserver;
bool mInStream;
--- a/toolkit/components/url-classifier/nsUrlClassifierProxies.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierProxies.cpp
@@ -206,30 +206,16 @@ UrlClassifierDBServiceWorkerProxy::Cache
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::CacheCompletionsRunnable::Run()
{
mTarget->CacheCompletions(mEntries);
return NS_OK;
}
-nsresult
-UrlClassifierDBServiceWorkerProxy::CacheMisses(PrefixArray * aEntries)
-{
- nsCOMPtr<nsIRunnable> r = new CacheMissesRunnable(mTarget, aEntries);
- return DispatchToWorkerThread(r);
-}
-
-NS_IMETHODIMP
-UrlClassifierDBServiceWorkerProxy::CacheMissesRunnable::Run()
-{
- mTarget->CacheMisses(mEntries);
- return NS_OK;
-}
-
NS_IMETHODIMP
UrlClassifierDBServiceWorkerProxy::SetLastUpdateTime(const nsACString& table,
uint64_t lastUpdateTime)
{
nsCOMPtr<nsIRunnable> r =
new SetLastUpdateTimeRunnable(mTarget, table, lastUpdateTime);
return DispatchToWorkerThread(r);
}
--- a/toolkit/components/url-classifier/nsUrlClassifierProxies.h
+++ b/toolkit/components/url-classifier/nsUrlClassifierProxies.h
@@ -128,32 +128,16 @@ public:
NS_DECL_NSIRUNNABLE
private:
RefPtr<nsUrlClassifierDBServiceWorker> mTarget;
mozilla::safebrowsing::CacheResultArray *mEntries;
};
- class CacheMissesRunnable : public mozilla::Runnable
- {
- public:
- CacheMissesRunnable(nsUrlClassifierDBServiceWorker* aTarget,
- mozilla::safebrowsing::PrefixArray *aEntries)
- : mTarget(aTarget)
- , mEntries(aEntries)
- { }
-
- NS_DECL_NSIRUNNABLE
-
- private:
- RefPtr<nsUrlClassifierDBServiceWorker> mTarget;
- mozilla::safebrowsing::PrefixArray *mEntries;
- };
-
class DoLocalLookupRunnable : public mozilla::Runnable
{
public:
DoLocalLookupRunnable(nsUrlClassifierDBServiceWorker* aTarget,
const nsACString& spec,
const nsACString& tables,
mozilla::safebrowsing::LookupResultArray* results)
: mTarget(aTarget)
@@ -205,17 +189,16 @@ public:
nsresult DoLocalLookup(const nsACString& spec,
const nsACString& tables,
mozilla::safebrowsing::LookupResultArray* results);
nsresult OpenDb();
nsresult CloseDb();
nsresult CacheCompletions(mozilla::safebrowsing::CacheResultArray * aEntries);
- nsresult CacheMisses(mozilla::safebrowsing::PrefixArray * aEntries);
private:
~UrlClassifierDBServiceWorkerProxy() {}
RefPtr<nsUrlClassifierDBServiceWorker> mTarget;
};
// The remaining classes here are all proxies to the main thread
--- a/toolkit/components/url-classifier/tests/gtest/Common.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/Common.cpp
@@ -132,17 +132,16 @@ PrefixArrayToAddPrefixArrayV2(const nsTA
{
out.Clear();
for (size_t i = 0; i < prefixArray.Length(); i++) {
// Create prefix hash from string
Prefix hash;
static_assert(sizeof(hash.buf) == PREFIX_SIZE, "Prefix must be 4 bytes length");
memcpy(hash.buf, prefixArray[i].BeginReading(), PREFIX_SIZE);
- MOZ_ASSERT(prefixArray[i].Length() == PREFIX_SIZE);
AddPrefix *add = out.AppendElement(fallible);
if (!add) {
return NS_ERROR_OUT_OF_MEMORY;
}
add->addChunk = i;
add->prefix = hash;
@@ -158,27 +157,48 @@ GeneratePrefix(const nsCString& aFragmen
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
complete.FromPlaintext(aFragment, cryptoHash);
nsCString hash;
hash.Assign((const char *)complete.buf, aLength);
return hash;
}
-UniquePtr<LookupCacheV4>
-SetupLookupCacheV4(const _PrefixArray& prefixArray)
+static nsresult
+BuildCache(LookupCacheV2* cache, const _PrefixArray& prefixArray)
+{
+ AddPrefixArray prefixes;
+ AddCompleteArray completions;
+ nsresult rv = PrefixArrayToAddPrefixArrayV2(prefixArray, prefixes);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ EntrySort(prefixes);
+ return cache->Build(prefixes, completions);
+}
+
+static nsresult
+BuildCache(LookupCacheV4* cache, const _PrefixArray& prefixArray)
+{
+ PrefixStringMap map;
+ PrefixArrayToPrefixStringMap(prefixArray, map);
+ return cache->Build(map);
+}
+
+template<typename T>
+UniquePtr<T>
+SetupLookupCache(const _PrefixArray& prefixArray)
{
nsCOMPtr<nsIFile> file;
NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
file->AppendNative(GTEST_SAFEBROWSING_DIR);
- UniquePtr<LookupCacheV4> cache = MakeUnique<LookupCacheV4>(GTEST_TABLE, EmptyCString(), file);
+ UniquePtr<T> cache = MakeUnique<T>(GTEST_TABLE, EmptyCString(), file);
nsresult rv = cache->Init();
EXPECT_EQ(rv, NS_OK);
- PrefixStringMap map;
- PrefixArrayToPrefixStringMap(prefixArray, map);
- rv = cache->Build(map);
+ rv = BuildCache(cache.get(), prefixArray);
EXPECT_EQ(rv, NS_OK);
return Move(cache);
}
--- a/toolkit/components/url-classifier/tests/gtest/Common.h
+++ b/toolkit/components/url-classifier/tests/gtest/Common.h
@@ -40,9 +40,10 @@ void PrefixArrayToPrefixStringMap(const
nsresult PrefixArrayToAddPrefixArrayV2(const nsTArray<nsCString>& prefixArray,
AddPrefixArray& out);
// Generate a hash prefix from string
nsCString GeneratePrefix(const nsCString& aFragment, uint8_t aLength);
// Create a LookupCacheV4 object with sepecified prefix array.
-UniquePtr<LookupCacheV4> SetupLookupCacheV4(const _PrefixArray& prefixArray);
+template<typename T>
+UniquePtr<T> SetupLookupCache(const _PrefixArray& prefixArray);
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/gtest/TestCaching.cpp
@@ -0,0 +1,285 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Common.h"
+
+#define EXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC - 3600)
+#define NOTEXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC + 3600)
+
+static void
+SetupCacheEntry(LookupCacheV2* aLookupCache,
+ const nsCString& aCompletion,
+ bool aNegExpired = false,
+ bool aPosExpired = false)
+{
+ AddCompleteArray completes;
+ AddCompleteArray emptyCompletes;
+ MissPrefixArray misses;
+ MissPrefixArray emptyMisses;
+
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+
+ AddComplete* add = completes.AppendElement(fallible);
+ add->complete.FromPlaintext(aCompletion, cryptoHash);
+
+ Prefix* prefix = misses.AppendElement(fallible);
+ prefix->FromPlaintext(aCompletion, cryptoHash);
+
+ int64_t negExpirySec = aNegExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
+ aLookupCache->AddGethashResultToCache(emptyCompletes, misses, negExpirySec);
+
+ int64_t posExpirySec = aPosExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
+ aLookupCache->AddGethashResultToCache(completes, emptyMisses, posExpirySec);
+}
+
+static void
+SetupCacheEntry(LookupCacheV4* aLookupCache,
+ const nsCString& aCompletion,
+ bool aNegExpired = false,
+ bool aPosExpired = false)
+{
+ FullHashResponseMap map;
+
+ Prefix prefix;
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+ prefix.FromPlaintext(aCompletion, cryptoHash);
+
+ CachedFullHashResponse* response = map.LookupOrAdd(prefix.ToUint32());
+
+ response->negativeCacheExpirySec = aNegExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
+ response->fullHashes.Put(GeneratePrefix(aCompletion, COMPLETE_SIZE),
+ aPosExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC);
+
+ aLookupCache->AddFullHashResponseToCache(map);
+}
+
+template<typename T>
+void
+TestCache(const Completion aCompletion,
+ bool aExpectedHas,
+ bool aExpectedConfirmed,
+ bool aExpectedInCache,
+ T* aCache = nullptr)
+{
+ bool has, inCache, confirmed;
+ uint32_t matchLength;
+
+ if (aCache) {
+ aCache->Has(aCompletion, &has, &matchLength, &confirmed);
+ inCache = aCache->IsInCache(aCompletion.ToUint32());
+ } else {
+ _PrefixArray array = { GeneratePrefix(_Fragment("cache.notexpired.com/"), 10),
+ GeneratePrefix(_Fragment("cache.expired.com/"), 8),
+ GeneratePrefix(_Fragment("gound.com/"), 5),
+ GeneratePrefix(_Fragment("small.com/"), 4)
+ };
+
+ UniquePtr<T> cache = SetupLookupCache<T>(array);
+
+ // Create an expired entry and a non-expired entry
+ SetupCacheEntry(cache.get(), _Fragment("cache.notexpired.com/"));
+ SetupCacheEntry(cache.get(), _Fragment("cache.expired.com/"), true, true);
+
+ cache->Has(aCompletion, &has, &matchLength, &confirmed);
+ inCache = cache->IsInCache(aCompletion.ToUint32());
+ }
+
+ EXPECT_EQ(has, aExpectedHas);
+ EXPECT_EQ(confirmed, aExpectedConfirmed);
+ EXPECT_EQ(inCache, aExpectedInCache);
+}
+
+template<typename T>
+void
+TestCache(const _Fragment& aFragment,
+ bool aExpectedHas,
+ bool aExpectedConfirmed,
+ bool aExpectedInCache,
+ T* aCache = nullptr)
+{
+ Completion lookupHash;
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+ lookupHash.FromPlaintext(aFragment, cryptoHash);
+
+ TestCache<T>(lookupHash, aExpectedHas, aExpectedConfirmed, aExpectedInCache, aCache);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// cannot match any prefix in the local database.
+TEST(UrlClassifierCaching, NotFound)
+{
+ TestCache<LookupCacheV2>(_Fragment("nomatch.com/"), false, false, false);
+ TestCache<LookupCacheV4>(_Fragment("nomatch.com/"), false, false, false);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// finds a match in the local database but not in the cache.
+TEST(UrlClassifierCaching, NotInCache)
+{
+ TestCache<LookupCacheV2>(_Fragment("gound.com/"), true, false, false);
+ TestCache<LookupCacheV4>(_Fragment("gound.com/"), true, false, false);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// matches a cache entry in the positive cache.
+TEST(UrlClassifierCaching, InPositiveCacheNotExpired)
+{
+ TestCache<LookupCacheV2>(_Fragment("cache.notexpired.com/"), true, true, true);
+ TestCache<LookupCacheV4>(_Fragment("cache.notexpired.com/"), true, true, true);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// matches an entry in the positive cache that has expired.
+TEST(UrlClassifierCaching, InPositiveCacheExpired)
+{
+ TestCache<LookupCacheV2>(_Fragment("cache.expired.com/"), true, false, true);
+ TestCache<LookupCacheV4>(_Fragment("cache.expired.com/"), true, false, true);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// matches a cache entry in the negative cache.
+TEST(UrlClassifierCaching, InNegativeCacheNotExpired)
+{
+ // Create a fullhash whose prefix matches the prefix in negative cache
+ // but completion doesn't match any fullhash in positive cache.
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+
+ Completion prefix;
+ prefix.FromPlaintext(_Fragment("cache.notexpired.com/"), cryptoHash);
+
+ Completion fullhash;
+ fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
+
+ // Overwrite the 4-byte prefix of `fullhash` so that it conflicts with `prefix`.
+ // Since "cache.notexpired.com" is added to database in TestCache as a
+ // 10-byte prefix, we should copy more than 10 bytes to fullhash to ensure
+ // it can match the prefix in database.
+ memcpy(fullhash.buf, prefix.buf, 10);
+
+ TestCache<LookupCacheV2>(fullhash, false, false, true);
+ TestCache<LookupCacheV4>(fullhash, false, false, true);
+}
+
+// This testcase checks the result returned by the |Has| API when the fullhash
+// matches a cache entry in the negative cache but that entry is expired.
+TEST(UrlClassifierCaching, InNegativeCacheExpired)
+{
+ // Create a fullhash whose prefix is in the cache.
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+
+ Completion prefix;
+ prefix.FromPlaintext(_Fragment("cache.expired.com/"), cryptoHash);
+
+ Completion fullhash;
+ fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
+
+ memcpy(fullhash.buf, prefix.buf, 10);
+
+ TestCache<LookupCacheV2>(fullhash, true, false, true);
+ TestCache<LookupCacheV4>(fullhash, true, false, true);
+}
+
+#define CACHED_URL _Fragment("cache.com/")
+#define NEG_CACHE_EXPIRED_URL _Fragment("cache.negExpired.com/")
+#define POS_CACHE_EXPIRED_URL _Fragment("cache.posExpired.com/")
+#define BOTH_CACHE_EXPIRED_URL _Fragment("cache.negAndposExpired.com/")
+
+// This testcase creates 4 cache entries:
+// 1. unexpired entry.
+// 2. an entry whose negative cache time is expired but whose positive cache
+// is not expired.
+// 3. an entry whose positive cache time is expired
+// 4. an entry whose negative cache time and positive cache time are expired
+// After calling the |InvalidateExpiredCacheEntries| API, entries with an
+// expired negative cache time should be removed from the cache (2 & 4).
+template<typename T>
+void TestInvalidateExpiredCacheEntry()
+{
+ _PrefixArray array = { GeneratePrefix(CACHED_URL, 10),
+ GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8),
+ GeneratePrefix(POS_CACHE_EXPIRED_URL, 5),
+ GeneratePrefix(BOTH_CACHE_EXPIRED_URL, 4)
+ };
+ UniquePtr<T> cache = SetupLookupCache<T>(array);
+
+ SetupCacheEntry(cache.get(), CACHED_URL, false, false);
+ SetupCacheEntry(cache.get(), NEG_CACHE_EXPIRED_URL, true, false);
+ SetupCacheEntry(cache.get(), POS_CACHE_EXPIRED_URL, false, true);
+ SetupCacheEntry(cache.get(), BOTH_CACHE_EXPIRED_URL, true, true);
+
+ // Before invalidate
+ TestCache<T>(CACHED_URL, true, true, true, cache.get());
+ TestCache<T>(NEG_CACHE_EXPIRED_URL, true, true, true, cache.get());
+ TestCache<T>(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
+ TestCache<T>(BOTH_CACHE_EXPIRED_URL, true, false, true, cache.get());
+
+ // Call InvalidateExpiredCacheEntries to remove cache entries whose negative
+ // cache time is expired.
+ cache->InvalidateExpiredCacheEntries();
+
+ // After invalidate, NEG_CACHE_EXPIRED_URL & BOTH_CACHE_EXPIRED_URL should
+ // not be found in cache.
+ TestCache<T>(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
+ TestCache<T>(BOTH_CACHE_EXPIRED_URL, true, false, false, cache.get());
+
+ // Other entries should remain the same result.
+ TestCache<T>(CACHED_URL, true, true, true, cache.get());
+ TestCache<T>(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
+}
+
+TEST(UrlClassifierCaching, InvalidateExpiredCacheEntryV2)
+{
+ TestInvalidateExpiredCacheEntry<LookupCacheV2>();
+}
+
+TEST(UrlClassifierCaching, InvalidateExpiredCacheEntryV4)
+{
+ TestInvalidateExpiredCacheEntry<LookupCacheV4>();
+}
+
+// This testcase checks that a cache entry whose negative cache time is expired
+// and which doesn't contain any positive cache entries is removed
+// from the cache after calling |Has|.
+TEST(UrlClassifierCaching, NegativeCacheExpireV2)
+{
+ _PrefixArray array = { GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8) };
+ UniquePtr<LookupCacheV2> cache = SetupLookupCache<LookupCacheV2>(array);
+
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+
+ MissPrefixArray misses;
+ Prefix* prefix = misses.AppendElement(fallible);
+ prefix->FromPlaintext(NEG_CACHE_EXPIRED_URL, cryptoHash);
+
+ AddCompleteArray dummy;
+ cache->AddGethashResultToCache(dummy, misses, EXPIRED_TIME_SEC);
+
+ // Ensure it is in cache in the first place.
+ EXPECT_EQ(cache->IsInCache(prefix->ToUint32()), true);
+
+ // It should be removed after calling Has API.
+ TestCache<LookupCacheV2>(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
+}
+
+TEST(UrlClassifierCaching, NegativeCacheExpireV4)
+{
+ _PrefixArray array = { GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8) };
+ UniquePtr<LookupCacheV4> cache = SetupLookupCache<LookupCacheV4>(array);
+
+ FullHashResponseMap map;
+ Prefix prefix;
+ nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
+ prefix.FromPlaintext(NEG_CACHE_EXPIRED_URL, cryptoHash);
+ CachedFullHashResponse* response = map.LookupOrAdd(prefix.ToUint32());
+
+ response->negativeCacheExpirySec = EXPIRED_TIME_SEC;
+
+ cache->AddFullHashResponseToCache(map);
+
+ // Ensure it is in cache in the first place.
+ EXPECT_EQ(cache->IsInCache(prefix.ToUint32()), true);
+
+ // It should be removed after calling Has API.
+ TestCache<LookupCacheV4>(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
+}
deleted file mode 100644
--- a/toolkit/components/url-classifier/tests/gtest/TestCachingV4.cpp
+++ /dev/null
@@ -1,220 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "Common.h"
-
-#define EXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC - 3600)
-#define NOTEXPIRED_TIME_SEC (PR_Now() / PR_USEC_PER_SEC + 3600)
-
-static void
-SetupCacheEntry(LookupCacheV4* aLookupCache,
- const nsCString& aCompletion,
- bool aNegExpired = false,
- bool aPosExpired = false)
-{
- FullHashResponseMap map;
-
- Prefix prefix;
- nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
- prefix.FromPlaintext(aCompletion, cryptoHash);
-
- CachedFullHashResponse* response = map.LookupOrAdd(prefix.ToUint32());
-
- response->negativeCacheExpirySec = aNegExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
- response->fullHashes.Put(GeneratePrefix(aCompletion, COMPLETE_SIZE),
- aPosExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC);
-
- aLookupCache->AddFullHashResponseToCache(map);
-}
-
-void
-TestCache(const Completion aCompletion,
- bool aExpectedHas,
- bool aExpectedConfirmed,
- bool aExpectedFromCache,
- LookupCacheV4* aCache = nullptr)
-{
- bool has, fromCache, confirmed;
- uint32_t matchLength;
- TableFreshnessMap dummy;
-
- if (aCache) {
- aCache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
- } else {
- _PrefixArray array = { GeneratePrefix(_Fragment("cache.notexpired.com/"), 10),
- GeneratePrefix(_Fragment("cache.expired.com/"), 8),
- GeneratePrefix(_Fragment("gound.com/"), 5),
- GeneratePrefix(_Fragment("small.com/"), 4)
- };
-
- UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
- // Create an expired entry and a non-expired entry
- SetupCacheEntry(cache.get(), _Fragment("cache.notexpired.com/"));
- SetupCacheEntry(cache.get(), _Fragment("cache.expired.com/"), true, true);
-
- cache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
- }
-
- EXPECT_EQ(has, aExpectedHas);
- EXPECT_EQ(confirmed, aExpectedConfirmed);
- EXPECT_EQ(fromCache, aExpectedFromCache);
-}
-
-void
-TestCache(const _Fragment& aFragment,
- bool aExpectedHas,
- bool aExpectedConfirmed,
- bool aExpectedFromCache,
- LookupCacheV4* aCache = nullptr)
-{
- Completion lookupHash;
- nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
- lookupHash.FromPlaintext(aFragment, cryptoHash);
-
- TestCache(lookupHash, aExpectedHas, aExpectedConfirmed, aExpectedFromCache, aCache);
-}
-
-// This testcase check the returned result of |Has| API if fullhash cannot match
-// any prefix in the local database.
-TEST(CachingV4, NotFound)
-{
- TestCache(_Fragment("nomatch.com/"), false, false, false);
-}
-
-// This testcase check the returned result of |Has| API if fullhash find a match
-// in the local database but not in the cache.
-TEST(CachingV4, NotInCache)
-{
- TestCache(_Fragment("gound.com/"), true, false, false);
-}
-
-// This testcase check the returned result of |Has| API if fullhash matches
-// a cache entry in positive cache.
-TEST(CachingV4, InPositiveCacheNotExpired)
-{
- TestCache(_Fragment("cache.notexpired.com/"), true, true, true);
-}
-
-// This testcase check the returned result of |Has| API if fullhash matches
-// a cache entry in positive cache but that it is expired.
-TEST(CachingV4, InPositiveCacheExpired)
-{
- TestCache(_Fragment("cache.expired.com/"), true, false, true);
-}
-
-// This testcase check the returned result of |Has| API if fullhash matches
-// a cache entry in negative cache.
-TEST(CachingV4, InNegativeCacheNotExpired)
-{
- // Create a fullhash whose prefix matches the prefix in negative cache
- // but completion doesn't match any fullhash in positive cache.
- nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
-
- Completion prefix;
- prefix.FromPlaintext(_Fragment("cache.notexpired.com/"), cryptoHash);
-
- Completion fullhash;
- fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
-
- // Overwrite the 4-byte prefix of `fullhash` so that it conflicts with `prefix`.
- // Since "cache.notexpired.com" is added to database in TestCache as a
- // 10-byte prefix, we should copy more than 10 bytes to fullhash to ensure
- // it can match the prefix in database.
- memcpy(fullhash.buf, prefix.buf, 10);
-
- TestCache(fullhash, false, false, true);
-}
-
-// This testcase check the returned result of |Has| API if fullhash matches
-// a cache entry in negative cache but that entry is expired.
-TEST(CachingV4, InNegativeCacheExpired)
-{
- // Create a fullhash whose prefix is in the cache.
- nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
-
- Completion prefix;
- prefix.FromPlaintext(_Fragment("cache.expired.com/"), cryptoHash);
-
- Completion fullhash;
- fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
-
- memcpy(fullhash.buf, prefix.buf, 10);
-
- TestCache(fullhash, true, false, true);
-}
-
-#define CACHED_URL _Fragment("cache.com/")
-#define NEG_CACHE_EXPIRED_URL _Fragment("cache.negExpired.com/")
-#define POS_CACHE_EXPIRED_URL _Fragment("cache.posExpired.com/")
-#define BOTH_CACHE_EXPIRED_URL _Fragment("cache.negAndposExpired.com/")
-
-// This testcase create 4 cache entries.
-// 1. unexpired entry.
-// 2. an entry whose negative cache time is expired but whose positive cache
-// is not expired.
-// 3. an entry whose positive cache time is expired
-// 4. an entry whose negative cache time and positive cache time are expired
-// After calling |InvalidateExpiredCacheEntry| API, entries with expired
-// negative time should be removed from cache(2 & 4)
-TEST(CachingV4, InvalidateExpiredCacheEntry)
-{
- _PrefixArray array = { GeneratePrefix(CACHED_URL, 10),
- GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8),
- GeneratePrefix(POS_CACHE_EXPIRED_URL, 5),
- GeneratePrefix(BOTH_CACHE_EXPIRED_URL, 4)
- };
-
- UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
- SetupCacheEntry(cache.get(), CACHED_URL, false, false);
- SetupCacheEntry(cache.get(), NEG_CACHE_EXPIRED_URL, true, false);
- SetupCacheEntry(cache.get(), POS_CACHE_EXPIRED_URL, false, true);
- SetupCacheEntry(cache.get(), BOTH_CACHE_EXPIRED_URL, true, true);
-
- // Before invalidate
- TestCache(CACHED_URL, true, true, true, cache.get());
- TestCache(NEG_CACHE_EXPIRED_URL, true, true, true, cache.get());
- TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
- TestCache(BOTH_CACHE_EXPIRED_URL, true, false, true, cache.get());
-
- // Call InvalidateExpiredCacheEntry to remove cache entries whose negative cache
- // time is expired
- cache->InvalidateExpiredCacheEntry();
-
- // After invalidate, NEG_CACHE_EXPIRED_URL & BOTH_CACHE_EXPIRED_URL should
- // not be found in cache.
- TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
- TestCache(BOTH_CACHE_EXPIRED_URL, true, false, false, cache.get());
-
- // Other entries should remain the same result.
- TestCache(CACHED_URL, true, true, true, cache.get());
- TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
-}
-
-// This testcase check if an cache entry whose negative cache time is expired
-// and it doesn't have any postive cache entries in it, it should be removed
-// from cache after calling |Has|.
-TEST(CachingV4, NegativeCacheExpire)
-{
- _PrefixArray array = { GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8) };
- UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
- FullHashResponseMap map;
- Prefix prefix;
- nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
- prefix.FromPlaintext(NEG_CACHE_EXPIRED_URL, cryptoHash);
- CachedFullHashResponse* response = map.LookupOrAdd(prefix.ToUint32());
-
- response->negativeCacheExpirySec = EXPIRED_TIME_SEC;
-
- cache->AddFullHashResponseToCache(map);
-
- // The first time we should found it in the cache but the result is not
- // confirmed(because it is expired).
- TestCache(NEG_CACHE_EXPIRED_URL, true, false, true, cache.get());
-
- // The second time it should not be found in the cache again
- TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
-}
--- a/toolkit/components/url-classifier/tests/gtest/TestLookupCacheV4.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/TestLookupCacheV4.cpp
@@ -9,34 +9,33 @@ TestHasPrefix(const _Fragment& aFragment
{
_PrefixArray array = { GeneratePrefix(_Fragment("bravo.com/"), 32),
GeneratePrefix(_Fragment("browsing.com/"), 8),
GeneratePrefix(_Fragment("gound.com/"), 5),
GeneratePrefix(_Fragment("small.com/"), 4)
};
RunTestInNewThread([&] () -> void {
- UniquePtr<LookupCache> cache = SetupLookupCacheV4(array);
+ UniquePtr<LookupCache> cache = SetupLookupCache<LookupCacheV4>(array);
Completion lookupHash;
nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
lookupHash.FromPlaintext(aFragment, cryptoHash);
- bool has, confirmed, fromCache;
+ bool has, confirmed;
uint32_t matchLength;
// Freshness is not used in V4 so we just put dummy values here.
TableFreshnessMap dummy;
- nsresult rv = cache->Has(lookupHash, dummy, 0,
- &has, &matchLength, &confirmed, &fromCache);
+ nsresult rv =
+ cache->Has(lookupHash, &has, &matchLength, &confirmed);
EXPECT_EQ(rv, NS_OK);
EXPECT_EQ(has, aExpectedHas);
EXPECT_EQ(matchLength == COMPLETE_SIZE, aExpectedComplete);
EXPECT_EQ(confirmed, false);
- EXPECT_EQ(fromCache, false);
cache->ClearAll();
});
}
TEST(LookupCacheV4, HasComplete)
{
--- a/toolkit/components/url-classifier/tests/gtest/moz.build
+++ b/toolkit/components/url-classifier/tests/gtest/moz.build
@@ -5,17 +5,17 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
LOCAL_INCLUDES += [
'../..',
]
UNIFIED_SOURCES += [
'Common.cpp',
- 'TestCachingV4.cpp',
+ 'TestCaching.cpp',
'TestChunkSet.cpp',
'TestClassifier.cpp',
'TestFailUpdate.cpp',
'TestFindFullHash.cpp',
'TestLookupCacheV4.cpp',
'TestPerProviderDirectory.cpp',
'TestProtocolParser.cpp',
'TestRiceDeltaDecoder.cpp',
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/mochitest/cache.sjs
@@ -0,0 +1,134 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+const CC = Components.Constructor;
+const BinaryInputStream = CC("@mozilla.org/binaryinputstream;1",
+ "nsIBinaryInputStream",
+ "setInputStream");
+
+function handleRequest(request, response)
+{
+ var query = {};
+ request.queryString.split('&').forEach(function (val) {
+ var idx = val.indexOf('=');
+ query[val.slice(0, idx)] = unescape(val.slice(idx + 1));
+ });
+
+ var responseBody;
+
+ // Store the fullhash on the server side.
+ if ("list" in query && "fullhash" in query) {
+ // On the server side we will store:
+ // 1. All the full hashes for a given list
+ // 2. All the lists we have right now
+ // Data entries are separated by '\n'.
+ let list = query["list"];
+ let hashes = getState(list);
+
+ let hash = base64ToString(query["fullhash"]);
+ hashes += hash + "\n";
+ setState(list, hashes);
+
+ let lists = getState("lists");
+ if (lists.indexOf(list) == -1) {
+ lists += list + "\n";
+ setState("lists", lists);
+ }
+
+ return;
+ // gethash count returns how many gethash requests have been received.
+ // This is used by the client to know if a gethash request was triggered by gecko.
+ } else if ("gethashcount" == request.queryString) {
+ var counter = getState("counter");
+ responseBody = counter == "" ? "0" : counter;
+ } else {
+ var body = new BinaryInputStream(request.bodyInputStream);
+ var avail;
+ var bytes = [];
+
+ while ((avail = body.available()) > 0) {
+ Array.prototype.push.apply(bytes, body.readByteArray(avail));
+ }
+
+ var counter = getState("counter");
+ counter = counter == "" ? "1" : (parseInt(counter) + 1).toString();
+ setState("counter", counter);
+
+ responseBody = parseV2Request(bytes);
+ }
+
+ response.setHeader("Content-Type", "text/plain", false);
+ response.write(responseBody);
+
+}
+
+function parseV2Request(bytes) {
+ var request = String.fromCharCode.apply(this, bytes);
+ var [HEADER, PREFIXES] = request.split("\n");
+ var [PREFIXSIZE, LENGTH] = HEADER.split(":").map(val => {
+ return parseInt(val);
+ });
+
+ var ret = "";
+ for(var start = 0; start < LENGTH; start += PREFIXSIZE) {
+ getState("lists").split("\n").forEach(function(list) {
+ var completions = getState(list).split("\n");
+
+ for (var completion of completions) {
+ if (completion.indexOf(PREFIXES.substr(start, PREFIXSIZE)) == 0) {
+ ret += list + ":" + "1" + ":" + "32" + "\n";
+ ret += completion;
+ }
+ }
+ });
+ }
+
+ return ret;
+}
+
+/* Convert Base64 data to a string */
+const toBinaryTable = [
+ -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
+ -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
+ -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,62, -1,-1,-1,63,
+ 52,53,54,55, 56,57,58,59, 60,61,-1,-1, -1, 0,-1,-1,
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 11,12,13,14,
+ 15,16,17,18, 19,20,21,22, 23,24,25,-1, -1,-1,-1,-1,
+ -1,26,27,28, 29,30,31,32, 33,34,35,36, 37,38,39,40,
+ 41,42,43,44, 45,46,47,48, 49,50,51,-1, -1,-1,-1,-1
+];
+const base64Pad = '=';
+
+function base64ToString(data) {
+ var result = '';
+ var leftbits = 0; // number of bits decoded, but yet to be appended
+ var leftdata = 0; // bits decoded, but yet to be appended
+
+ // Convert one by one.
+ for (var i = 0; i < data.length; i++) {
+ var c = toBinaryTable[data.charCodeAt(i) & 0x7f];
+ var padding = (data[i] == base64Pad);
+ // Skip illegal characters and whitespace
+ if (c == -1) continue;
+
+ // Collect data into leftdata, update bitcount
+ leftdata = (leftdata << 6) | c;
+ leftbits += 6;
+
+ // If we have 8 or more bits, append 8 bits to the result
+ if (leftbits >= 8) {
+ leftbits -= 8;
+ // Append if not padding.
+ if (!padding)
+ result += String.fromCharCode((leftdata >> leftbits) & 0xff);
+ leftdata &= (1 << leftbits) - 1;
+ }
+ }
+
+ // If there are any bits left, the base64 string was corrupted
+ if (leftbits)
+ throw Components.Exception('Corrupted base64 string');
+
+ return result;
+}
--- a/toolkit/components/url-classifier/tests/mochitest/mochitest.ini
+++ b/toolkit/components/url-classifier/tests/mochitest/mochitest.ini
@@ -26,17 +26,19 @@ support-files =
dnt.sjs
update.sjs
bad.css
bad.css^headers^
gethash.sjs
gethashFrame.html
tracker.js
seek.webm
+ cache.sjs
[test_classifier.html]
skip-if = (os == 'linux' && debug) #Bug 1199778
[test_classifier_match.html]
[test_classifier_worker.html]
[test_classify_ping.html]
[test_classify_track.html]
[test_gethash.html]
[test_bug1254766.html]
+[test_cachemiss.html]
--- a/toolkit/components/url-classifier/tests/mochitest/test_bug1254766.html
+++ b/toolkit/components/url-classifier/tests/mochitest/test_bug1254766.html
@@ -141,34 +141,16 @@ function testGethash() {
.then(() => {
ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."); })
.then(loadTestFrame)
.then(() => {
ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is not triggered."); })
.then(reset);
}
-// This testcase is to make sure an update request will clear completion cache:
-// 1. Add prefixes to DB.
-// 2. Load test frame, this should trigger a gethash request
-// 3. Trigger an update, completion cache should be cleared now.
-// 4. Load test frame again, since cache is cleared now, gethash request should be triggered.
-function testUpdateClearCache() {
- return Promise.resolve()
- .then(addPrefixToDB)
- .then(loadTestFrame)
- .then(() => {
- ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."); })
- .then(updateUnusedUrl)
- .then(loadTestFrame)
- .then(() => {
- ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."); })
- .then(reset);
-}
-
// This testcae is to make sure completions in update works:
// 1. Add completions to DB.
// 2. Load test frame, since completions is stored in DB, gethash request should
// not be triggered.
function testUpdate() {
return Promise.resolve()
.then(addCompletionToDB)
.then(loadTestFrame)
@@ -234,17 +216,16 @@ function testGethashCompletionsAfterRelo
.then(reset);
}
function runTest() {
Promise.resolve()
.then(classifierHelper.waitForInit)
.then(setup)
.then(testGethash)
- .then(testUpdateClearCache)
.then(testUpdate)
.then(testUpdateNotClearCompletions)
.then(testUpdateCompletionsAfterReload)
.then(testGethashCompletionsAfterReload)
.then(function() {
SimpleTest.finish();
}).catch(function(e) {
ok(false, "Some test failed with error " + e);
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/mochitest/test_cachemiss.html
@@ -0,0 +1,161 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Bug 1272239 - Test gethash.</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="classifierHelper.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+
+<body>
+<p id="display"></p>
+<div id="content" style="display: none">
+</div>
+<pre id="test">
+
+<script src="head.js"></script>
+<script class="testbody" type="text/javascript">
+
+const MALWARE_LIST = "test-malware-simple";
+const MALWARE_HOST = "malware.example.com/";
+
+const UNWANTED_LIST = "test-unwanted-simple";
+const UNWANTED_HOST = "unwanted.example.com/";
+
+const GETHASH_URL = "http://mochi.test:8888/tests/toolkit/components/url-classifier/tests/mochitest/cache.sjs";
+
+var shouldLoad = false;
+
+var gPreGethashCounter = 0;
+var gCurGethashCounter = 0;
+
+function loadTestFrame() {
+ return new Promise(function(resolve, reject) {
+ var iframe = document.createElement("iframe");
+ iframe.setAttribute("src", "gethashFrame.html");
+ document.body.appendChild(iframe);
+
+ iframe.onload = function() {
+ document.body.removeChild(iframe);
+ resolve();
+ };
+ }).then(getGethashCounter);
+}
+
+function getGethashCounter() {
+ return new Promise(function(resolve, reject) {
+ var xhr = new XMLHttpRequest;
+ xhr.open("PUT", GETHASH_URL + "?gethashcount");
+ xhr.setRequestHeader("Content-Type", "text/plain");
+ xhr.onreadystatechange = function() {
+ if (this.readyState == this.DONE) {
+ gPreGethashCounter = gCurGethashCounter;
+ gCurGethashCounter = parseInt(xhr.response);
+ resolve();
+ }
+ };
+ xhr.send();
+ });
+}
+
+// Add 4-byte prefixes to the local database, so that accessing the url
+// will trigger a gethash request.
+function addPrefixToDB(list, url) {
+ var testData = [{ db: list, url: url, len: 4 }];
+
+ return classifierHelper.addUrlToDB(testData)
+ .catch(function(err) {
+ ok(false, "Couldn't update classifier. Error code: " + err);
+ // Abort test.
+ SimpleTest.finish();
+ });
+}
+
+// manually reset DB to make sure next test won't be affected by cache.
+function reset() {
+ return classifierHelper.resetDB;
+}
+
+// This test has to come before testPositiveCache to ensure gethash server doesn't
+// contain completions.
+function testNegativeCache() {
+ shouldLoad = true;
+
+ function setup() {
+ classifierHelper.allowCompletion([MALWARE_LIST, UNWANTED_LIST], GETHASH_URL);
+
+ // Only add prefix to database. not server, so gethash will not return
+ // result.
+ return Promise.all([
+ addPrefixToDB(MALWARE_LIST, MALWARE_HOST),
+ addPrefixToDB(UNWANTED_LIST, UNWANTED_HOST),
+ ]);
+ }
+
+ return Promise.resolve()
+ .then(setup)
+ .then(() => loadTestFrame())
+ .then(() => {
+ ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."); })
+ // Second load should not trigger a gethash request because of the cache.
+ .then(() => loadTestFrame())
+ .then(() => {
+ ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is nottriggered."); })
+ .then(reset);
+}
+
+function testPositiveCache() {
+ shouldLoad = false;
+
+ function setup() {
+ classifierHelper.allowCompletion([MALWARE_LIST, UNWANTED_LIST], GETHASH_URL);
+
+ return Promise.all([
+ addPrefixToDB(MALWARE_LIST, MALWARE_HOST),
+ addPrefixToDB(UNWANTED_LIST, UNWANTED_HOST),
+ addCompletionToServer(MALWARE_LIST, MALWARE_HOST, GETHASH_URL),
+ addCompletionToServer(UNWANTED_LIST, UNWANTED_HOST, GETHASH_URL)
+ ]);
+ }
+
+ return Promise.resolve()
+ .then(setup)
+ .then(() => loadTestFrame())
+ .then(() => {
+ ok(gCurGethashCounter > gPreGethashCounter, "Gethash request is triggered."); })
+ // Second load should not trigger a gethash request because of the cache.
+ .then(() => loadTestFrame())
+ .then(() => {
+ ok(gCurGethashCounter == gPreGethashCounter, "Gethash request is nottriggered."); })
+ .then(reset);
+}
+
+function runTest() {
+ Promise.resolve()
+ // These test resources get blocked when gethash returns successfully.
+ .then(classifierHelper.waitForInit)
+ .then(testNegativeCache)
+ .then(testPositiveCache)
+ .then(function() {
+ SimpleTest.finish();
+ }).catch(function(e) {
+ ok(false, "Some test failed with error " + e);
+ SimpleTest.finish();
+ });
+}
+
+SimpleTest.waitForExplicitFinish();
+
+// 'network.predictor.enabled' is disabled because if another testcase loads
+// resources such as evil.js or evil.css, we might load them from the cache
+// directly and bypass the classifier check.
+SpecialPowers.pushPrefEnv({"set": [
+ ["browser.safebrowsing.malware.enabled", true],
+ ["network.predictor.enabled", false],
+ ["urlclassifier.gethash.timeout_ms", 30000],
+]}, runTest);
+
+</script>
+</pre>
+</body>
+</html>
--- a/toolkit/components/url-classifier/tests/unit/test_partial.js
+++ b/toolkit/components/url-classifier/tests/unit/test_partial.js
@@ -667,94 +667,16 @@ function testErrorList()
doErrorUpdate("test-phish-simple,test-malware-simple", function() {
// Now the lists should be marked stale. Check assertions.
checkAssertions(assertions, runNextTest);
}, updateError);
}, updateError);
}
-function testStaleList()
-{
- var addUrls = [ "foo.com/a", "foo.com/b", "bar.com/c" ];
- var update = buildPhishingUpdate(
- [
- { "chunkNum" : 1,
- "urls" : addUrls
- }],
- 32);
-
- var completer = installCompleter('test-phish-simple', [[1, addUrls]], []);
-
- var assertions = {
- "tableData" : "test-phish-simple;a:1",
- "urlsExist" : addUrls,
- // These are complete urls, and will only be completed if the
- // list is stale.
- "completerQueried" : [completer, addUrls]
- };
-
- // Consider a match stale after one second.
- prefBranch.setIntPref("urlclassifier.max-complete-age", 1);
-
- // Apply the update.
- doStreamUpdate(update, function() {
- // Now the test-phish-simple and test-malware-simple tables are marked
- // as fresh. Wait three seconds to make sure the list is marked stale.
- new Timer(3000, function() {
- // Now the lists should be marked stale. Check assertions.
- checkAssertions(assertions, function() {
- prefBranch.setIntPref("urlclassifier.max-complete-age", 2700);
- runNextTest();
- });
- }, updateError);
- }, updateError);
-}
-
-// Same as testStaleList, but verifies that an empty response still
-// unconfirms the entry.
-function testStaleListEmpty()
-{
- var addUrls = [ "foo.com/a", "foo.com/b", "bar.com/c" ];
- var update = buildPhishingUpdate(
- [
- { "chunkNum" : 1,
- "urls" : addUrls
- }],
- 32);
-
- var completer = installCompleter('test-phish-simple', [], []);
-
- var assertions = {
- "tableData" : "test-phish-simple;a:1",
- // None of these should match, because they won't be completed
- "urlsDontExist" : addUrls,
- // These are complete urls, and will only be completed if the
- // list is stale.
- "completerQueried" : [completer, addUrls]
- };
-
- // Consider a match stale after one second.
- prefBranch.setIntPref("urlclassifier.max-complete-age", 1);
-
- // Apply the update.
- doStreamUpdate(update, function() {
- // Now the test-phish-simple and test-malware-simple tables are marked
- // as fresh. Wait three seconds to make sure the list is marked stale.
- new Timer(3000, function() {
- // Now the lists should be marked stale. Check assertions.
- checkAssertions(assertions, function() {
- prefBranch.setIntPref("urlclassifier.max-complete-age", 2700);
- runNextTest();
- });
- }, updateError);
- }, updateError);
-}
-
-
// Verify that different lists (test-phish-simple,
// test-malware-simple) maintain their freshness separately.
function testErrorListIndependent()
{
var phishUrls = [ "phish.com/a" ];
var malwareUrls = [ "attack.com/a" ];
var update = buildPhishingUpdate(
[
@@ -810,16 +732,14 @@ function run_test()
testMixedSizesDifferentDomains,
testInvalidHashSize,
testWrongTable,
testCachedResults,
testCachedResultsWithSub,
testCachedResultsWithExpire,
testCachedResultsUpdate,
testCachedResultsFailure,
- testStaleList,
- testStaleListEmpty,
testErrorList,
testErrorListIndependent
]);
}
do_test_pending();