Bug 1311935 - P2. Process fullHashes.find response. r?francois draft
author dimi <dlee@mozilla.com>
Tue, 11 Apr 2017 11:50:48 +0800
changeset 560078 03d34c11bc75e5df164f398e0034160c8ab724e0
parent 559474 eb9089540eeea82cde1419906e20992e7854c568
child 560079 a319000da3b1422e1cf2daea13d306eb0bbac6ec
push id 53321
push user dlee@mozilla.com
push date Tue, 11 Apr 2017 03:51:38 +0000
reviewers francois
bugs 1311935
milestone 55.0a1
Bug 1311935 - P2. Process fullHashes.find response. r?francois

This patch includes the following changes:

1. nsUrlClassifierHashCompleter.js, nsIUrlClassifierHashCompleter.idl
   - Add a completionV4 interface so the hash completer can pass response data
     to the DB service.
   - Process response data, including the negative cache duration, the matched
     full hashes, and the cache duration for each match. Full matches are
     passed through the nsIFullHashMatch interface.
   - Change _requests.responses from an array of matched full hashes to a
     dictionary so that it can store additional information such as the
     negative cache duration.
2. nsUrlClassifierDBService.cpp
   - Implement the CompletionV4 interface and store response data in a
     CacheResultV4 object. Converting cache durations to expiry times is
     handled here.
   - Add a CacheResultToTableUpdate function to convert V2 and V4 cache
     results to TableUpdate objects.
3. LookupCache.h
   - Extend CacheResult into CacheResultV2 and CacheResultV4 so we can store
     response data from CompletionV2 and CompletionV4.
4. HashStore.h
   - Add an API and a member variable to TableUpdateV4 to store response data.
     The TableUpdate object is used by the DB service to pass update data or
     gethash responses to the Classifier, so TableUpdateV4 is extended to be
     able to store fullHashes.find responses.
5. Entries.h
   - Define the structure in which we cache fullHashes.find responses.

MozReview-Commit-ID: FV4yAl2SAc6
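For orientation before the per-file diffs: the sketch below is not part of the patch. It shows how one parsed fullHashes.find response for a single prefix is meant to land in the new cache structures. The helper name SketchCacheOneResponse, the prefix "0123", the full hash, and the durations are made-up example values; the types and calls (CachedFullHashResponse, FullHashExpiryCache, TableUpdateV4::NewFullHashResponse) are the ones added by this patch.

// Minimal sketch, not part of the patch: cache one fullHashes.find response.
// Example values only; assumes the Gecko url-classifier headers from this patch.
#include "Entries.h"          // CachedFullHashResponse, FullHashExpiryCache
#include "HashStore.h"        // TableUpdateV4
#include "mozilla/Unused.h"
#include "prtime.h"           // PR_Now, PR_USEC_PER_SEC

using namespace mozilla::safebrowsing;

static void
SketchCacheOneResponse(TableUpdateV4* aTableUpdate)
{
  int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;

  CachedFullHashResponse response;
  // Negative cache: lookups for this prefix need no gethash until this time.
  response.negativeCacheExpirySec = nowSec + 600;   // e.g. 600s from the server
  // Positive cache: each matched 32-byte full hash carries its own expiry.
  response.fullHashes.Put(
    NS_LITERAL_CSTRING("01234567890123456789012345678901"), nowSec + 300);

  // Hand the per-prefix response to the V4 table update; the DB service does
  // the same in CacheResultToTableUpdate() further down in this patch.
  mozilla::Unused << aTableUpdate->NewFullHashResponse(
    NS_LITERAL_CSTRING("0123"), response);
}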
toolkit/components/url-classifier/Entries.h
toolkit/components/url-classifier/HashStore.cpp
toolkit/components/url-classifier/HashStore.h
toolkit/components/url-classifier/LookupCache.cpp
toolkit/components/url-classifier/LookupCache.h
toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
toolkit/components/url-classifier/nsUrlClassifierDBService.h
toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
toolkit/components/url-classifier/tests/unit/test_partial.js
--- a/toolkit/components/url-classifier/Entries.h
+++ b/toolkit/components/url-classifier/Entries.h
@@ -313,12 +313,50 @@ WriteTArray(nsIOutputStream* aStream, ns
                         aArray.Length() * sizeof(T),
                         &written);
 }
 
 typedef nsClassHashtable<nsUint32HashKey, nsCString> PrefixStringMap;
 
 typedef nsDataHashtable<nsCStringHashKey, int64_t> TableFreshnessMap;
 
+typedef nsCStringHashKey VLHashPrefixString;
+typedef nsCStringHashKey FullHashString;
+
+typedef nsDataHashtable<FullHashString, int64_t> FullHashExpiryCache;
+
+struct CachedFullHashResponse {
+  int64_t negativeCacheExpirySec;
+
+  // Map of all matches found in the fullHash response; this field may be empty.
+  FullHashExpiryCache fullHashes;
+
+  CachedFullHashResponse& operator=(const CachedFullHashResponse& aOther) {
+    negativeCacheExpirySec = aOther.negativeCacheExpirySec;
+
+    fullHashes.Clear();
+    for (auto iter = aOther.fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
+      fullHashes.Put(iter.Key(), iter.Data());
+    }
+
+    return *this;
+  }
+
+  bool operator==(const CachedFullHashResponse& aOther) const {
+    if (negativeCacheExpirySec != aOther.negativeCacheExpirySec ||
+        fullHashes.Count() != aOther.fullHashes.Count()) {
+      return false;
+    }
+    for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
+      if (iter.Data() != aOther.fullHashes.Get(iter.Key())) {
+        return false;
+      }
+    }
+    return true;
+  }
+};
+
+typedef nsClassHashtable<VLHashPrefixString, CachedFullHashResponse> FullHashResponseMap;
+
 } // namespace safebrowsing
 } // namespace mozilla
 
 #endif // SBEntries_h__
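The code that consumes these cached entries at lookup time is not part of P2. Purely as an illustration of what the two expiry fields are intended to convey (an assumption based on the field names and the commit message), a cache check for a prefix/full-hash pair could look roughly like the sketch below; CheckCache and CacheAnswer are hypothetical names.

// Hypothetical sketch only: how a cached response could answer a lookup.
// CheckCache() and its semantics are NOT part of this patch; they merely
// illustrate the intent of negativeCacheExpirySec and fullHashes.
#include <stdint.h>
#include "Entries.h"

using namespace mozilla::safebrowsing;

enum class CacheAnswer { Hit, Miss, Expired };

static CacheAnswer
CheckCache(const FullHashResponseMap& aCache,
           const nsACString& aPrefix,
           const nsACString& aFullHash,
           int64_t aNowSec)
{
  CachedFullHashResponse* entry = aCache.Get(aPrefix);
  if (!entry) {
    return CacheAnswer::Expired;   // nothing cached for this prefix
  }

  int64_t expirySec;
  if (entry->fullHashes.Get(aFullHash, &expirySec) && expirySec > aNowSec) {
    return CacheAnswer::Hit;       // positive cache: confirmed match
  }

  if (entry->negativeCacheExpirySec > aNowSec) {
    return CacheAnswer::Miss;      // negative cache: no gethash needed yet
  }

  return CacheAnswer::Expired;     // cache stale, re-issue fullHashes.find
}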
--- a/toolkit/components/url-classifier/HashStore.cpp
+++ b/toolkit/components/url-classifier/HashStore.cpp
@@ -188,16 +188,29 @@ TableUpdateV4::NewRemovalIndices(const u
 }
 
 void
 TableUpdateV4::NewChecksum(const std::string& aChecksum)
 {
   mChecksum.Assign(aChecksum.data(), aChecksum.size());
 }
 
+nsresult
+TableUpdateV4::NewFullHashResponse(const nsACString& aPrefix,
+                                   CachedFullHashResponse& aResponse)
+{
+  CachedFullHashResponse* response =
+    mFullHashResponseMap.LookupOrAdd(aPrefix);
+  if (!response) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+  *response = aResponse;
+  return NS_OK;
+}
+
 HashStore::HashStore(const nsACString& aTableName,
                      const nsACString& aProvider,
                      nsIFile* aRootStoreDir)
   : mTableName(aTableName)
   , mInUpdate(false)
   , mFileSize(0)
 {
   nsresult rv = Classifier::GetPrivateStoreDirectory(aRootStoreDir,
--- a/toolkit/components/url-classifier/HashStore.h
+++ b/toolkit/components/url-classifier/HashStore.h
@@ -154,42 +154,50 @@ public:
   explicit TableUpdateV4(const nsACString& aTable)
     : TableUpdate(aTable)
     , mFullUpdate(false)
   {
   }
 
   bool Empty() const override
   {
-    return mPrefixesMap.IsEmpty() && mRemovalIndiceArray.IsEmpty();
+    return mPrefixesMap.IsEmpty() &&
+           mRemovalIndiceArray.IsEmpty() &&
+           mFullHashResponseMap.IsEmpty();
   }
 
   bool IsFullUpdate() const { return mFullUpdate; }
   PrefixStdStringMap& Prefixes() { return mPrefixesMap; }
   RemovalIndiceArray& RemovalIndices() { return mRemovalIndiceArray; }
   const nsACString& ClientState() const { return mClientState; }
   const nsACString& Checksum() const { return mChecksum; }
+  const FullHashResponseMap& FullHashResponse() const { return mFullHashResponseMap; }
 
   // For downcasting.
   static const int TAG = 4;
 
   void SetFullUpdate(bool aIsFullUpdate) { mFullUpdate = aIsFullUpdate; }
   void NewPrefixes(int32_t aSize, std::string& aPrefixes);
   void NewRemovalIndices(const uint32_t* aIndices, size_t aNumOfIndices);
   void SetNewClientState(const nsACString& aState) { mClientState = aState; }
   void NewChecksum(const std::string& aChecksum);
+  nsresult NewFullHashResponse(const nsACString& aPrefix,
+                               CachedFullHashResponse& aResponse);
 
 private:
   virtual int Tag() const override { return TAG; }
 
   bool mFullUpdate;
   PrefixStdStringMap mPrefixesMap;
   RemovalIndiceArray mRemovalIndiceArray;
   nsCString mClientState;
   nsCString mChecksum;
+
+  // This is used to store responses from fullHashes.find, keyed by prefix.
+  FullHashResponseMap mFullHashResponseMap;
 };
 
 // There is one hash store per table.
 class HashStore {
 public:
   HashStore(const nsACString& aTableName,
             const nsACString& aProvider,
             nsIFile* aRootStoreFile);
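As a condensed illustration of how such a TableUpdateV4 is produced and applied: the real code is CacheCompletions()/CacheResultToTableUpdate() in nsUrlClassifierDBService.cpp below; this sketch elides error handling and the active-table filtering, and SketchApplyV4Completion is a hypothetical name.

// Condensed sketch (hypothetical helper) of the caching path implemented below
// in nsUrlClassifierDBServiceWorker::CacheCompletions().
#include "Classifier.h"
#include "HashStore.h"
#include "LookupCache.h"      // CacheResultV4
#include "ProtocolParser.h"
#include "nsTArray.h"

using namespace mozilla::safebrowsing;

static void
SketchApplyV4Completion(Classifier* aClassifier, CacheResultV4& aResult)
{
  // V4 cache results go through the protobuf parser so that we obtain a
  // TableUpdateV4, which is the form the Classifier can apply.
  ProtocolParserProtobuf parser;
  TableUpdate* tu = parser.GetTableUpdate(aResult.table);

  auto tuV4 = TableUpdate::Cast<TableUpdateV4>(tu);
  if (!tuV4 ||
      NS_FAILED(tuV4->NewFullHashResponse(aResult.prefix, aResult.response))) {
    return;
  }

  nsTArray<TableUpdate*> updates;
  updates.AppendElement(tu);
  parser.ForgetTableUpdates();             // the parser gives up ownership of |tu|

  aClassifier->ApplyFullHashes(&updates);  // consumes the table updates
}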
--- a/toolkit/components/url-classifier/LookupCache.cpp
+++ b/toolkit/components/url-classifier/LookupCache.cpp
@@ -35,16 +35,19 @@
 // MOZ_LOG=UrlClassifierDbService:5
 extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
 #define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args)
 #define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug)
 
 namespace mozilla {
 namespace safebrowsing {
 
+const int CacheResultV2::VER = CacheResult::V2;
+const int CacheResultV4::VER = CacheResult::V4;
+
 const int LookupCacheV2::VER = 2;
 
 LookupCache::LookupCache(const nsACString& aTableName,
                          const nsACString& aProvider,
                          nsIFile* aRootStoreDir)
   : mPrimed(false)
   , mTableName(aTableName)
   , mProvider(aProvider)
--- a/toolkit/components/url-classifier/LookupCache.h
+++ b/toolkit/components/url-classifier/LookupCache.h
@@ -98,28 +98,78 @@ public:
   bool mProtocolV2;
 
   // This is only used by telemetry to record the match result.
   MatchResult mMatchResult;
 };
 
 typedef nsTArray<LookupResult> LookupResultArray;
 
-struct CacheResult {
-  AddComplete entry;
+class CacheResult {
+public:
+  enum { V2, V4 };
+
+  virtual int Ver() const = 0;
+  virtual bool findCompletion(const Completion& aCompletion) const = 0;
+
+  virtual ~CacheResult() {}
+
+  template<typename T>
+  static T* Cast(CacheResult* aThat) {
+    return ((aThat && T::VER == aThat->Ver()) ?
+      reinterpret_cast<T*>(aThat) : nullptr);
+  }
+
   nsCString table;
+};
+
+class CacheResultV2 final : public CacheResult
+{
+public:
+  static const int VER;
+
+  Completion completion;
+  uint32_t addChunk;
 
-  bool operator==(const CacheResult& aOther) const {
-    if (entry != aOther.entry) {
-      return false;
-    }
-    return table == aOther.table;
+  bool operator==(const CacheResultV2& aOther) const {
+    return table == aOther.table &&
+           completion == aOther.completion &&
+           addChunk == aOther.addChunk;
+  }
+
+  bool findCompletion(const Completion& aCompletion) const override {
+    return completion == aCompletion;
   }
+
+  virtual int Ver() const override { return VER; }
 };
-typedef nsTArray<CacheResult> CacheResultArray;
+
+class CacheResultV4 final : public CacheResult
+{
+public:
+  static const int VER;
+
+  nsCString prefix;
+  CachedFullHashResponse response;
+
+  bool operator==(const CacheResultV4& aOther) const {
+    return prefix == aOther.prefix &&
+           response == aOther.response;
+  }
+
+  bool findCompletion(const Completion& aCompletion) const override {
+    nsDependentCSubstring completion(
+      reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
+    return response.fullHashes.Contains(completion);
+  }
+
+  virtual int Ver() const override { return VER; }
+};
+
+typedef nsTArray<UniquePtr<CacheResult>> CacheResultArray;
 
 class LookupCache {
 public:
   // Check for a canonicalized IP address.
   static bool IsCanonicalizedIP(const nsACString& aHost);
 
   // take a lookup string (www.hostname.com/path/to/resource.html) and
   // expand it into the set of fragments that should be searched for in an
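A small sketch of the Ver()/Cast<>() dispatch this class hierarchy enables; the DB service below uses the same pattern in CacheResultToTableUpdate() and IsSameAsLastResults(). SketchDispatch is a hypothetical name and the printf output is illustrative only.

// Sketch of version dispatch over a CacheResultArray (illustrative only).
#include <cstdio>
#include "LookupCache.h"

using namespace mozilla::safebrowsing;

static void
SketchDispatch(const CacheResultArray& aResults)
{
  for (const auto& entry : aResults) {
    CacheResult* result = entry.get();
    if (auto v2 = CacheResult::Cast<CacheResultV2>(result)) {
      // V2 result: a single 32-byte completion plus its add chunk.
      printf("v2 table=%s chunk=%u\n", v2->table.get(), v2->addChunk);
    } else if (auto v4 = CacheResult::Cast<CacheResultV4>(result)) {
      // V4 result: a prefix plus the cached fullHashes.find response.
      printf("v4 table=%s matches=%u\n", v4->table.get(),
             v4->response.fullHashes.Count());
    }
  }
}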
--- a/toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
+++ b/toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
@@ -1,35 +1,70 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 
+interface nsIArray;
+
+/**
+ * This interface contains the fields of a Match object in a FullHashResponse (V4).
+ * Reference:
+ * https://developers.google.com/safe-browsing/v4/update-api#http-post-response_2
+ */
+[scriptable, uuid(aabeb50e-d9f7-418e-9469-2cd9608958c0)]
+interface nsIFullHashMatch : nsISupports
+{
+  readonly attribute ACString tableName;
+
+  readonly attribute ACString fullHash;
+
+  readonly attribute uint32_t cacheDuration;
+};
+
 /**
  * This interface is implemented by nsIUrlClassifierHashCompleter clients.
  */
 [scriptable, uuid(da16de40-df26-414d-bde7-c4faf4504868)]
 interface nsIUrlClassifierHashCompleterCallback : nsISupports
 {
   /**
    * A complete hash has been found that matches the partial hash.
    * This method may be called 0-n times for a given
    * nsIUrlClassifierCompleter::complete() call.
    *
    * @param hash
-   *        The 128-bit hash that was discovered.
+   *        The 256-bit hash that was discovered.
    * @param table
    *        The name of the table that this hash belongs to.
    * @param chunkId
    *        The database chunk that this hash belongs to.
    */
-  void completion(in ACString hash,
-                  in ACString table,
-                  in uint32_t chunkId);
+  void completionV2(in ACString hash,
+                    in ACString table,
+                    in uint32_t chunkId);
+
+  /**
+   * This will be called when a full hash response is received and parsed,
+   * regardless of whether any full hash was found.
+   *
+   * @param partialHash
+   *        The hash that was sent for completion.
+   * @param table
+   *        The name of the table that this hash belongs to.
+   * @param negativeCacheDuration
+   *        The negative cache duration, in seconds.
+   * @param fullHashes
+   *        An nsIArray of nsIFullHashMatch entries whose full hashes match
+   *        the prefix.
+   */
+  void completionV4(in ACString partialHash,
+                    in ACString table,
+                    in uint32_t negativeCacheDuration,
+                    in nsIArray fullHashes);
 
   /**
    * The completion is complete.  This method is called once per
    * nsIUrlClassifierCompleter::complete() call, after all completion()
    * calls are finished.
    *
    * @param status
    *        NS_OK if the request completed successfully, or an error code.
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
@@ -1,16 +1,17 @@
 //* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsAppDirectoryServiceDefs.h"
+#include "nsArrayUtils.h"
 #include "nsCRT.h"
 #include "nsICryptoHash.h"
 #include "nsICryptoHMAC.h"
 #include "nsIDirectoryService.h"
 #include "nsIKeyModule.h"
 #include "nsIObserverService.h"
 #include "nsIPermissionManager.h"
 #include "nsIPrefBranch.h"
@@ -117,16 +118,19 @@ LazyLogModule gUrlClassifierDbServiceLog
 #define BLOCKED_TABLE_PREF              "urlclassifier.blockedTable"
 #define DOWNLOAD_BLOCK_TABLE_PREF       "urlclassifier.downloadBlockTable"
 #define DOWNLOAD_ALLOW_TABLE_PREF       "urlclassifier.downloadAllowTable"
 #define DISALLOW_COMPLETION_TABLE_PREF  "urlclassifier.disallow_completions"
 
 #define CONFIRM_AGE_PREF        "urlclassifier.max-complete-age"
 #define CONFIRM_AGE_DEFAULT_SEC (45 * 60)
 
+// 30 minutes as the maximum negative cache duration.
+#define MAXIMUM_NEGATIVE_CACHE_DURATION_SEC (30 * 60)
+
 // TODO: The following two prefs are to be removed after we
 //       roll out full v4 hash completion. See Bug 1331534.
 #define TAKE_V4_COMPLETION_RESULT_PREF    "browser.safebrowsing.temporary.take_v4_completion_result"
 #define TAKE_V4_COMPLETION_RESULT_DEFAULT false
 
 class nsUrlClassifierDBServiceWorker;
 
 // Singleton instance.
@@ -833,80 +837,121 @@ nsUrlClassifierDBServiceWorker::CloseDb(
 nsresult
 nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
 {
   if (gShuttingDownThread) {
     return NS_ERROR_ABORT;
   }
 
   LOG(("nsUrlClassifierDBServiceWorker::CacheCompletions [%p]", this));
-  if (!mClassifier)
+  if (!mClassifier) {
     return NS_OK;
+  }
 
   // Ownership is transferred in to us
   nsAutoPtr<CacheResultArray> resultsPtr(results);
 
-  if (mLastResults == *resultsPtr) {
+  if (resultsPtr->Length() == 0) {
+    return NS_OK;
+  }
+
+  if (IsSameAsLastResults(*resultsPtr)) {
     LOG(("Skipping completions that have just been cached already."));
     return NS_OK;
   }
 
-  nsAutoPtr<ProtocolParserV2> pParse(new ProtocolParserV2());
-  nsTArray<TableUpdate*> updates;
-
   // Only cache results for tables that we have, don't take
   // in tables we might accidentally have hit during a completion.
   // This happens due to goog vs googpub lists existing.
   nsTArray<nsCString> tables;
   nsresult rv = mClassifier->ActiveTables(tables);
   NS_ENSURE_SUCCESS(rv, rv);
 
+  nsTArray<TableUpdate*> updates;
+
   for (uint32_t i = 0; i < resultsPtr->Length(); i++) {
     bool activeTable = false;
+    CacheResult* result = resultsPtr->ElementAt(i).get();
+
     for (uint32_t table = 0; table < tables.Length(); table++) {
-      if (tables[table].Equals(resultsPtr->ElementAt(i).table)) {
+      if (tables[table].Equals(result->table)) {
         activeTable = true;
         break;
       }
     }
     if (activeTable) {
-      TableUpdateV2* tuV2 = TableUpdate::Cast<TableUpdateV2>(
-        pParse->GetTableUpdate(resultsPtr->ElementAt(i).table));
-
-      // Ignore V4 for now.
-      if (!tuV2) {
-        continue;
-      }
-
-      LOG(("CacheCompletion Addchunk %d hash %X", resultsPtr->ElementAt(i).entry.addChunk,
-           resultsPtr->ElementAt(i).entry.ToUint32()));
-      rv = tuV2->NewAddComplete(resultsPtr->ElementAt(i).entry.addChunk,
-                                resultsPtr->ElementAt(i).entry.complete);
+      nsAutoPtr<ProtocolParser> pParse;
+      pParse = resultsPtr->ElementAt(i)->Ver() == CacheResult::V2 ?
+                 static_cast<ProtocolParser*>(new ProtocolParserV2()) :
+                 static_cast<ProtocolParser*>(new ProtocolParserProtobuf());
+
+      TableUpdate* tu = pParse->GetTableUpdate(result->table);
+
+      rv = CacheResultToTableUpdate(result, tu);
       if (NS_FAILED(rv)) {
         // We can bail without leaking here because ForgetTableUpdates
         // hasn't been called yet.
         return rv;
       }
-      rv = tuV2->NewAddChunk(resultsPtr->ElementAt(i).entry.addChunk);
-      if (NS_FAILED(rv)) {
-        return rv;
-      }
-      updates.AppendElement(tuV2);
+      updates.AppendElement(tu);
       pParse->ForgetTableUpdates();
     } else {
       LOG(("Completion received, but table is not active, so not caching."));
     }
    }
 
   mClassifier->ApplyFullHashes(&updates);
-  mLastResults = *resultsPtr;
+  mLastResults = Move(resultsPtr);
   return NS_OK;
 }
 
 nsresult
+nsUrlClassifierDBServiceWorker::CacheResultToTableUpdate(CacheResult* aCacheResult,
+                                                         TableUpdate* aUpdate)
+{
+  auto tuV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
+  if (tuV2) {
+    auto result = CacheResult::Cast<CacheResultV2>(aCacheResult);
+    MOZ_ASSERT(result);
+
+    LOG(("CacheCompletion hash %X, Addchunk %d", result->completion.ToUint32(),
+         result->addChunk));
+
+    nsresult rv = tuV2->NewAddComplete(result->addChunk, result->completion);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+    rv = tuV2->NewAddChunk(result->addChunk);
+    return rv;
+  }
+
+  auto tuV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
+  if (tuV4) {
+    auto result = CacheResult::Cast<CacheResultV4>(aCacheResult);
+    MOZ_ASSERT(result);
+
+    if (LOG_ENABLED()) {
+      const FullHashExpiryCache& fullHashes = result->response.fullHashes;
+      for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
+        Completion completion;
+        completion.Assign(iter.Key());
+        LOG(("CacheCompletion(v4) hash %X, CacheExpireTime %" PRId64,
+             completion.ToUint32(), iter.Data()));
+      }
+    }
+
+    return tuV4->NewFullHashResponse(result->prefix, result->response);
+  }
+
+  // The TableUpdate object should be either V2 or V4.
+  return NS_ERROR_FAILURE;
+}
+
+nsresult
 nsUrlClassifierDBServiceWorker::CacheMisses(PrefixArray *results)
 {
   LOG(("nsUrlClassifierDBServiceWorker::CacheMisses [%p] %" PRIuSIZE,
        this, results->Length()));
 
   // Ownership is transferred in to us
   nsAutoPtr<PrefixArray> resultsPtr(results);
 
@@ -957,20 +1002,49 @@ nsUrlClassifierDBServiceWorker::SetLastU
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsUrlClassifierDBServiceWorker::ClearLastResults()
 {
   MOZ_ASSERT(!NS_IsMainThread(), "Must be on the background thread");
-  mLastResults.Clear();
+  if (mLastResults) {
+    mLastResults->Clear();
+  }
   return NS_OK;
 }
 
+bool
+nsUrlClassifierDBServiceWorker::IsSameAsLastResults(CacheResultArray& aResult)
+{
+  if (!mLastResults || mLastResults->Length() != aResult.Length()) {
+    return false;
+  }
+
+  bool equal = true;
+  for (uint32_t i = 0; i < mLastResults->Length() && equal; i++) {
+    CacheResult* lhs = mLastResults->ElementAt(i).get();
+    CacheResult* rhs = aResult[i].get();
+
+    if (lhs->Ver() != rhs->Ver()) {
+      return false;
+    }
+
+    if (lhs->Ver() == CacheResult::V2) {
+      equal = *(CacheResult::Cast<CacheResultV2>(lhs)) ==
+              *(CacheResult::Cast<CacheResultV2>(rhs));
+    } else if (lhs->Ver() == CacheResult::V4) {
+      equal = *(CacheResult::Cast<CacheResultV4>(lhs)) ==
+              *(CacheResult::Cast<CacheResultV4>(rhs));
+    }
+  }
+
+  return equal;
+}
 
 // -------------------------------------------------------------------------
 // nsUrlClassifierLookupCallback
 //
 // This class takes the results of a lookup found on the worker thread
 // and handles any necessary partial hash expansions before calling
 // the client callback.
 
@@ -989,16 +1063,17 @@ public:
     , mPendingCompletions(0)
     , mCallback(c)
     {}
 
 private:
   ~nsUrlClassifierLookupCallback();
 
   nsresult HandleResults();
+  nsresult ProcessComplete(CacheResult* aCacheResult);
 
   RefPtr<nsUrlClassifierDBService> mDBService;
   nsAutoPtr<LookupResultArray> mResults;
 
   // Completed results to send back to the worker for caching.
   nsAutoPtr<CacheResultArray> mCacheResults;
 
   uint32_t mPendingCompletions;
@@ -1104,49 +1179,107 @@ nsUrlClassifierLookupCallback::Completio
   if (mPendingCompletions == 0) {
     HandleResults();
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsUrlClassifierLookupCallback::Completion(const nsACString& completeHash,
-                                          const nsACString& tableName,
-                                          uint32_t chunkId)
+nsUrlClassifierLookupCallback::CompletionV2(const nsACString& aCompleteHash,
+                                            const nsACString& aTableName,
+                                            uint32_t aChunkId)
 {
   LOG(("nsUrlClassifierLookupCallback::Completion [%p, %s, %d]",
-       this, PromiseFlatCString(tableName).get(), chunkId));
-
-  mozilla::safebrowsing::Completion hash;
-  hash.Assign(completeHash);
-
+       this, PromiseFlatCString(aTableName).get(), aChunkId));
+
+  MOZ_ASSERT(!StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
+
+  auto result = new CacheResultV2;
+
+  result->table = aTableName;
+  result->completion.Assign(aCompleteHash);
+  result->addChunk = aChunkId;
+
+  return ProcessComplete(result);
+}
+
+NS_IMETHODIMP
+nsUrlClassifierLookupCallback::CompletionV4(const nsACString& aPartialHash,
+                                            const nsACString& aTableName,
+                                            uint32_t aNegativeCacheDuration,
+                                            nsIArray* aFullHashes)
+{
+  LOG(("nsUrlClassifierLookupCallback::CompletionV4 [%p, %s, %d]",
+       this, PromiseFlatCString(aTableName).get(), aNegativeCacheDuration));
+
+  MOZ_ASSERT(StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
+
+  if (!aFullHashes) {
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  if (aNegativeCacheDuration > MAXIMUM_NEGATIVE_CACHE_DURATION_SEC) {
+    LOG(("Negative cache duration too large, clamping it down to"
+         "a reasonable value."));
+    aNegativeCacheDuration = MAXIMUM_NEGATIVE_CACHE_DURATION_SEC;
+  }
+
+  auto result = new CacheResultV4;
+
+  int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
+
+  result->table = aTableName;
+  result->prefix = aPartialHash;
+  result->response.negativeCacheExpirySec = nowSec + aNegativeCacheDuration;
+
+  // Fill in positive cache entries.
+  uint32_t fullHashCount = 0;
+  nsresult rv = aFullHashes->GetLength(&fullHashCount);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  for (uint32_t i = 0; i < fullHashCount; i++) {
+    nsCOMPtr<nsIFullHashMatch> match = do_QueryElementAt(aFullHashes, i);
+
+    nsCString fullHash;
+    match->GetFullHash(fullHash);
+
+    uint32_t duration;
+    match->GetCacheDuration(&duration);
+
+    result->response.fullHashes.Put(fullHash, nowSec + duration);
+  }
+
+  return ProcessComplete(result);
+}
+
+nsresult
+nsUrlClassifierLookupCallback::ProcessComplete(CacheResult* aCacheResult)
+{
   // Send this completion to the store for caching.
   if (!mCacheResults) {
     mCacheResults = new CacheResultArray();
-    if (!mCacheResults)
+    if (!mCacheResults) {
       return NS_ERROR_OUT_OF_MEMORY;
+    }
   }
 
-  CacheResult result;
-  result.entry.addChunk = chunkId;
-  result.entry.complete = hash;
-  result.table = tableName;
-
   // OK if this fails, we just won't cache the item.
-  mCacheResults->AppendElement(result);
+  mCacheResults->AppendElement(aCacheResult);
 
   // Check if this matched any of our results.
   for (uint32_t i = 0; i < mResults->Length(); i++) {
     LookupResult& result = mResults->ElementAt(i);
 
     // Now, see if it verifies a lookup
     if (!result.mNoise
-        && result.CompleteHash() == hash
-        && result.mTableName.Equals(tableName)) {
+        && result.mTableName.Equals(aCacheResult->table)
+        && aCacheResult->findCompletion(result.CompleteHash())) {
       result.mProtocolConfirmed = true;
     }
   }
 
   return NS_OK;
 }
 
 
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.h
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.h
@@ -213,16 +213,21 @@ private:
                     const nsACString& tables,
                     nsIUrlClassifierLookupCallback* c);
 
   nsresult AddNoise(const Prefix aPrefix,
                     const nsCString tableName,
                     uint32_t aCount,
                     LookupResultArray& results);
 
+  nsresult CacheResultToTableUpdate(CacheResult* aCacheResult,
+                                    TableUpdate* aUpdate);
+
+  bool IsSameAsLastResults(CacheResultArray& aResult);
+
   // Can only be used on the background thread
   nsCOMPtr<nsICryptoHash> mCryptoHash;
 
   nsAutoPtr<mozilla::safebrowsing::Classifier> mClassifier;
   // The class that actually parses the update chunks.
   nsAutoPtr<ProtocolParser> mProtocolParser;
 
   // Directory where to store the SB databases.
@@ -234,17 +239,17 @@ private:
 
   uint32_t mUpdateWaitSec;
 
   // Entries that cannot be completed. We expect them to die at
   // the next update
   PrefixArray mMissCache;
 
   // Stores the last results that triggered a table update.
-  CacheResultArray mLastResults;
+  nsAutoPtr<CacheResultArray> mLastResults;
 
   nsresult mUpdateStatus;
   nsTArray<nsCString> mUpdateTables;
 
   nsCOMPtr<nsIUrlClassifierUpdateObserver> mUpdateObserver;
   bool mInStream;
 
   // The number of noise entries to add to the set of lookup results.
--- a/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
+++ b/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
@@ -144,16 +144,30 @@ function httpStatusToBucket(httpStatus) 
     statusBucket = 14;
     break;
   default:
     statusBucket = 15;
   };
   return statusBucket;
 }
 
+function FullHashMatch(table, hash, duration) {
+  this.tableName = table;
+  this.fullHash = hash;
+  this.cacheDuration = duration;
+}
+
+FullHashMatch.prototype = {
+  QueryInterface: XPCOMUtils.generateQI([Ci.nsIFullHashMatch]),
+
+  tableName : null,
+  fullHash : null,
+  cacheDuration : null,
+};
+
 function HashCompleter() {
   // The current HashCompleterRequest in flight. Once it is started, it is set
   // to null. It may be used by multiple calls to |complete| in succession to
   // avoid creating multiple requests to the same gethash URL.
   this._currentRequest = null;
   // A map of gethashUrls to HashCompleterRequests that haven't yet begun.
   this._pendingRequests = {};
 
@@ -310,17 +324,18 @@ HashCompleterRequest.prototype = {
                                          Ci.nsISupports]),
 
   // This is called by the HashCompleter to add a hash and callback to the
   // HashCompleterRequest. It must be called before calling |begin|.
   add: function HCR_add(aPartialHash, aCallback, aTableName) {
     this._requests.push({
       partialHash: aPartialHash,
       callback: aCallback,
-      responses: []
+      tableName: aTableName,
+      response: { matches:[] },
     });
 
     if (aTableName) {
       let isTableNameV4 = aTableName.endsWith('-proto');
       if (0 === this.tableNames.size) {
         // Decide if this request is v4 by the first added partial hash.
         this.isV4 = isTableNameV4;
       } else if (this.isV4 !== isTableNameV4) {
@@ -511,17 +526,17 @@ HashCompleterRequest.prototype = {
 
     let uploadChannel = this._channel.QueryInterface(Ci.nsIUploadChannel);
     uploadChannel.setUploadStream(inputStream, "text/plain", -1);
 
     let httpChannel = this._channel.QueryInterface(Ci.nsIHttpChannel);
     httpChannel.requestMethod = "POST";
   },
 
-  // Parses the response body and eventually adds items to the |responses| array
+  // Parses the response body and eventually adds items to the |response.matches| array
   // for elements of |this._requests|.
   handleResponse: function HCR_handleResponse() {
     if (this._response == "") {
       return;
     }
 
     if (this.isV4) {
       return this.handleResponseV4();
@@ -532,16 +547,18 @@ HashCompleterRequest.prototype = {
     let length = this._response.length;
     while (start != length) {
       start = this.handleTable(start);
     }
   },
 
   handleResponseV4: function HCR_handleResponseV4() {
     let callback = {
+      // onCompleteHashFound will be called for each full hash found in the
+      // FullHashResponse.
       onCompleteHashFound : (aCompleteHash,
                              aTableNames,
                              aPerHashCacheDuration) => {
         log("V4 fullhash response complete hash found callback: " +
             JSON.stringify(aCompleteHash) + ", " +
             aTableNames + ", CacheDuration(" + aPerHashCacheDuration + ")");
 
         // Filter table names which we didn't requested.
@@ -551,39 +568,51 @@ HashCompleterRequest.prototype = {
         if (0 === filteredTables.length) {
           log("ERROR: Got complete hash which is from unknown table.");
           return;
         }
         if (filteredTables.length > 1) {
           log("WARNING: Got complete hash which has ambigious threat type.");
         }
 
-        this.handleItem(aCompleteHash, filteredTables[0], 0);
-
-        // TODO: Bug 1311935 - Implement v4 cache.
+        this.handleItem({
+          completeHash: aCompleteHash,
+          tableName: filteredTables[0],
+          cacheDuration: aPerHashCacheDuration
+        });
       },
 
+      // onResponseParsed will be called regardless of whether there is a match
+      // in the FullHashResponse. The callback is mainly used to pass the
+      // negative cache duration and the minimum wait duration.
       onResponseParsed : (aMinWaitDuration,
                           aNegCacheDuration) => {
         log("V4 fullhash response parsed callback: " +
             "MinWaitDuration(" + aMinWaitDuration + "), " +
             "NegativeCacheDuration(" + aNegCacheDuration + ")");
 
         let minWaitDuration = aMinWaitDuration;
 
         if (aMinWaitDuration > MIN_WAIT_DURATION_MAX_VALUE) {
+          log("WARNING: Minimum wait duration too large, clamping it down " +
+              "to a reasonable value.");
           minWaitDuration = MIN_WAIT_DURATION_MAX_VALUE;
         } else if (aMinWaitDuration < 0) {
+          log("WARNING: Minimum wait duration is negative, reset it to 0");
           minWaitDuration = 0;
         }
 
         this._completer._nextGethashTimeMs[this.gethashUrl] =
           Date.now() + minWaitDuration;
 
-        // TODO: Bug 1311935 - Implement v4 cache.
+        // A fullhash request may contain more than one prefix, so the negative
+        // cache duration should be set for all the prefixes in the request.
+        this._requests.forEach(request => {
+          request.response.negCacheDuration = aNegCacheDuration;
+        });
       },
     };
 
     gUrlUtil.parseFindFullHashResponseV4(this._response, callback);
   },
 
   // This parses a table entry in the response body and calls |handleItem|
   // for complete hash in the table entry.
@@ -610,53 +639,68 @@ HashCompleterRequest.prototype = {
     if (dataLength % COMPLETE_LENGTH != 0 ||
         dataLength == 0 ||
         dataLength > body.length - (newlineIndex + 1)) {
       throw errorWithStack();
     }
 
     let data = body.substr(newlineIndex + 1, dataLength);
     for (let i = 0; i < (dataLength / COMPLETE_LENGTH); i++) {
-      this.handleItem(data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH), list,
-                      addChunk);
+      this.handleItem({
+        completeHash: data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH),
+        tableName: list,
+        chunkId: addChunk
+      });
     }
 
     return aStart + newlineIndex + 1 + dataLength;
   },
 
   // This adds a complete hash to any entry in |this._requests| that matches
   // the hash.
-  handleItem: function HCR_handleItem(aData, aTableName, aChunkId) {
+  handleItem: function HCR_handleItem(aData) {
     for (let i = 0; i < this._requests.length; i++) {
       let request = this._requests[i];
-      if (aData.startsWith(request.partialHash)) {
-        request.responses.push({
-          completeHash: aData,
-          tableName: aTableName,
-          chunkId: aChunkId,
-        });
+      if (aData.completeHash.startsWith(request.partialHash)) {
+        request.response.matches.push(aData);
       }
     }
   },
 
   // notifySuccess and notifyFailure are used to alert the callbacks with
   // results. notifySuccess makes |completion| and |completionFinished| calls
   // while notifyFailure only makes a |completionFinished| call with the error
   // code.
   notifySuccess: function HCR_notifySuccess() {
-    for (let i = 0; i < this._requests.length; i++) {
-      let request = this._requests[i];
-      for (let j = 0; j < request.responses.length; j++) {
-        let response = request.responses[j];
-        request.callback.completion(response.completeHash, response.tableName,
-                                    response.chunkId);
-      }
+    // V2 completion handler
+    let completionV2 = (req) => {
+      req.response.matches.forEach((m) => {
+        req.callback.completionV2(m.completeHash, m.tableName, m.chunkId);
+      });
+
+      req.callback.completionFinished(Cr.NS_OK);
+    };
+
+    // V4 completion handler
+    let completionV4 = (req) => {
+      let matches = Cc["@mozilla.org/array;1"].createInstance(Ci.nsIMutableArray);
 
-      request.callback.completionFinished(Cr.NS_OK);
-    }
+      req.response.matches.forEach(m => {
+        matches.appendElement(
+          new FullHashMatch(m.tableName, m.completeHash, m.cacheDuration), false);
+      });
+
+      req.callback.completionV4(req.partialHash, req.tableName,
+                                req.response.negCacheDuration, matches);
+
+      req.callback.completionFinished(Cr.NS_OK);
+    };
+
+    let completion = this.isV4 ? completionV4 : completionV2;
+    this._requests.forEach((req) => { completion(req); });
   },
 
   notifyFailure: function HCR_notifyFailure(aStatus) {
     log("notifying failure\n");
     for (let i = 0; i < this._requests.length; i++) {
       let request = this._requests[i];
       request.callback.completionFinished(aStatus);
     }
--- a/toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
@@ -454,26 +454,26 @@ nsUrlClassifierUtils::ParseFindFullHashR
   for (auto& m : r.matches()) {
     nsCString tableNames;
     nsresult rv = ConvertThreatTypeToListNames(m.threat_type(), tableNames);
     if (NS_FAILED(rv)) {
       hasUnknownThreatType = true;
       continue; // Ignore un-convertable threat type.
     }
     auto& hash = m.threat().hash();
-    auto cacheDuration = DurationToMs(m.cache_duration());
+    auto cacheDuration = m.cache_duration().seconds();
     aCallback->OnCompleteHashFound(nsCString(hash.c_str(), hash.length()),
                                    tableNames, cacheDuration);
 
     Telemetry::Accumulate(Telemetry::URLCLASSIFIER_POSITIVE_CACHE_DURATION,
                           cacheDuration);
   }
 
   auto minWaitDuration = DurationToMs(r.minimum_wait_duration());
-  auto negCacheDuration = DurationToMs(r.negative_cache_duration());
+  auto negCacheDuration = r.negative_cache_duration().seconds();
 
   aCallback->OnResponseParsed(minWaitDuration, negCacheDuration);
 
   Telemetry::Accumulate(Telemetry::URLCLASSIFIER_COMPLETION_ERROR,
                         hasUnknownThreatType ? UNKNOWN_THREAT_TYPE : SUCCESS);
 
   Telemetry::Accumulate(Telemetry::URLCLASSIFIER_NEGATIVE_CACHE_DURATION,
                         negCacheDuration);
--- a/toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
@@ -154,17 +154,17 @@ public:
 
     return NS_OK;
   }
 
   NS_IMETHOD
   OnResponseParsed(uint32_t aMinWaitDuration,
                    uint32_t aNegCacheDuration) override
   {
-    VerifyDuration(aMinWaitDuration, EXPECTED_MIN_WAIT_DURATION);
+    VerifyDuration(aMinWaitDuration / 1000, EXPECTED_MIN_WAIT_DURATION);
     VerifyDuration(aNegCacheDuration, EXPECTED_NEG_CACHE_DURATION);
 
     return NS_OK;
   }
 
 private:
   void
   Verify(const nsACString& aCompleteHash,
@@ -186,17 +186,17 @@ private:
     VerifyDuration(aPerHashCacheDuration, expected.mPerHashCacheDuration);
 
     mCallbackCount++;
   }
 
   void
   VerifyDuration(uint32_t aToVerify, const MyDuration& aExpected)
   {
-    ASSERT_TRUE(aToVerify == (aExpected.mSecs * 1000));
+    ASSERT_TRUE(aToVerify == aExpected.mSecs);
   }
 
   ~MyParseCallback() {}
 
   uint32_t& mCallbackCount;
 };
 
 NS_IMPL_ISUPPORTS(MyParseCallback, nsIUrlClassifierParseFindFullHashCallback)
--- a/toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
+++ b/toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
@@ -350,17 +350,17 @@ function hashCompleterServer(aRequest, a
 }
 
 
 function callback(completion) {
   this._completion = completion;
 }
 
 callback.prototype = {
-  completion: function completion(hash, table, chunkId, trusted) {
+  completionV2: function completion(hash, table, chunkId, trusted) {
     do_check_true(this._completion.expectCompletion);
     if (this._completion.multipleCompletions) {
       for (let completion of this._completion.completions) {
         if (completion.hash == hash) {
           do_check_eq(JSON.stringify(hash), JSON.stringify(completion.hash));
           do_check_eq(table, completion.table);
           do_check_eq(chunkId, completion.chunkId);
 
--- a/toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
+++ b/toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
@@ -85,52 +85,69 @@ add_test(function test_getHashRequestV4(
                                                    [btoa(NEW_CLIENT_STATE)],
                                                    [btoa("0123"), btoa("1234567"), btoa("1111")],
                                                    1,
                                                    3);
   registerHandlerGethashV4("&$req=" + request);
   let completeFinishedCnt = 0;
 
   gCompleter.complete("0123", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completion(hash, table, chunkId) {
-      equal(hash, "01234567890123456789012345678901");
+    completionV4(hash, table, duration, fullhashes) {
+      equal(hash, "0123");
       equal(table, TEST_TABLE_DATA_V4.tableName);
-      equal(chunkId, 0);
-      do_print("completion: " + hash + ", " + table + ", " + chunkId);
+      equal(duration, 120);
+      equal(fullhashes.length, 1);
+
+      let match = fullhashes.QueryInterface(Ci.nsIArray)
+                  .queryElementAt(0, Ci.nsIFullHashMatch);
+
+      equal(match.fullHash, "01234567890123456789012345678901");
+      equal(match.cacheDuration, 8);
+      do_print("completion: " + match.fullHash + ", " + table);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
     },
   });
 
   gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completion(hash, table, chunkId) {
-      equal(hash, "12345678901234567890123456789012");
+    completionV4(hash, table, duration, fullhashes) {
+      equal(hash, "1234567");
       equal(table, TEST_TABLE_DATA_V4.tableName);
-      equal(chunkId, 0);
-      do_print("completion: " + hash + ", " + table + ", " + chunkId);
+      equal(duration, 120);
+      equal(fullhashes.length, 1);
+
+      let match = fullhashes.QueryInterface(Ci.nsIArray)
+                  .queryElementAt(0, Ci.nsIFullHashMatch);
+
+      equal(match.fullHash, "12345678901234567890123456789012");
+      equal(match.cacheDuration, 7);
+      do_print("completion: " + match.fullHash + ", " + table);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
     },
   });
 
   gCompleter.complete("1111", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completion(hash, table, chunkId) {
-      ok(false, "1111 is not the prefix of " + hash);
+    completionV4(hash, table, duration, fullhashes) {
+      equal(hash, "1111");
+      equal(table, TEST_TABLE_DATA_V4.tableName);
+      equal(duration, 120);
+      equal(fullhashes.length, 0);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
@@ -144,21 +161,27 @@ add_test(function test_minWaitDuration()
       completionFinished(status) {
         equal(status, Cr.NS_ERROR_ABORT);
       },
     });
   };
 
   let successComplete = function() {
     gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-      completion(hash, table, chunkId) {
-        equal(hash, "12345678901234567890123456789012");
+      completionV4(hash, table, duration, fullhashes) {
+        equal(hash, "1234567");
         equal(table, TEST_TABLE_DATA_V4.tableName);
-        equal(chunkId, 0);
-        do_print("completion: " + hash + ", " + table + ", " + chunkId);
+        equal(fullhashes.length, 1);
+
+        let match = fullhashes.QueryInterface(Ci.nsIArray)
+                    .queryElementAt(0, Ci.nsIFullHashMatch);
+
+        equal(match.fullHash, "12345678901234567890123456789012");
+        equal(match.cacheDuration, 7);
+        do_print("completion: " + match.fullHash + ", " + table);
       },
 
       completionFinished(status) {
         equal(status, Cr.NS_OK);
         run_next_test();
       },
     });
   };
--- a/toolkit/components/url-classifier/tests/unit/test_partial.js
+++ b/toolkit/components/url-classifier/tests/unit/test_partial.js
@@ -30,17 +30,17 @@ complete: function(partialHash, gethashU
         cb.completionFinished(Cr.NS_ERROR_FAILURE);
         return;
       }
       var results;
       if (fragments[partialHash]) {
         for (var i = 0; i < fragments[partialHash].length; i++) {
           var chunkId = fragments[partialHash][i][0];
           var hash = fragments[partialHash][i][1];
-          cb.completion(hash, self.tableName, chunkId);
+          cb.completionV2(hash, self.tableName, chunkId);
         }
       }
     cb.completionFinished(0);
   }
   var timer = new Timer(0, doCallback);
 },
 
 getHash: function(fragment)