Bug 1325341 - Add code that triggers racing the network and cache requests r=michal draft
author Valentin Gosu <valentin.gosu@gmail.com>
Mon, 03 Apr 2017 14:12:39 +0200
changeset 554997 07d58fb387f121e94aa61f0d9c85a75c79d7e292
parent 551789 9577ddeaafd85554c2a855f385a87472a089d5c0
child 554998 9bc10faca1e29e2221b179819ad359e6cfbd346e
push id 52110
push user valentin.gosu@gmail.com
push date Mon, 03 Apr 2017 12:42:26 +0000
reviewers michal
bugs 1325341
milestone 55.0a1
Bug 1325341 - Add code that triggers racing the network and cache requests r=michal

The racing algorithm is quite simple at this point: if racing is enabled, the request is allowed to hit the network, and the cache queue size is above a certain threshold, we trigger the network right before we query the cache.

MozReview-Commit-ID: JklG4P1eRyO
modules/libpref/init/all.js
netwerk/protocol/http/nsHttpChannel.cpp
netwerk/protocol/http/nsHttpChannel.h
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -1619,16 +1619,21 @@ pref("network.http.enforce-framing.soft"
 // array - behavior as it used to be. If it is true: empty headers coming from
 // the network will exist in the header array as an empty string. Calling SetHeader
 // with an empty value will still delete the header. (Bug 6699259)
 pref("network.http.keep_empty_response_headers_as_empty_string", true);
 
 // Max size, in bytes, for received HTTP response header.
 pref("network.http.max_response_header_size", 393216);
 
+// Whether we should attempt to race the cache and the network.
+pref("network.http.rcwn.enabled", true);
+pref("network.http.rcwn.cache_queue_normal_threshold", 50);
+pref("network.http.rcwn.cache_queue_priority_threshold", 10);
+
 // default values for FTP
 // in a DSCP environment this should be 40 (0x28, or AF11), per RFC-4594,
 // Section 4.8 "High-Throughput Data Service Class", and 80 (0x50, or AF22)
 // per Section 4.7 "Low-Latency Data Service Class".
 pref("network.ftp.data.qos", 0);
 pref("network.ftp.control.qos", 0);
 
 // The max time to spend on xpcom events between two polls in ms.
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -114,16 +114,19 @@
 
 namespace mozilla { namespace net {
 
 namespace {
 
 // Monotonically increasing ID for generating unique cache entries per
 // intercepted channel.
 static uint64_t gNumIntercepted = 0;
+static bool sRCWNEnabled = false;
+static uint32_t sRCWNQueueSizeNormal = 50;
+static uint32_t sRCWNQueueSizePriority = 10;
 
 // True if the local cache should be bypassed when processing a request.
 #define BYPASS_LOCAL_CACHE(loadFlags) \
         (loadFlags & (nsIRequest::LOAD_BYPASS_CACHE | \
                       nsICachingChannel::LOAD_BYPASS_LOCAL_CACHE))
 
 #define RECOVER_FROM_CACHE_FILE_ERROR(result) \
         ((result) == NS_ERROR_FILE_NOT_FOUND || \
@@ -2042,17 +2045,19 @@ nsHttpChannel::ProcessResponse()
         // for Strict-Transport-Security.
     } else {
         // Given a successful connection, process any STS or PKP data that's
         // relevant.
         DebugOnly<nsresult> rv = ProcessSecurityHeaders();
         MOZ_ASSERT(NS_SUCCEEDED(rv), "ProcessSTSHeader failed, continuing load.");
     }
 
-    MOZ_ASSERT(!mCachedContentIsValid);
+    MOZ_ASSERT(!mCachedContentIsValid || mRacingNetAndCache,
+               "We should not be hitting the network if we have valid cached "
+               "content unless we are racing the network and cache");
 
     ProcessSSLInformation();
 
     // notify "http-on-examine-response" observers
     gHttpHandler->OnExamineResponse(this);
 
     return ContinueProcessResponse1();
 }
@@ -3601,16 +3606,20 @@ nsHttpChannel::OpenCacheEntry(bool isHtt
             DebugOnly<bool> exists;
             MOZ_ASSERT(NS_SUCCEEDED(cacheStorage->Exists(openURI, extension, &exists)) && exists,
                        "The entry must exist in the cache after we create it here");
         }
 
         mCacheOpenWithPriority = cacheEntryOpenFlags & nsICacheStorage::OPEN_PRIORITY;
         mCacheQueueSizeWhenOpen = CacheStorageService::CacheQueueSize(mCacheOpenWithPriority);
 
+        if (sRCWNEnabled && mInterceptCache != INTERCEPTED) {
+            MaybeRaceNetworkWithCache();
+        }
+
         if (!mCacheOpenDelay) {
             rv = cacheStorage->AsyncOpenURI(openURI, extension, cacheEntryOpenFlags, this);
         } else {
             // We pass `this` explicitly as a parameter due to the raw pointer
             // to refcounted object in lambda analysis.
             mCacheOpenFunc = [openURI, extension, cacheEntryOpenFlags, cacheStorage] (nsHttpChannel* self) -> void {
                 cacheStorage->AsyncOpenURI(openURI, extension, cacheEntryOpenFlags, self);
             };
@@ -5145,17 +5154,17 @@ nsHttpChannel::FinalizeCacheEntry()
 nsresult
 nsHttpChannel::InstallCacheListener(int64_t offset)
 {
     nsresult rv;
 
     LOG(("Preparing to write data into the cache [uri=%s]\n", mSpec.get()));
 
     MOZ_ASSERT(mCacheEntry);
-    MOZ_ASSERT(mCacheEntryIsWriteOnly || mCachedContentIsPartial);
+    MOZ_ASSERT(mCacheEntryIsWriteOnly || mCachedContentIsPartial || mRacingNetAndCache);
     MOZ_ASSERT(mListener);
 
     nsAutoCString contentEncoding, contentType;
     Unused << mResponseHead->GetHeader(nsHttp::Content_Encoding, contentEncoding);
     mResponseHead->ContentType(contentType);
     // If the content is compressible and the server has not compressed it,
     // mark the cache entry for compression.
     if (contentEncoding.IsEmpty() &&
@@ -5801,16 +5810,24 @@ nsHttpChannel::AsyncOpen(nsIStreamListen
     MOZ_ASSERT(NS_IsMainThread());
 
     if (!gHttpHandler->Active()) {
         LOG(("  after HTTP shutdown..."));
         ReleaseListeners();
         return NS_ERROR_NOT_AVAILABLE;
     }
 
+    static bool sRCWNInited = false;
+    if (!sRCWNInited) {
+        sRCWNInited = true;
+        Preferences::AddBoolVarCache(&sRCWNEnabled, "network.http.rcwn.enabled");
+        Preferences::AddUintVarCache(&sRCWNQueueSizeNormal, "network.http.rcwn.cache_queue_normal_threshold");
+        Preferences::AddUintVarCache(&sRCWNQueueSizePriority, "network.http.rcwn.cache_queue_priority_threshold");
+    }
+
     rv = NS_CheckPortSafety(mURI);
     if (NS_FAILED(rv)) {
         ReleaseListeners();
         return rv;
     }
 
     if (mInterceptCache != INTERCEPTED && ShouldIntercept()) {
         mInterceptCache = MAYBE_INTERCEPT;
@@ -7160,33 +7177,34 @@ nsHttpChannel::OnDataAvailable(nsIReques
 {
     PROFILER_LABEL("nsHttpChannel", "OnDataAvailable",
         js::ProfileEntry::Category::NETWORK);
 
     LOG(("nsHttpChannel::OnDataAvailable [this=%p request=%p offset=%" PRIu64
          " count=%" PRIu32 "]\n",
         this, request, offset, count));
 
-    LOG(("OnDataAvailable %p requestFromCache: %d mFirstResponseSource: %d\n", this, request == mCachePump, mFirstResponseSource));
+    LOG(("  requestFromCache: %d mFirstResponseSource: %d\n",
+        request == mCachePump, mFirstResponseSource));
 
     // don't send out OnDataAvailable notifications if we've been canceled.
     if (mCanceled)
         return mStatus;
 
+    if (mAuthRetryPending || WRONG_RACING_RESPONSE_SOURCE(request) ||
+        (request == mTransactionPump && mTransactionReplaced)) {
+        uint32_t n;
+        return input->ReadSegments(NS_DiscardSegment, nullptr, count, &n);
+    }
+
     MOZ_ASSERT(mResponseHead, "No response head in ODA!!");
 
     MOZ_ASSERT(!(mCachedContentIsPartial && (request == mTransactionPump)),
                "transaction pump not suspended");
 
-    if (mAuthRetryPending || WRONG_RACING_RESPONSE_SOURCE(request) ||
-        (request == mTransactionPump && mTransactionReplaced)) {
-        uint32_t n;
-        return input->ReadSegments(NS_DiscardSegment, nullptr, count, &n);
-    }
-
     mIsReadingFromCache = (request == mCachePump);
 
     if (mListener) {
         //
         // synthesize transport progress event.  we do this here since we want
         // to delay OnProgress events until we start streaming data.  this is
         // crucially important since it impacts the lock icon (see bug 240053).
         //
@@ -8715,17 +8733,19 @@ nsHttpChannel::Test_triggerDelayedOpenCa
 }
 
 nsresult
 nsHttpChannel::TriggerNetwork(int32_t aTimeout)
 {
     MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
     // If a network request has already gone out, there is no point in
     // doing this again.
+    LOG(("nsHttpChannel::TriggerNetwork [this=%p]\n", this));
     if (mNetworkTriggered) {
+        LOG(("  network already triggered. Returning.\n"));
         return NS_OK;
     }
 
     if (!aTimeout) {
         mNetworkTriggered = true;
         if (!mOnCacheAvailableCalled) {
             // If the network was triggered before onCacheEntryAvailable was
             // called, we are either racing network and cache, or the load is
@@ -8738,28 +8758,52 @@ nsHttpChannel::TriggerNetwork(int32_t aT
         }
 
         // If we are waiting for a proxy request, that means we can't trigger
 // the next step just yet. We need mConnectionInfo to be non-null
         // before we call TryHSTSPriming. OnProxyAvailable will trigger
         // BeginConnect, and Connect will call TryHSTSPriming even if it's
         // for the cache callbacks.
         if (mProxyRequest) {
+            LOG(("  proxy request in progress. Delaying network trigger.\n"));
             mWaitingForProxy = true;
             return NS_OK;
         }
 
+        LOG(("  triggering network\n"));
         return TryHSTSPriming();
     }
 
+    LOG(("  setting timer to trigger network: %d ms\n", aTimeout));
     mNetworkTriggerTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
     mNetworkTriggerTimer->InitWithCallback(this, aTimeout, nsITimer::TYPE_ONE_SHOT);
     return NS_OK;
 }
 
+nsresult
+nsHttpChannel::MaybeRaceNetworkWithCache()
+{
+    // Don't trigger the network if the load flags say so.
+    if (mLoadFlags & (LOAD_ONLY_FROM_CACHE | LOAD_NO_NETWORK_IO)) {
+        return NS_OK;
+    }
+
+    uint32_t threshold = mCacheOpenWithPriority ? sRCWNQueueSizePriority
+                                                : sRCWNQueueSizeNormal;
+    // No need to trigger the racing, since the cache will most likely be
+    // faster.
+    if (mCacheQueueSizeWhenOpen < threshold) {
+        return NS_OK;
+    }
+
+    MOZ_ASSERT(sRCWNEnabled, "The pref must be turned on.");
+    LOG(("nsHttpChannel::MaybeRaceNetworkWithCache [this=%p]\n", this));
+    return TriggerNetwork(0);
+}
+
 NS_IMETHODIMP
 nsHttpChannel::Test_triggerNetwork(int32_t aTimeout)
 {
     MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
     return TriggerNetwork(aTimeout);
 }
 
 NS_IMETHODIMP
--- a/netwerk/protocol/http/nsHttpChannel.h
+++ b/netwerk/protocol/http/nsHttpChannel.h
@@ -649,16 +649,20 @@ private:
 
     // We need to remember which is the source of the response we are using.
     enum {
         RESPONSE_PENDING,           // response is pending
         RESPONSE_FROM_CACHE,        // response coming from cache. no network.
         RESPONSE_FROM_NETWORK,      // response coming from the network
     } mFirstResponseSource = RESPONSE_PENDING;
 
+    // Determines if it's possible and advisable to race the network request
+    // with the cache fetch, and proceeds to do so.
+    nsresult MaybeRaceNetworkWithCache();
+
     nsresult TriggerNetwork(int32_t aTimeout);
     void CancelNetworkRequest(nsresult aStatus);
     // Timer used to delay the network request, or to trigger the network
     // request if retrieving the cache entry takes too long.
     nsCOMPtr<nsITimer> mNetworkTriggerTimer;
     // Is true if the network request has been triggered.
     bool mNetworkTriggered = false;
     bool mWaitingForProxy = false;