Bug 1413098 - Part 5 - Allow starting an AudioContext when gUM has been allowed. r=pehrsons
author Paul Adenot <paul@paul.cx>
Fri, 27 Apr 2018 19:13:40 +0200
changeset 803532 927d9d0ca04dd593d49efdffe70ce9f70ddce8bd
parent 803531 a12632d384ce175b34b8ba987ca49ec96fbb7a1f
push id 112134
push user paul@paul.cx
push date Mon, 04 Jun 2018 13:38:42 +0000
reviewers pehrsons
bugs 1413098
milestone 62.0a1
Bug 1413098 - Part 5 - Allow starting an AudioContext when gUM has been allowed. r=pehrsons MozReview-Commit-ID: E0fcpIbLhYJ
dom/media/AutoplayPolicy.cpp
toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
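The sketch below is not part of the patch; it is a minimal illustration of the behaviour this change enables from a content page's point of view, assuming media.autoplay.enabled is false so autoplay normally requires a user gesture or permission: an AudioContext created without user interaction stays suspended, but once a getUserMedia() request has been granted, resume() is allowed to start it.

    // Hypothetical page-side illustration, not part of this patch.
    const ac = new AudioContext();
    console.log(ac.state); // "suspended": no gesture and no permission yet

    navigator.mediaDevices.getUserMedia({ audio: true })
      .then(() => ac.resume())              // gUM granted: starting is allowed
      .then(() => console.log(ac.state))    // "running"
      .catch(() => console.log(ac.state));  // gUM denied: stays "suspended"
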
--- a/dom/media/AutoplayPolicy.cpp
+++ b/dom/media/AutoplayPolicy.cpp
@@ -85,17 +85,26 @@ AutoplayPolicy::IsAudioContextAllowedToP
     return true;
   }
 
   nsPIDOMWindowInner* window = aContext->GetOwner();
   if (!window) {
     return false;
   }
 
-   nsCOMPtr<nsIPrincipal> principal = aContext->GetParentObject()->AsGlobal()->PrincipalOrNull();
+  // Pages that have been granted permission to capture camera or microphone
+  // via WebRTC are assumed to be trusted and are allowed to autoplay.
+  MediaManager* manager = MediaManager::GetIfExists();
+  if (manager) {
+    if (manager->IsActivelyCapturingOrHasAPermission(window->WindowID())) {
+      return true;
+    }
+  }
+
+  nsCOMPtr<nsIPrincipal> principal = aContext->GetParentObject()->AsGlobal()->PrincipalOrNull();
 
   // Whitelisted.
   if (principal &&
       nsContentUtils::IsExactSitePermAllow(principal, "autoplay-media")) {
     return true;
   }
 
   // Activated by user gesture.
--- a/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
+++ b/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
@@ -12,17 +12,18 @@ var UserGestureTests = [
   {type: UserGestures.MOUSE_CLICK, isActivationGesture: true},
   {type: UserGestures.MOUSE_MOVE, isActivationGesture: false},
   {type: UserGestures.KEYBOARD_PRESS, isActivationGesture: true}
 ];
 
 function setup_test_preference() {
   return SpecialPowers.pushPrefEnv({"set": [
     ["media.autoplay.enabled", false],
-    ["media.autoplay.enabled.user-gestures-needed", true]
+    ["media.autoplay.enabled.user-gestures-needed", true],
+    ["media.navigator.permission.fake", true]
   ]});
 }
 
 function simulateUserGesture(gesture, targetBrowser) {
   info(`- simulate ${gesture.type} event -`);
   switch (gesture.type) {
     case UserGestures.MOUSE_CLICK:
       return BrowserTestUtils.synthesizeMouseAtCenter("body", {button: 0},
@@ -100,96 +101,153 @@ async function test_play_with_user_gestu
   }
 
   await ContentTask.spawn(tab.linkedBrowser, gesture, play_video);
 
   info("- remove tab -");
   BrowserTestUtils.removeTab(tab);
 }
 
-async function test_webaudio_with_user_gesture(gesture) {
-  function createAudioContext() {
-    content.ac = new content.AudioContext();
-    let ac = content.ac;
-    ac.resumePromises = [];
-    ac.stateChangePromise = new Promise(resolve => {
-      ac.addEventListener("statechange", function() {
+function createAudioContext() {
+  content.ac = new content.AudioContext();
+  let ac = content.ac;
+  ac.resumePromises = [];
+  ac.stateChangePromise = new Promise(resolve => {
+    ac.addEventListener("statechange", function() {
+      resolve();
+    }, {once: true});
+  });
+}
+
+async function checking_audio_context_running_state() {
+  let ac = content.ac;
+  await new Promise(r => setTimeout(r, 2000));
+  is(ac.state, "suspended", "audio context is still suspended");
+}
+
+function resume_without_expected_success() {
+  let ac = content.ac;
+  let promise = ac.resume();
+  ac.resumePromises.push(promise);
+  return new Promise((resolve, reject) => {
+    setTimeout(() => {
+      if (ac.state == "suspended") {
+        ok(true, "audio context is still suspended");
         resolve();
-      }, {once: true});
-    });
-  }
+      } else {
+        reject("audio context should not be allowed to start");
+      }
+    }, 2000);
+  });
+}
 
-  function checking_audio_context_running_state() {
-    let ac = content.ac;
-    return new Promise(resolve => {
-      setTimeout(() => {
-        ok(ac.state == "suspended", "audio context is still suspended");
-        resolve();
-      }, 4000);
-    });
+function resume_with_expected_success() {
+  let ac = content.ac;
+  ac.resumePromises.push(ac.resume());
+  return Promise.all(ac.resumePromises).then(() => {
+    ok(ac.state == "running", "audio context starts running");
+  });
+}
+
+function callGUM(testParameters) {
+  info("- calling gum with " + JSON.stringify(testParameters.constraints));
+  if (testParameters.shouldAllowStartingContext) {
+    // Thanks to the fake permission pref and the fake constraint set below,
+    // this gUM call succeeds and allows the window to start an AudioContext.
+    testParameters.constraints.fake = true;
+    return content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
   }
 
-  function resume_without_supported_user_gestures() {
-    let ac = content.ac;
-    let promise = ac.resume();
-    ac.resumePromises.push(promise);
-    return new Promise((resolve, reject) => {
-      setTimeout(() => {
-        if (ac.state == "suspended") {
-          ok(true, "audio context is still suspended");
-          resolve();
-        } else {
-          reject("audio context should not be allowed to start");
-        }
-      }, 4000);
-    });
-  }
+  // Call gUM, without success: we've made it so that only fake requests
+  // succeed without permission, and this one requests non-fake devices. Return
+  // a resolved promise so that the test continues; the getUserMedia Promise
+  // itself will never be resolved.
+  // We do this to check that merely calling gUM is not enough to allow
+  // starting an AudioContext: the Promise it returns has to resolve
+  // successfully, because of saved permissions for the origin or explicit
+  // user consent through the prompt.
+  content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
+  return Promise.resolve();
+}
 
-  function resume_with_supported_user_gestures() {
-    let ac = content.ac;
-    ac.resumePromises.push(ac.resume());
-    return Promise.all(ac.resumePromises).then(() => {
-      ok(ac.state == "running", "audio context starts running");
-    });
-  }
 
+async function test_webaudio_with_user_gesture(gesture) {
   info("- open new tab -");
   let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
                                                         "about:blank");
   info("- create audio context -");
-  // We want the same audio context could be used between different content
-  // tasks, so it *must* need to be loaded by frame script.
+  // We want the same audio context to be used across different content
+  // tasks, so it needs to be loaded by a frame script.
   let frameScript = createAudioContext;
   let mm = tab.linkedBrowser.messageManager;
   mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
 
   info("- check whether audio context starts running -");
   try {
     await ContentTask.spawn(tab.linkedBrowser, null,
                             checking_audio_context_running_state);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- calling resume() -");
   try {
     await ContentTask.spawn(tab.linkedBrowser, null,
-                            resume_without_supported_user_gestures);
+                            resume_without_expected_success);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- simulate user gesture -");
   await simulateUserGesture(gesture, tab.linkedBrowser);
 
   info("- calling resume() again");
   try {
     let resumeFunc = gesture.isActivationGesture ?
-      resume_with_supported_user_gestures :
-      resume_without_supported_user_gestures;
+      resume_with_expected_success :
+      resume_without_expected_success;
+    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  info("- remove tab -");
+  await BrowserTestUtils.removeTab(tab);
+}
+
+async function test_webaudio_with_gum(testParameters) {
+  info("- open new tab -");
+  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
+                                                        "about:blank");
+  info("- create audio context -");
+  // We want the same audio context to be used across different content
+  // tasks, so it needs to be loaded by a frame script.
+  let frameScript = createAudioContext;
+  let mm = tab.linkedBrowser.messageManager;
+  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
+
+  info("- check whether audio context starts running -");
+  try {
+    await ContentTask.spawn(tab.linkedBrowser, null,
+                            checking_audio_context_running_state);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  try {
+    await ContentTask.spawn(tab.linkedBrowser, testParameters, callGUM);
+  } catch (error) {
+    ok(false, error.toString());
+  }
+
+  info("- calling resume() again");
+  try {
+    let resumeFunc = testParameters.shouldAllowStartingContext ?
+      resume_with_expected_success :
+      resume_without_expected_success;
     await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
   } catch (error) {
     ok(false, error.toString());
   }
 
   info("- remove tab -");
   await BrowserTestUtils.removeTab(tab);
 }
@@ -204,9 +262,32 @@ add_task(async function start_test() {
   info("- test play after page got user gesture -");
   for (let idx = 0; idx < UserGestureTests.length; idx++) {
     info("- test play after page got user gesture -");
     await test_play_with_user_gesture(UserGestureTests[idx]);
 
     info("- test web audio with user gesture -");
     await test_webaudio_with_user_gesture(UserGestureTests[idx]);
   }
+
+  info("- test web audio with gUM success -");
+
+  await test_webaudio_with_gum({constraints: { audio: true },
+                                shouldAllowStartingContext: true});
+  await test_webaudio_with_gum({constraints: { video: true },
+                                shouldAllowStartingContext: true});
+  await test_webaudio_with_gum({constraints: { video: true,
+                                               audio: true },
+                                shouldAllowStartingContext: true});
+
+  await SpecialPowers.pushPrefEnv({"set": [
+    ["media.navigator.permission.force", true],
+  ]}).then(async function() {
+    info("- test web audio with gUM denied -");
+    await test_webaudio_with_gum({constraints: { video: true },
+                                  shouldAllowStartingContext: false});
+    await test_webaudio_with_gum({constraints: { audio: true },
+                                  shouldAllowStartingContext: false});
+    await test_webaudio_with_gum({constraints: { video: true,
+                                                 audio: true },
+                                  shouldAllowStartingContext: false});
+  });
 });