Bug 1413098 - part1 : add policy to decide whether allow audio context to start draft
author: Alastor Wu <alwu@mozilla.com>
Wed, 13 Dec 2017 14:05:35 -0600
changeset 711383 f074420d6a1ea14d60c16ccf8e3ff6671cff9f50
parent 709416 457b0fe91e0d49a5bc35014fb6f86729cd5bac9b
child 711384 8ce87260c0c6a8be1309e6228de1e5983e15fbce
child 711386 12170ba0686e049eac4184b4ec23738edf7e7bf4
child 711388 4c2ae8241861327d1c1a272e7204efb5e9a33637
child 711390 8c655bbb9df03b7c4071ac9fc2f166e48b8a1f18
push id: 93061
push user: alwu@mozilla.com
push date: Wed, 13 Dec 2017 21:40:29 +0000
bugs: 1413098
milestone59.0a1
Bug 1413098 - part1 : add policy to decide whether to allow audio context to start. An audio context is allowed to start if (1) its document has been activated by a user gesture, or (2) it's an offline audio context, because it won't directly output sound to audio devices. In addition, all resume promises stay pending until the audio context has been allowed and the user calls resume() again. MozReview-Commit-ID: G6RV8dDM6vQ
dom/media/AutoplayPolicy.cpp
dom/media/AutoplayPolicy.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
--- a/dom/media/AutoplayPolicy.cpp
+++ b/dom/media/AutoplayPolicy.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AutoplayPolicy.h"
 
 #include "mozilla/EventStateManager.h"
 #include "mozilla/Preferences.h"
+#include "mozilla/dom/AudioContext.h"
 #include "mozilla/dom/HTMLMediaElement.h"
 #include "nsIDocument.h"
 
 namespace mozilla {
 namespace dom {
 
 /* static */ bool
 AutoplayPolicy::IsDocumentAllowedToPlay(nsIDocument* aDoc)
@@ -45,10 +46,34 @@ AutoplayPolicy::IsMediaElementAllowedToP
       aElement->ReadyState() >= nsIDOMHTMLMediaElement::HAVE_METADATA &&
       !aElement->HasAudio()) {
     return true;
   }
 
   return AutoplayPolicy::IsDocumentAllowedToPlay(aElement->OwnerDoc());
 }
 
+/* static */ bool
+AutoplayPolicy::IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext)
+{
+  if (Preferences::GetBool("media.autoplay.enabled")) {
+    return true;
+  }
+
+  if (!Preferences::GetBool("media.autoplay.enabled.user-gestures-needed", false)) {
+    return true;
+  }
+
+  // Offline context won't directly output sound to audio devices.
+  if (aContext->IsOffline()) {
+    return true;
+  }
+
+  nsPIDOMWindowInner* window = aContext->GetOwner();
+  if (!window) {
+    return false;
+  }
+
+  return AutoplayPolicy::IsDocumentAllowedToPlay(window->GetExtantDoc());
+}
+
 } // namespace dom
 } // namespace mozilla
\ No newline at end of file
--- a/dom/media/AutoplayPolicy.h
+++ b/dom/media/AutoplayPolicy.h
@@ -10,32 +10,34 @@
 #include "mozilla/NotNull.h"
 
 class nsIDocument;
 
 namespace mozilla {
 namespace dom {
 
 class HTMLMediaElement;
+class AudioContext;
 
 /**
  * AutoplayPolicy is used to manage autoplay logic for all kinds of media,
  * including MediaElement, Web Audio and Web Speech.
  *
  * Autoplay can be disabled by turning off the pref "media.autoplay.enabled".
  * Once a user disables autoplay, media can only be played if one of the
  * following conditions is true.
  * 1) Owner document is activated by user gestures
  *    We restrict user gestures to "mouse click", "keyboard press" and "touch".
  * 2) Muted media content or video without audio content
  */
 class AutoplayPolicy
 {
 public:
   static bool IsMediaElementAllowedToPlay(NotNull<HTMLMediaElement*> aElement);
+  static bool IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext);
 private:
   static bool IsDocumentAllowedToPlay(nsIDocument* aDoc);
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
\ No newline at end of file
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioContext.h"
 
 #include "blink/PeriodicWave.h"
 
 #include "mozilla/ErrorResult.h"
+#include "mozilla/NotNull.h"
 #include "mozilla/OwningNonNull.h"
 #include "mozilla/RefPtr.h"
 
 #include "mozilla/dom/AnalyserNode.h"
 #include "mozilla/dom/AnalyserNodeBinding.h"
 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
 #include "mozilla/dom/AudioContextBinding.h"
 #include "mozilla/dom/BaseAudioContextBinding.h"
@@ -38,16 +39,17 @@
 
 #include "AudioBuffer.h"
 #include "AudioBufferSourceNode.h"
 #include "AudioChannelService.h"
 #include "AudioDestinationNode.h"
 #include "AudioListener.h"
 #include "AudioNodeStream.h"
 #include "AudioStream.h"
+#include "AutoplayPolicy.h"
 #include "BiquadFilterNode.h"
 #include "ChannelMergerNode.h"
 #include "ChannelSplitterNode.h"
 #include "ConstantSourceNode.h"
 #include "ConvolverNode.h"
 #include "DelayNode.h"
 #include "DynamicsCompressorNode.h"
 #include "GainNode.h"
@@ -77,16 +79,17 @@ namespace dom {
 static dom::AudioContext::AudioContextId gAudioContextId = 1;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
   }
   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly.
   // mAllNodes is an array of weak pointers, ignore it here.
   // mPannerNodes is an array of weak pointers, ignore it here.
   // mBasicWaveFormCache cannot participate in cycles, ignore it here.
 
@@ -95,16 +98,17 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Au
   tmp->DisconnectFromWindow();
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)
 
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                   DOMEventTargetHelper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     MOZ_ASSERT(tmp->mIsOffline,
                "Online AudioContexts should always be started");
     NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
   }
   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly.
   // mAllNodes is an array of weak pointers, ignore it here.
   // mPannerNodes is an array of weak pointers, ignore it here.
@@ -143,23 +147,37 @@ AudioContext::AudioContext(nsPIDOMWindow
   , mCloseCalled(false)
   , mSuspendCalled(false)
   , mIsDisconnecting(false)
 {
   bool mute = aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
+  bool allowToStart = AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this));
   mDestination = new AudioDestinationNode(this, aIsOffline,
-                                          aNumberOfChannels, aLength, aSampleRate);
+                                          aNumberOfChannels,
+                                          aLength,
+                                          aSampleRate,
+                                          allowToStart);
 
   // The context can't be muted until it has a destination.
   if (mute) {
     Mute();
   }
+
+  // If the audio context is not allowed to start, suspend all of its streams
+  // to delay the state change from 'suspended' to 'running'.
+  if (!allowToStart) {
+    ErrorResult rv;
+    RefPtr<Promise> dummy = Suspend(rv);
+    MOZ_ASSERT(!rv.Failed(), "can't create promise");
+    MOZ_ASSERT(dummy->State() != Promise::PromiseState::Rejected,
+               "suspend failed");
+  }
 }
 
 nsresult
 AudioContext::Init()
 {
   if (!mIsOffline) {
     nsresult rv = mDestination->CreateAudioChannelAgent();
     if (NS_WARN_IF(NS_FAILED(rv))) {
@@ -676,16 +694,21 @@ AudioContext::Shutdown()
       RefPtr<Promise> ignored = Close(dummy);
     }
 
     for (auto p : mPromiseGripArray) {
       p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     }
 
     mPromiseGripArray.Clear();
+
+    for (const auto& p : mPendingResumePromises) {
+      p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
+    }
+    mPendingResumePromises.Clear();
   }
 
   // Release references to active nodes.
   // Active AudioNodes don't unregister in destructors, at which point the
   // Node is already unregistered.
   mActiveNodes.Clear();
 
   // For offline contexts, we can destroy the MediaStreamGraph at this point.
@@ -855,16 +878,26 @@ AudioContext::OnStateChanged(void* aProm
     // already freed memory.
     if (mPromiseGripArray.Contains(promise)) {
       promise->MaybeResolveWithUndefined();
       DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
       MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
     }
   }
 
+  // Resolve all pending resume promises once the audio context has been
+  // allowed to start.
+  if (mAudioContextState == AudioContextState::Suspended &&
+      aNewState == AudioContextState::Running) {
+    for (const auto& p : mPendingResumePromises) {
+      p->MaybeResolveWithUndefined();
+    }
+    mPendingResumePromises.Clear();
+  }
+
   if (mAudioContextState != aNewState) {
     RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
     Dispatch(task.forget());
   }
 
   mAudioContextState = aNewState;
 }
 
@@ -938,32 +971,34 @@ AudioContext::Resume(ErrorResult& aRv)
   }
 
   if (mAudioContextState == AudioContextState::Closed ||
       mCloseCalled) {
     promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     return promise.forget();
   }
 
-  Destination()->Resume();
+  mPendingResumePromises.AppendElement(promise);
+
+  if (AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this))) {
+    Destination()->Resume();
 
-  nsTArray<MediaStream*> streams;
-  // If mSuspendCalled is false then we already resumed all our streams,
-  // so don't resume them again (since suspend(); resume(); resume(); should
-  // be OK). But we still need to do ApplyAudioContextOperation
-  // to ensure our new promise is resolved.
-  if (mSuspendCalled) {
-    streams = GetAllStreams();
+    nsTArray<MediaStream*> streams;
+    // If mSuspendCalled is false then we already resumed all our streams,
+    // so don't resume them again (since suspend(); resume(); resume(); should
+    // be OK). But we still need to do ApplyAudioContextOperation
+    // to ensure our new promise is resolved.
+    if (mSuspendCalled) {
+      streams = GetAllStreams();
+    }
+    Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
+                                        streams,
+                                        AudioContextOperation::Resume, promise);
+    mSuspendCalled = false;
   }
-  mPromiseGripArray.AppendElement(promise);
-  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                      streams,
-                                      AudioContextOperation::Resume, promise);
-
-  mSuspendCalled = false;
 
   return promise.forget();
 }
 
 already_AddRefed<Promise>
 AudioContext::Close(ErrorResult& aRv)
 {
   nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -342,19 +342,24 @@ private:
   const AudioContextId mId;
   // Note that it's important for mSampleRate to be initialized before
   // mDestination, as mDestination's constructor needs to access it!
   const float mSampleRate;
   AudioContextState mAudioContextState;
   RefPtr<AudioDestinationNode> mDestination;
   RefPtr<AudioListener> mListener;
   nsTArray<UniquePtr<WebAudioDecodeJob> > mDecodeJobs;
-  // This array is used to keep the suspend/resume/close promises alive until
+  // This array is used to keep the suspend/close promises alive until
   // they are resolved, so we can safely pass them accross threads.
   nsTArray<RefPtr<Promise>> mPromiseGripArray;
+  // This array keeps the resume promises alive until they are resolved, so we
+  // can safely pass them across threads. If the audio context is not allowed
+  // to play, the promises stay pending in this array and are only resolved
+  // once the audio context has been allowed and the user calls resume() again.
+  nsTArray<RefPtr<Promise>> mPendingResumePromises;
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Raw (non-owning) references to all AudioNodes for this AudioContext.
   nsTHashtable<nsPtrHashKey<AudioNode> > mAllNodes;
   // Hashsets containing all the PannerNodes, to compute the doppler shift.
   // These are weak pointers.
   nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes;
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -319,17 +319,19 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                            bool aIsOffline,
                                            uint32_t aNumberOfChannels,
-                                           uint32_t aLength, float aSampleRate)
+                                           uint32_t aLength,
+                                           float aSampleRate,
+                                           bool aAllowToStart)
   : AudioNode(aContext, aNumberOfChannels,
               ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mIsOffline(aIsOffline)
   , mAudioChannelSuspended(false)
   , mCaptured(false)
   , mAudible(AudioChannelService::AudibleState::eAudible)
 {
@@ -347,17 +349,17 @@ AudioDestinationNode::AudioDestinationNo
   AudioNodeStream::Flags flags =
     AudioNodeStream::NEED_MAIN_THREAD_CURRENT_TIME |
     AudioNodeStream::NEED_MAIN_THREAD_FINISHED |
     AudioNodeStream::EXTERNAL_OUTPUT;
   mStream = AudioNodeStream::Create(aContext, engine, flags, graph);
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
-  if (!aIsOffline) {
+  if (!aIsOffline && aAllowToStart) {
     graph->NotifyWhenGraphStarted(mStream);
   }
 }
 
 AudioDestinationNode::~AudioDestinationNode()
 {
 }
 
--- a/dom/media/webaudio/AudioDestinationNode.h
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -22,17 +22,18 @@ class AudioDestinationNode final : publi
 {
 public:
   // This node type knows what MediaStreamGraph to use based on
   // whether it's in offline mode.
   AudioDestinationNode(AudioContext* aContext,
                        bool aIsOffline,
                        uint32_t aNumberOfChannels = 0,
                        uint32_t aLength = 0,
-                       float aSampleRate = 0.0f);
+                       float aSampleRate = 0.0f,
+                       bool aAllowToStart = true);
 
   void DestroyMediaStream() override;
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioDestinationNode, AudioNode)
   NS_DECL_NSIAUDIOCHANNELAGENTCALLBACK
 
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;