bug 1391482 add a mechanism to pass an AudioChunk from node to engine r?padenot draft
authorKarl Tomlinson <karlt+@karlt.net>
Tue, 01 Aug 2017 20:04:56 +1200
changeset 648598 2128750db4677bb595c5df4006b2bca10b80ba63
parent 648597 442fcfdf9246941fb0e62c5577be45b9322c7062
child 648599 e42ede828bb6f7f1cadbd0e8f5b6eb15ebc6a5ea
push id74809
push userktomlinson@mozilla.com
push dateFri, 18 Aug 2017 01:10:01 +0000
reviewerspadenot
bugs1391482
milestone57.0a1
bug 1391482 add a mechanism to pass an AudioChunk from node to engine r?padenot MozReview-Commit-ID: Akfy9xDKzXg
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -293,16 +293,20 @@ public:
                                        const dom::ThreeDPoint& aValue)
   {
     NS_ERROR("Invalid SetThreeDPointParameter index");
   }
   virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
   {
     NS_ERROR("SetBuffer called on engine that doesn't support it");
   }
+  virtual void SetBuffer(AudioChunk&& aBuffer)
+  {
+    NS_ERROR("SetBuffer called on engine that doesn't support it");
+  }
   // This consumes the contents of aData.  aData will be emptied after this returns.
   virtual void SetRawArrayData(nsTArray<float>& aData)
   {
     NS_ERROR("SetRawArrayData called on an engine that doesn't support it");
   }
 
   /**
    * Produce the next block of audio samples, given input samples aInput
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -270,16 +270,36 @@ AudioNodeStream::SetBuffer(already_AddRe
     }
     RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   };
 
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aBuffer));
 }
 
 void
+AudioNodeStream::SetBuffer(AudioChunk&& aBuffer)
+{
+  class Message final : public ControlMessage
+  {
+  public:
+    Message(AudioNodeStream* aStream, AudioChunk&& aBuffer)
+      : ControlMessage(aStream), mBuffer(Move(aBuffer)) // aBuffer is an lvalue here; without Move() the chunk is copied
+    {}
+    void Run() override
+    {
+      static_cast<AudioNodeStream*>(mStream)->Engine()->
+        SetBuffer(Move(mBuffer));
+    }
+    AudioChunk mBuffer;
+  };
+
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, Move(aBuffer)));
+}
+
+void
 AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
 {
   class Message final : public ControlMessage
   {
   public:
     Message(AudioNodeStream* aStream,
             nsTArray<float>& aData)
       : ControlMessage(aStream)
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -87,16 +87,17 @@ public:
    * This time is converted to a time relative to this stream when it's set.
    */
   void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                               double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer);
+  void SetBuffer(AudioChunk&& aBuffer);
   // This sends a single event to the timeline on the MSG thread side.
   void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent);
   // This consumes the contents of aData.  aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
   void SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                   ChannelCountMode aChannelCountMoe,
                                   ChannelInterpretation aChannelInterpretation);
   void SetPassThrough(bool aPassThrough);