Bug 1265394 - Use new PannerNode AudioParams; r?padenot draft
authorDan Minor <dminor@mozilla.com>
Thu, 23 Jun 2016 13:42:12 -0400
changeset 384170 9d7e8e734f54d18af44f6a3c72a21b2d9b38d83f
parent 384169 be8c2fea3a6b87b858579e099ad6f2ca362e5394
child 384171 376202ac4ff19a192042abd3f574b6e3b48f19ac
push id 22187
push user dminor@mozilla.com
push date Tue, 05 Jul 2016 18:31:32 +0000
reviewers padenot
bugs 1265394
milestone 50.0a1
Bug 1265394 - Use new PannerNode AudioParams; r?padenot

MozReview-Commit-ID: 80n4dp8IrbM
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/PannerNode.h
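
For orientation before the diff: the patch replaces the engine's ThreeDPoint mPosition/mOrientation with six AudioParamTimelines (position X/Y/Z, orientation X/Y/Z), forwards automation events to them through RecvTimelineEvent, and passes the current StreamTime tick into the panning functions so each parameter can be evaluated once per block (constant value) or once per frame (automated value). A minimal standalone sketch of that evaluation pattern follows; ParamTimeline, BLOCK_SIZE and SampleParam are hypothetical stand-ins, not Gecko's actual AudioParamTimeline API.

#include <array>
#include <cstddef>
#include <cstdio>

constexpr size_t BLOCK_SIZE = 128; // one render quantum, as in Web Audio

// Hypothetical stand-in exposing only the interface the engine relies on:
// HasSimpleValue(), GetValueAtTime(), GetValuesAtTime().
struct ParamTimeline {
  bool hasEvents = false; // becomes true once automation events are inserted
  float value = 0.f;      // the current "simple" (un-automated) value

  bool HasSimpleValue() const { return !hasEvents; }
  float GetValueAtTime(size_t /*aTick*/) const { return value; }
  void GetValuesAtTime(size_t aTick, float* aOut, size_t aCount) const {
    // A real timeline evaluates scheduled events; this fake just ramps.
    for (size_t i = 0; i < aCount; ++i) {
      aOut[i] = value + 0.001f * static_cast<float>(aTick + i);
    }
  }
};

// The dispatch the panner uses: constant parameters are evaluated once per
// block (k-rate), automated parameters once per frame (a-rate).
void SampleParam(const ParamTimeline& aParam, size_t aTick,
                 std::array<float, BLOCK_SIZE>& aOut) {
  if (aParam.HasSimpleValue()) {
    aOut.fill(aParam.GetValueAtTime(aTick)); // one value for the whole block
  } else {
    aParam.GetValuesAtTime(aTick, aOut.data(), BLOCK_SIZE); // per-frame values
  }
}

int main() {
  ParamTimeline positionX;
  positionX.value = 0.5f;
  std::array<float, BLOCK_SIZE> values;
  SampleParam(positionX, /*aTick=*/0, values);
  std::printf("positionX at frame 0: %f\n", values[0]);
  return 0;
}
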
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -19,57 +19,91 @@ using WebCore::HRTFDatabaseLoader;
 using WebCore::HRTFPanner;
 
 namespace mozilla {
 namespace dom {
 
 using namespace std;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)
-
-NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(PannerNode)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PannerNode, AudioNode)
   if (tmp->Context()) {
     tmp->Context()->UnregisterPannerNode(tmp);
   }
-NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
-
+NS_IMPL_CYCLE_COLLECTION_UNLINK(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)
 
 class PannerNodeEngine final : public AudioNodeEngine
 {
 public:
-  explicit PannerNodeEngine(AudioNode* aNode)
+  explicit PannerNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
     : AudioNodeEngine(aNode)
+    , mDestination(aDestination->Stream())
     // Please keep these default values consistent with PannerNode::PannerNode below.
     , mPanningModelFunction(&PannerNodeEngine::EqualPowerPanningFunction)
     , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
-    , mPosition()
-    , mOrientation(1., 0., 0.)
+    , mPositionX(0.)
+    , mPositionY(0.)
+    , mPositionZ(0.)
+    , mOrientationX(1.)
+    , mOrientationY(0.)
+    , mOrientationZ(0.)
     , mVelocity()
     , mRefDistance(1.)
     , mMaxDistance(10000.)
     , mRolloffFactor(1.)
     , mConeInnerAngle(360.)
     , mConeOuterAngle(360.)
     , mConeOuterGain(0.)
     // These will be initialized when a PannerNode is created, so just initialize them
     // to some dummy values here.
     , mListenerDopplerFactor(0.)
     , mListenerSpeedOfSound(0.)
     , mLeftOverData(INT_MIN)
   {
   }
 
+  void RecvTimelineEvent(uint32_t aIndex, AudioTimelineEvent& aEvent) override
+  {
+    MOZ_ASSERT(mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mDestination);
+    switch (aIndex) {
+    case PannerNode::POSITIONX:
+      mPositionX.InsertEvent<int64_t>(aEvent);
+      break;
+    case PannerNode::POSITIONY:
+      mPositionY.InsertEvent<int64_t>(aEvent);
+      break;
+    case PannerNode::POSITIONZ:
+      mPositionZ.InsertEvent<int64_t>(aEvent);
+      break;
+    case PannerNode::ORIENTATIONX:
+      mOrientationX.InsertEvent<int64_t>(aEvent);
+      break;
+    case PannerNode::ORIENTATIONY:
+      mOrientationY.InsertEvent<int64_t>(aEvent);
+      break;
+    case PannerNode::ORIENTATIONZ:
+      mOrientationZ.InsertEvent<int64_t>(aEvent);
+      break;
+    default:
+      NS_ERROR("Bad PannerNode TimelineParameter");
+    }
+  }
+
   void CreateHRTFPanner()
   {
     MOZ_ASSERT(NS_IsMainThread());
     if (mHRTFPanner) {
       return;
     }
     // HRTFDatabaseLoader needs to be fetched on the main thread.
     already_AddRefed<HRTFDatabaseLoader> loader =
@@ -115,18 +149,26 @@ public:
   }
   void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) override
   {
     switch (aIndex) {
     case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break;
     case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break;
     case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break;
     case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break;
-    case PannerNode::POSITION: mPosition = aParam; break;
-    case PannerNode::ORIENTATION: mOrientation = aParam; break;
+    case PannerNode::POSITION:
+      mPositionX.SetValue(aParam.x);
+      mPositionY.SetValue(aParam.y);
+      mPositionZ.SetValue(aParam.z);
+      break;
+    case PannerNode::ORIENTATION:
+      mOrientationX.SetValue(aParam.x);
+      mOrientationY.SetValue(aParam.y);
+      mOrientationZ.SetValue(aParam.z);
+      break;
     case PannerNode::VELOCITY: mVelocity = aParam; break;
     default:
       NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
     }
   }
   void SetDoubleParameter(uint32_t aIndex, double aParam) override
   {
     switch (aIndex) {
@@ -175,61 +217,69 @@ public:
         RefPtr<PlayingRefChangeHandler> refchanged =
           new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
         aStream->Graph()->
           DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
       }
       mLeftOverData = mHRTFPanner->maxTailFrames();
     }
 
-    (this->*mPanningModelFunction)(aInput, aOutput);
+    StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
+    (this->*mPanningModelFunction)(aInput, aOutput, tick);
   }
 
   bool IsActive() const override
   {
     return mLeftOverData != INT_MIN;
   }
 
-  void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
-  float ComputeConeGain();
+  void ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation);
+  float ComputeConeGain(const ThreeDPoint& position, const ThreeDPoint& orientation);
   // Compute how much the distance contributes to the gain reduction.
-  float ComputeDistanceGain();
+  float ComputeDistanceGain(const ThreeDPoint& position);
 
-  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput);
-  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput);
+  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
+  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
 
   float LinearGainFunction(float aDistance);
   float InverseGainFunction(float aDistance);
   float ExponentialGainFunction(float aDistance);
 
+  ThreeDPoint ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime& tick);
+
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
     if (mHRTFPanner) {
       amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
     }
 
     return amount;
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
+  AudioNodeStream* mDestination;
   // This member is set on the main thread, but is not accessed on the rendering
  // thread until mPanningModelFunction has changed, and this happens strictly
   // later, via a MediaStreamGraph ControlMessage.
   nsAutoPtr<HRTFPanner> mHRTFPanner;
-  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput);
+  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
   PanningModelFunction mPanningModelFunction;
   typedef float (PannerNodeEngine::*DistanceModelFunction)(float aDistance);
   DistanceModelFunction mDistanceModelFunction;
-  ThreeDPoint mPosition;
-  ThreeDPoint mOrientation;
+  AudioParamTimeline mPositionX;
+  AudioParamTimeline mPositionY;
+  AudioParamTimeline mPositionZ;
+  AudioParamTimeline mOrientationX;
+  AudioParamTimeline mOrientationY;
+  AudioParamTimeline mOrientationZ;
   ThreeDPoint mVelocity;
   double mRefDistance;
   double mMaxDistance;
   double mRolloffFactor;
   double mConeInnerAngle;
   double mConeOuterAngle;
   double mConeOuterGain;
   ThreeDPoint mListenerPosition;
@@ -244,28 +294,32 @@ public:
 PannerNode::PannerNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Clamped_max,
               ChannelInterpretation::Speakers)
   // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
   , mPanningModel(PanningModelType::Equalpower)
   , mDistanceModel(DistanceModelType::Inverse)
-  , mPosition()
-  , mOrientation(1., 0., 0.)
+  , mPositionX(new AudioParam(this, PannerNode::POSITIONX, 0., this->NodeType()))
+  , mPositionY(new AudioParam(this, PannerNode::POSITIONY, 0., this->NodeType()))
+  , mPositionZ(new AudioParam(this, PannerNode::POSITIONZ, 0., this->NodeType()))
+  , mOrientationX(new AudioParam(this, PannerNode::ORIENTATIONX, 1., this->NodeType()))
+  , mOrientationY(new AudioParam(this, PannerNode::ORIENTATIONY, 0., this->NodeType()))
+  , mOrientationZ(new AudioParam(this, PannerNode::ORIENTATIONZ, 0., this->NodeType()))
   , mVelocity()
   , mRefDistance(1.)
   , mMaxDistance(10000.)
   , mRolloffFactor(1.)
   , mConeInnerAngle(360.)
   , mConeOuterAngle(360.)
   , mConeOuterGain(0.)
 {
   mStream = AudioNodeStream::Create(aContext,
-                                    new PannerNodeEngine(this),
+                                    new PannerNodeEngine(this, aContext->Destination()),
                                     AudioNodeStream::NO_STREAM_FLAGS);
   // We should register once we have set up our stream and engine.
   Context()->Listener()->RegisterPannerNode(this);
 }
 
 PannerNode::~PannerNode()
 {
   if (Context()) {
@@ -329,93 +383,222 @@ PannerNodeEngine::InverseGainFunction(fl
 float
 PannerNodeEngine::ExponentialGainFunction(float aDistance)
 {
   return pow(aDistance / mRefDistance, -mRolloffFactor);
 }
 
 void
 PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput,
-                                      AudioBlock* aOutput)
+                                      AudioBlock* aOutput,
+                                      StreamTime tick)
 {
   // The output of this node is always stereo, no matter what the inputs are.
   aOutput->AllocateChannels(2);
 
   float azimuth, elevation;
-  ComputeAzimuthAndElevation(azimuth, elevation);
+
+  ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
+  ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
+  if (!orientation.IsZero()) {
+    orientation.Normalize();
+  }
+  ComputeAzimuthAndElevation(position, azimuth, elevation);
 
   AudioBlock input = aInput;
   // Gain is applied before the delay and convolution of the HRTF.
-  input.mVolume *= ComputeConeGain() * ComputeDistanceGain();
+  input.mVolume *= ComputeConeGain(position, orientation) * ComputeDistanceGain(position);
 
   mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
 }
 
+ThreeDPoint
+PannerNodeEngine::ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime& tick)
+{
+  return ThreeDPoint(aX.GetValueAtTime(tick),
+                     aY.GetValueAtTime(tick),
+                     aZ.GetValueAtTime(tick));
+}
+
 void
 PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
-                                            AudioBlock* aOutput)
+                                            AudioBlock* aOutput,
+                                            StreamTime tick)
 {
   float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
   int inputChannels = aInput.ChannelCount();
 
-  // If both the listener are in the same spot, and no cone gain is specified,
-  // this node is noop.
-  if (mListenerPosition == mPosition &&
-      mConeInnerAngle == 360 &&
-      mConeOuterAngle == 360) {
-    *aOutput = aInput;
-    return;
-  }
+  // Optimize the case where the position and orientation are constant for
+  // this processing block: we can just apply a constant gain on the left
+  // and right channels.
+  if (mPositionX.HasSimpleValue() &&
+      mPositionY.HasSimpleValue() &&
+      mPositionZ.HasSimpleValue() &&
+      mOrientationX.HasSimpleValue() &&
+      mOrientationY.HasSimpleValue() &&
+      mOrientationZ.HasSimpleValue()) {
+
+    ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
+    ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
+    if (!orientation.IsZero()) {
+      orientation.Normalize();
+    }
+
+    // If the listener and the panner are in the same spot, and no cone gain
+    // is specified, this node is a no-op.
+    if (mListenerPosition == position &&
+        mConeInnerAngle == 360 &&
+        mConeOuterAngle == 360) {
+      *aOutput = aInput;
+      return;
+    }
+
+    // The output of this node is always stereo, no matter what the inputs are.
+    aOutput->AllocateChannels(2);
+
+    ComputeAzimuthAndElevation(position, azimuth, elevation);
+    coneGain = ComputeConeGain(position, orientation);
+
+    // The following algorithm is described in the spec.
+    // Clamp azimuth in the [-90, 90] range.
+    azimuth = min(180.f, max(-180.f, azimuth));
 
-  // The output of this node is always stereo, no matter what the inputs are.
-  aOutput->AllocateChannels(2);
+    // Wrap around
+    if (azimuth < -90.f) {
+      azimuth = -180.f - azimuth;
+    } else if (azimuth > 90) {
+      azimuth = 180.f - azimuth;
+    }
+
+    // Normalize the value in the [0, 1] range.
+    if (inputChannels == 1) {
+      normalizedAzimuth = (azimuth + 90.f) / 180.f;
+    } else {
+      if (azimuth <= 0) {
+        normalizedAzimuth = (azimuth + 90.f) / 90.f;
+      } else {
+        normalizedAzimuth = azimuth / 90.f;
+      }
+    }
 
-  ComputeAzimuthAndElevation(azimuth, elevation);
-  coneGain = ComputeConeGain();
+    distanceGain = ComputeDistanceGain(position);
+
+    // Actually compute the left and right gain.
+    gainL = cos(0.5 * M_PI * normalizedAzimuth);
+    gainR = sin(0.5 * M_PI * normalizedAzimuth);
+
+    // Compute the output.
+    ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);
 
-  // The following algorithm is described in the spec.
-  // Clamp azimuth in the [-90, 90] range.
-  azimuth = min(180.f, max(-180.f, azimuth));
+    aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
+  } else {
+    float positionX[WEBAUDIO_BLOCK_SIZE];
+    float positionY[WEBAUDIO_BLOCK_SIZE];
+    float positionZ[WEBAUDIO_BLOCK_SIZE];
+    float orientationX[WEBAUDIO_BLOCK_SIZE];
+    float orientationY[WEBAUDIO_BLOCK_SIZE];
+    float orientationZ[WEBAUDIO_BLOCK_SIZE];
+
+    // The output of this node is always stereo, no matter what the inputs are.
+    aOutput->AllocateChannels(2);
 
-  // Wrap around
-  if (azimuth < -90.f) {
-    azimuth = -180.f - azimuth;
-  } else if (azimuth > 90) {
-    azimuth = 180.f - azimuth;
-  }
-
-  // Normalize the value in the [0, 1] range.
-  if (inputChannels == 1) {
-    normalizedAzimuth = (azimuth + 90.f) / 180.f;
-  } else {
-    if (azimuth <= 0) {
-      normalizedAzimuth = (azimuth + 90.f) / 90.f;
+    if (!mPositionX.HasSimpleValue()) {
+      mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      positionX[0] = mPositionX.GetValueAtTime(tick);
+    }
+    if (!mPositionY.HasSimpleValue()) {
+      mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      positionY[0] = mPositionY.GetValueAtTime(tick);
+    }
+    if (!mPositionZ.HasSimpleValue()) {
+      mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      positionZ[0] = mPositionZ.GetValueAtTime(tick);
+    }
+    if (!mOrientationX.HasSimpleValue()) {
+      mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      orientationX[0] = mOrientationX.GetValueAtTime(tick);
+    }
+    if (!mOrientationY.HasSimpleValue()) {
+      mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE);
+    } else {
+      orientationY[0] = mOrientationY.GetValueAtTime(tick);
+    }
+    if (!mOrientationZ.HasSimpleValue()) {
+      mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE);
     } else {
-      normalizedAzimuth = azimuth / 90.f;
+      orientationZ[0] = mOrientationZ.GetValueAtTime(tick);
     }
-  }
+
+    float computedGain[2 * WEBAUDIO_BLOCK_SIZE + 4];
+    bool onLeft[WEBAUDIO_BLOCK_SIZE];
 
-  distanceGain = ComputeDistanceGain();
+    float* alignedComputedGain = ALIGNED16(computedGain);
+    ASSERT_ALIGNED16(alignedComputedGain);
+    for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
+      ThreeDPoint position(mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter],
+                           mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter],
+                           mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]);
+      ThreeDPoint orientation(mOrientationX.HasSimpleValue() ? orientationX[0] : orientationX[counter],
+                              mOrientationY.HasSimpleValue() ? orientationY[0] : orientationY[counter],
+                              mOrientationZ.HasSimpleValue() ? orientationZ[0] : orientationZ[counter]);
+      if (!orientation.IsZero()) {
+        orientation.Normalize();
+      }
+
+      ComputeAzimuthAndElevation(position, azimuth, elevation);
+      coneGain = ComputeConeGain(position, orientation);
+
+      // The following algorithm is described in the spec.
+      // Clamp azimuth in the [-90, 90] range.
+      azimuth = min(180.f, max(-180.f, azimuth));
 
-  // Actually compute the left and right gain.
-  gainL = cos(0.5 * M_PI * normalizedAzimuth);
-  gainR = sin(0.5 * M_PI * normalizedAzimuth);
+      // Wrap around
+      if (azimuth < -90.f) {
+        azimuth = -180.f - azimuth;
+      } else if (azimuth > 90) {
+        azimuth = 180.f - azimuth;
+      }
 
-  // Compute the output.
-  ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);
+      // Normalize the value in the [0, 1] range.
+      if (inputChannels == 1) {
+        normalizedAzimuth = (azimuth + 90.f) / 180.f;
+      } else {
+        if (azimuth <= 0) {
+          normalizedAzimuth = (azimuth + 90.f) / 90.f;
+        } else {
+          normalizedAzimuth = azimuth / 90.f;
+        }
+      }
+
+      distanceGain = ComputeDistanceGain(position);
 
-  aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
+      // Actually compute the left and right gain.
+      gainL = cos(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain;
+      gainR = sin(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain;
+
+      alignedComputedGain[counter] = gainL;
+      alignedComputedGain[WEBAUDIO_BLOCK_SIZE + counter] = gainR;
+      onLeft[counter] = azimuth <= 0;
+    }
+
+    // Apply the gain to the output buffer
+    ApplyStereoPanning(aInput, aOutput, alignedComputedGain, &alignedComputedGain[WEBAUDIO_BLOCK_SIZE], onLeft);
+
+  }
 }
 
 // This algorithm is specified in the webaudio spec.
 void
-PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
+PannerNodeEngine::ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation)
 {
-  ThreeDPoint sourceListener = mPosition - mListenerPosition;
-
+  ThreeDPoint sourceListener = position - mListenerPosition;
   if (sourceListener.IsZero()) {
     aAzimuth = 0.0;
     aElevation = 0.0;
     return;
   }
 
   sourceListener.Normalize();
 
@@ -456,29 +639,30 @@ PannerNodeEngine::ComputeAzimuthAndEleva
     aAzimuth = 90 - aAzimuth;
   } else {
     aAzimuth = 450 - aAzimuth;
   }
 }
 
 // This algorithm is described in the WebAudio spec.
 float
-PannerNodeEngine::ComputeConeGain()
+PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position,
+                                  const ThreeDPoint& orientation)
 {
   // Omnidirectional source
-  if (mOrientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
+  if (orientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
     return 1;
   }
 
   // Normalized source-listener vector
-  ThreeDPoint sourceToListener = mListenerPosition - mPosition;
+  ThreeDPoint sourceToListener = mListenerPosition - position;
   sourceToListener.Normalize();
 
   // Angle between the source orientation vector and the source-listener vector
-  double dotProduct = sourceToListener.DotProduct(mOrientation);
+  double dotProduct = sourceToListener.DotProduct(orientation);
   double angle = 180 * acos(dotProduct) / M_PI;
   double absAngle = fabs(angle);
 
   // Divide by 2 here since API is entire angle (not half-angle)
   double absInnerAngle = fabs(mConeInnerAngle) / 2;
   double absOuterAngle = fabs(mConeOuterAngle) / 2;
   double gain = 1;
 
@@ -494,35 +678,35 @@ PannerNodeEngine::ComputeConeGain()
     double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
     gain = (1 - x) + mConeOuterGain * x;
   }
 
   return gain;
 }
 
 float
-PannerNodeEngine::ComputeDistanceGain()
+PannerNodeEngine::ComputeDistanceGain(const ThreeDPoint& position)
 {
-  ThreeDPoint distanceVec = mPosition - mListenerPosition;
+  ThreeDPoint distanceVec = position - mListenerPosition;
   float distance = sqrt(distanceVec.DotProduct(distanceVec));
   return std::max(0.0f, (this->*mDistanceModelFunction)(distance));
 }
 
 float
 PannerNode::ComputeDopplerShift()
 {
   double dopplerShift = 1.0; // Initialize to default value
 
   AudioListener* listener = Context()->Listener();
 
   if (listener->DopplerFactor() > 0) {
     // Don't bother if both source and listener have no velocity.
     if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) {
       // Calculate the source to listener vector.
-      ThreeDPoint sourceToListener = mPosition - listener->Velocity();
+      ThreeDPoint sourceToListener = ConvertAudioParamTo3DP(mPositionX, mPositionY, mPositionZ) - listener->Velocity();
 
       double sourceListenerMagnitude = sourceToListener.Magnitude();
 
       double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude;
       double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude;
 
       listenerProjection = -listenerProjection;
       sourceProjection = -sourceProjection;
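
The header changes below add per-axis parameter indices (POSITIONX through ORIENTATIONZ) that RecvTimelineEvent above switches on to pick the matching timeline. A small standalone sketch of that index-dispatch pattern follows; Event, Timeline and Engine are hypothetical stand-ins that only mirror the structure, not Gecko's actual classes.

#include <cstdio>

// Hypothetical stand-ins mirroring PannerNodeEngine::RecvTimelineEvent.
enum ParamIndex { POSITIONX, POSITIONY, POSITIONZ,
                  ORIENTATIONX, ORIENTATIONY, ORIENTATIONZ };

struct Event { double time; float value; };

struct Timeline {
  void InsertEvent(const Event& aEvent) {
    std::printf("event: t=%f v=%f\n", aEvent.time, aEvent.value);
  }
};

struct Engine {
  Timeline mPositionX, mPositionY, mPositionZ;
  Timeline mOrientationX, mOrientationY, mOrientationZ;

  // One entry point for all six parameters; the index selects the timeline.
  void RecvTimelineEvent(ParamIndex aIndex, const Event& aEvent) {
    switch (aIndex) {
    case POSITIONX:    mPositionX.InsertEvent(aEvent);    break;
    case POSITIONY:    mPositionY.InsertEvent(aEvent);    break;
    case POSITIONZ:    mPositionZ.InsertEvent(aEvent);    break;
    case ORIENTATIONX: mOrientationX.InsertEvent(aEvent); break;
    case ORIENTATIONY: mOrientationY.InsertEvent(aEvent); break;
    case ORIENTATIONZ: mOrientationZ.InsertEvent(aEvent); break;
    }
  }
};

int main() {
  Engine engine;
  engine.RecvTimelineEvent(POSITIONX, Event{0.5, 1.0f});
  return 0;
}
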
--- a/dom/media/webaudio/PannerNode.h
+++ b/dom/media/webaudio/PannerNode.h
@@ -64,38 +64,28 @@ public:
   void SetDistanceModel(DistanceModelType aDistanceModel)
   {
     mDistanceModel = aDistanceModel;
     SendInt32ParameterToStream(DISTANCE_MODEL, int32_t(mDistanceModel));
   }
 
   void SetPosition(double aX, double aY, double aZ)
   {
-    if (WebAudioUtils::FuzzyEqual(mPosition.x, aX) &&
-        WebAudioUtils::FuzzyEqual(mPosition.y, aY) &&
-        WebAudioUtils::FuzzyEqual(mPosition.z, aZ)) {
-      return;
-    }
-    mPosition.x = aX;
-    mPosition.y = aY;
-    mPosition.z = aZ;
-    SendThreeDPointParameterToStream(POSITION, mPosition);
+    mPositionX->SetValue(aX);
+    mPositionY->SetValue(aY);
+    mPositionZ->SetValue(aZ);
+    SendThreeDPointParameterToStream(POSITION, ConvertAudioParamTo3DP(mPositionX, mPositionY, mPositionZ));
   }
 
   void SetOrientation(double aX, double aY, double aZ)
   {
-    ThreeDPoint orientation(aX, aY, aZ);
-    if (!orientation.IsZero()) {
-      orientation.Normalize();
-    }
-    if (mOrientation.FuzzyEqual(orientation)) {
-      return;
-    }
-    mOrientation = orientation;
-    SendThreeDPointParameterToStream(ORIENTATION, mOrientation);
+    mOrientationX->SetValue(aX);
+    mOrientationY->SetValue(aY);
+    mOrientationZ->SetValue(aZ);
+    SendThreeDPointParameterToStream(ORIENTATION, ConvertAudioParamTo3DP(mOrientationX, mOrientationY, mOrientationZ));
   }
 
   void SetVelocity(double aX, double aY, double aZ)
   {
     if (WebAudioUtils::FuzzyEqual(mVelocity.x, aX) &&
         WebAudioUtils::FuzzyEqual(mVelocity.y, aY) &&
         WebAudioUtils::FuzzyEqual(mVelocity.z, aZ)) {
       return;
@@ -240,38 +230,46 @@ private:
     LISTENER_FRONT_VECTOR, // unit length
     LISTENER_RIGHT_VECTOR, // unit length, orthogonal to LISTENER_FRONT_VECTOR
     LISTENER_VELOCITY,
     LISTENER_DOPPLER_FACTOR,
     LISTENER_SPEED_OF_SOUND,
     PANNING_MODEL,
     DISTANCE_MODEL,
     POSITION,
+    POSITIONX,
+    POSITIONY,
+    POSITIONZ,
     ORIENTATION, // unit length or zero
+    ORIENTATIONX,
+    ORIENTATIONY,
+    ORIENTATIONZ,
     VELOCITY,
     REF_DISTANCE,
     MAX_DISTANCE,
     ROLLOFF_FACTOR,
     CONE_INNER_ANGLE,
     CONE_OUTER_ANGLE,
     CONE_OUTER_GAIN
   };
 
-private:
+  ThreeDPoint ConvertAudioParamTo3DP(const RefPtr<AudioParam>& aX, const RefPtr<AudioParam>& aY, const RefPtr<AudioParam>& aZ)
+  {
+    return ThreeDPoint(aX->GetValue(), aY->GetValue(), aZ->GetValue());
+  }
+
   PanningModelType mPanningModel;
   DistanceModelType mDistanceModel;
-  ThreeDPoint mPosition;
-  ThreeDPoint mOrientation;
-  ThreeDPoint mVelocity;
   RefPtr<AudioParam> mPositionX;
   RefPtr<AudioParam> mPositionY;
   RefPtr<AudioParam> mPositionZ;
   RefPtr<AudioParam> mOrientationX;
   RefPtr<AudioParam> mOrientationY;
   RefPtr<AudioParam> mOrientationZ;
+  ThreeDPoint mVelocity;
 
   double mRefDistance;
   double mMaxDistance;
   double mRolloffFactor;
   double mConeInnerAngle;
   double mConeOuterAngle;
   double mConeOuterGain;
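
As a worked example of the equal-power math the a-rate loop applies per frame (clamp the azimuth, wrap it into [-90, 90], normalize, then derive cos/sin gains), here is a standalone sketch; the function name and the hard-coded stereo-input branch are illustrative only, and it reproduces just the gain computation, not ApplyStereoPanning itself.

#include <algorithm>
#include <cmath>
#include <cstdio>

constexpr float kPi = 3.14159265358979f;

// Compute equal-power stereo gains from an azimuth in degrees, following the
// same clamp/wrap/normalize steps as EqualPowerPanningFunction above.
// Assumes a stereo (two-channel) input; mono uses a different normalization.
void EqualPowerGains(float aAzimuth, float& aGainL, float& aGainR,
                     bool& aOnLeft) {
  // Clamp to [-180, 180], then wrap so the effective range is [-90, 90].
  float azimuth = std::min(180.f, std::max(-180.f, aAzimuth));
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90.f) {
    azimuth = 180.f - azimuth;
  }

  // Normalize to [0, 1] (stereo-input case from the spec).
  float normalized = azimuth <= 0 ? (azimuth + 90.f) / 90.f : azimuth / 90.f;

  aGainL = std::cos(0.5f * kPi * normalized);
  aGainR = std::sin(0.5f * kPi * normalized);
  aOnLeft = azimuth <= 0;
}

int main() {
  float gainL, gainR;
  bool onLeft;
  EqualPowerGains(45.f, gainL, gainR, onLeft);
  std::printf("gainL=%f gainR=%f onLeft=%d\n", gainL, gainR, onLeft);
  return 0;
}
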