Bug 1208371 - Add PrincipalHandle to MediaChunks. r?mt,jesup draft
authorAndreas Pehrson <pehrsons@gmail.com>
Tue, 15 Mar 2016 14:27:41 +0100
changeset 347657 fc6d99a2b427527e90ee6116593faedb361675df
parent 342169 265d381baeca0228a72b2bb48871270d608fa352
child 347658 444b27339e853f2b5d64cc7971d23446c609327a
push id14642
push userpehrsons@gmail.com
push dateTue, 05 Apr 2016 16:45:34 +0000
reviewersmt, jesup
bugs1208371
milestone47.0a1
Bug 1208371 - Add PrincipalHandle to MediaChunks. r?mt,jesup

PrincipalHandle is a thread-safe pointer to a holder of the main-thread-only
nsIPrincipal, so that the principal can be passed around the MediaStreamGraph
(MSG).

A MediaStreamTrack whose source has just updated its principal sets the new
principal aside (as its "pending principal") and combines the new principal
into its current principal. The source then starts passing the new principal
to the MediaStreamGraph as a PrincipalHandle.

Changes to a track's PrincipalHandle on the MSG are surfaced through the
MediaStreamTrackListener API. These changes are dispatched to the main thread
and compared to the MediaStreamTrack's pending principal. On a match, the
track knows the correct principal is flowing, promotes the pending principal
to the current principal, and updates any main-thread principal observers.

MozReview-Commit-ID: D0JXGWhQFFU
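The handoff above can be sketched as follows. This is illustration only, not
part of the patch: names like SetPendingPrincipal, CombinePrincipal,
mPendingPrincipal and NotifyPrincipalHandleChanged are assumed helpers, not
the actual API.

    // Main thread: the source learns of a new principal. The track sets it
    // aside as its pending principal, and the source starts tagging all new
    // data sent to the MediaStreamGraph with it. (Sketch; names are assumed.)
    void SomeMediaSource::PrincipalChanged(nsIPrincipal* aNewPrincipal)
    {
      MOZ_ASSERT(NS_IsMainThread());
      mTrack->SetPendingPrincipal(aNewPrincipal);
      mTrack->CombinePrincipal(aNewPrincipal); // combined into current principal
      mPrincipalHandle = MakePrincipalHandle(aNewPrincipal);
    }

    // Main thread: dispatched from the MSG through the MediaStreamTrackListener
    // API when a chunk with a different PrincipalHandle flows past.
    void MediaStreamTrack::NotifyPrincipalHandleChanged(PrincipalHandle aHandle)
    {
      if (PrincipalHandleMatches(aHandle, mPendingPrincipal)) {
        // Data tagged with the pending principal has reached this track, so
        // the pending principal becomes the current principal.
        mPrincipal = mPendingPrincipal.forget();
        NotifyPrincipalChangeObservers();
      }
    }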
dom/html/HTMLCanvasElement.cpp
dom/media/AudioSegment.h
dom/media/CanvasCaptureMediaStream.cpp
dom/media/CanvasCaptureMediaStream.h
dom/media/DOMMediaStream.cpp
dom/media/DOMMediaStream.h
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/MediaSegment.h
dom/media/MediaStreamTrack.h
dom/media/VideoSegment.cpp
dom/media/VideoSegment.h
dom/media/encoder/TrackEncoder.cpp
dom/media/gtest/TestVideoSegment.cpp
dom/media/gtest/TestVideoTrackEncoder.cpp
dom/media/gtest/TestVorbisTrackEncoder.cpp
dom/media/mediasink/DecodedStream.cpp
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineCameraVideoSource.cpp
dom/media/webrtc/MediaEngineCameraVideoSource.h
dom/media/webrtc/MediaEngineDefault.cpp
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEngineGonkVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineTabVideoSource.cpp
dom/media/webrtc/MediaEngineTabVideoSource.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webspeech/recognition/SpeechRecognition.cpp
dom/media/webspeech/synth/nsSpeechTask.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/test/FakeMediaStreamsImpl.h
--- a/dom/html/HTMLCanvasElement.cpp
+++ b/dom/html/HTMLCanvasElement.cpp
@@ -669,23 +669,24 @@ HTMLCanvasElement::CaptureStream(const O
   RefPtr<CanvasCaptureMediaStream> stream =
     CanvasCaptureMediaStream::CreateSourceStream(window, this);
   if (!stream) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   TrackID videoTrackId = 1;
-  nsresult rv = stream->Init(aFrameRate, videoTrackId);
+  nsCOMPtr<nsIPrincipal> principal = NodePrincipal();
+  nsresult rv =
+    stream->Init(aFrameRate, videoTrackId, principal);
   if (NS_FAILED(rv)) {
     aRv.Throw(rv);
     return nullptr;
   }
 
-  nsCOMPtr<nsIPrincipal> principal = NodePrincipal();
   stream->CreateDOMTrack(videoTrackId, MediaSegment::VIDEO, nsString(),
                          new BasicUnstoppableTrackSource(principal));
 
   rv = RegisterFrameCaptureListener(stream->FrameCaptureListener());
   if (NS_FAILED(rv)) {
     aRv.Throw(rv);
     return nullptr;
   }
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -140,16 +140,18 @@ DownmixAndInterleave(const nsTArray<cons
  * of the buffer. An AudioChunk maintains its own duration and channel data
  * pointers so it can represent a subinterval of a buffer without copying.
  * An AudioChunk can store its individual channels anywhere; it maintains
  * separate pointers to each channel's buffer.
  */
 struct AudioChunk {
   typedef mozilla::AudioSampleFormat SampleFormat;
 
+  AudioChunk() : mPrincipalHandle(PRINCIPAL_HANDLE_NONE) {}
+
   // Generic methods
   void SliceTo(StreamTime aStart, StreamTime aEnd)
   {
     MOZ_ASSERT(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                "Slice out of bounds");
     if (mBuffer) {
       MOZ_ASSERT(aStart < INT32_MAX, "Can't slice beyond 32-bit sample lengths");
       for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
@@ -185,16 +187,17 @@ struct AudioChunk {
   bool IsNull() const { return mBuffer == nullptr; }
   void SetNull(StreamTime aDuration)
   {
     mBuffer = nullptr;
     mChannelData.Clear();
     mDuration = aDuration;
     mVolume = 1.0f;
     mBufferFormat = AUDIO_FORMAT_SILENCE;
+    mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
   }
 
   size_t ChannelCount() const { return mChannelData.Length(); }
 
   bool IsMuted() const { return mVolume == 0.0f; }
 
   size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
   {
@@ -219,24 +222,29 @@ struct AudioChunk {
 
   template<typename T>
   const nsTArray<const T*>& ChannelData()
   {
     MOZ_ASSERT(AudioSampleTypeToFormat<T>::Format == mBufferFormat);
     return *reinterpret_cast<nsTArray<const T*>*>(&mChannelData);
   }
 
+  PrincipalHandle GetPrincipalHandle() const { return mPrincipalHandle; }
+
   StreamTime mDuration; // in frames within the buffer
   RefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
   nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
   float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
   SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
 #ifdef MOZILLA_INTERNAL_API
   mozilla::TimeStamp mTimeStamp;           // time at which this has been fetched from the MediaEngine
 #endif
+  // principalHandle for the data in this chunk.
+  // This can be compared to an nsIPrincipal* when back on main thread.
+  PrincipalHandle mPrincipalHandle;
 };
 
 /**
  * A list of audio samples consisting of a sequence of slices of SharedBuffers.
  * The audio rate is determined by the track, not stored in this class.
  */
 class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
 public:
@@ -296,56 +304,59 @@ public:
   }
 
   void ResampleChunks(SpeexResamplerState* aResampler,
                       uint32_t aInRate,
                       uint32_t aOutRate);
 
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const float*>& aChannelData,
-                    int32_t aDuration)
+                    int32_t aDuration, const PrincipalHandle& aPrincipalHandle)
   {
     AudioChunk* chunk = AppendChunk(aDuration);
     chunk->mBuffer = aBuffer;
     for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
       chunk->mChannelData.AppendElement(aChannelData[channel]);
     }
     chunk->mVolume = 1.0f;
     chunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
 #ifdef MOZILLA_INTERNAL_API
     chunk->mTimeStamp = TimeStamp::Now();
 #endif
+    chunk->mPrincipalHandle = aPrincipalHandle;
   }
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const int16_t*>& aChannelData,
-                    int32_t aDuration)
+                    int32_t aDuration, const PrincipalHandle& aPrincipalHandle)
   {
     AudioChunk* chunk = AppendChunk(aDuration);
     chunk->mBuffer = aBuffer;
     for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
       chunk->mChannelData.AppendElement(aChannelData[channel]);
     }
     chunk->mVolume = 1.0f;
     chunk->mBufferFormat = AUDIO_FORMAT_S16;
 #ifdef MOZILLA_INTERNAL_API
     chunk->mTimeStamp = TimeStamp::Now();
 #endif
+    chunk->mPrincipalHandle = aPrincipalHandle;
   }
   // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
   // in the segment.
   AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk)
   {
     AudioChunk* chunk = AppendChunk(aChunk->mDuration);
     chunk->mBuffer = aChunk->mBuffer.forget();
     chunk->mChannelData.SwapElements(aChunk->mChannelData);
     chunk->mVolume = aChunk->mVolume;
     chunk->mBufferFormat = aChunk->mBufferFormat;
 #ifdef MOZILLA_INTERNAL_API
     chunk->mTimeStamp = TimeStamp::Now();
 #endif
+    chunk->mPrincipalHandle = aChunk->mPrincipalHandle;
     return chunk;
   }
   void ApplyVolume(float aVolume);
   // Mix the segment into a mixer, interleaved. This is useful to output a
   // segment to a system audio callback. It up or down mixes to aChannelCount
   // channels.
   void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount,
                uint32_t aSampleRate);
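For illustration, a hypothetical caller of the new AppendFrames overloads
could look like this (not part of the patch; AppendSilence is a made-up
helper):

    // Appends ~10ms of 16-bit silence tagged with the given principal handle.
    static void AppendSilence(AudioSegment& aSegment,
                              const PrincipalHandle& aPrincipalHandle)
    {
      const int32_t frames = 441; // 10ms at 44100Hz
      RefPtr<SharedBuffer> buffer =
        SharedBuffer::Create(frames * sizeof(int16_t));
      int16_t* data = static_cast<int16_t*>(buffer->Data());
      memset(data, 0, frames * sizeof(int16_t));
      AutoTArray<const int16_t*, 1> channels;
      channels.AppendElement(data);
      // The handle is stored on the resulting AudioChunk and travels with it
      // through the MediaStreamGraph.
      aSegment.AppendFrames(buffer.forget(), channels, frames, aPrincipalHandle);
    }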
--- a/dom/media/CanvasCaptureMediaStream.cpp
+++ b/dom/media/CanvasCaptureMediaStream.cpp
@@ -19,20 +19,22 @@ using namespace mozilla::gfx;
 namespace mozilla {
 namespace dom {
 
 class OutputStreamDriver::StreamListener : public MediaStreamListener
 {
 public:
   explicit StreamListener(OutputStreamDriver* aDriver,
                           TrackID aTrackId,
+                          PrincipalHandle aPrincipalHandle,
                           SourceMediaStream* aSourceStream)
     : mEnded(false)
     , mSourceStream(aSourceStream)
     , mTrackId(aTrackId)
+    , mPrincipalHandle(aPrincipalHandle)
     , mMutex("CanvasCaptureMediaStream OutputStreamDriver::StreamListener")
     , mImage(nullptr)
   {
     MOZ_ASSERT(mSourceStream);
   }
 
   void EndStream() {
     mEnded = true;
@@ -50,44 +52,47 @@ public:
     StreamTime delta = aDesiredTime - mSourceStream->GetEndOfAppendedData(mTrackId);
     if (delta > 0) {
       MutexAutoLock lock(mMutex);
       MOZ_ASSERT(mSourceStream);
 
       RefPtr<Image> image = mImage;
       IntSize size = image ? image->GetSize() : IntSize(0, 0);
       VideoSegment segment;
-      segment.AppendFrame(image.forget(), delta, size);
+      segment.AppendFrame(image.forget(), delta, size, mPrincipalHandle);
 
       mSourceStream->AppendToTrack(mTrackId, &segment);
     }
 
     if (mEnded) {
       mSourceStream->EndAllTrackAndFinish();
     }
   }
 
 protected:
   ~StreamListener() { }
 
 private:
   Atomic<bool> mEnded;
   const RefPtr<SourceMediaStream> mSourceStream;
   const TrackID mTrackId;
+  const PrincipalHandle mPrincipalHandle;
 
   Mutex mMutex;
   // The below members are protected by mMutex.
   RefPtr<layers::Image> mImage;
 };
 
 OutputStreamDriver::OutputStreamDriver(SourceMediaStream* aSourceStream,
-                                       const TrackID& aTrackId)
+                                       const TrackID& aTrackId,
+                                       const PrincipalHandle& aPrincipalHandle)
   : FrameCaptureListener()
   , mSourceStream(aSourceStream)
-  , mStreamListener(new StreamListener(this, aTrackId, aSourceStream))
+  , mStreamListener(new StreamListener(this, aTrackId, aPrincipalHandle,
+                                       aSourceStream))
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mSourceStream);
   mSourceStream->AddListener(mStreamListener);
   mSourceStream->AddTrack(aTrackId, 0, new VideoSegment());
   mSourceStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
   mSourceStream->SetPullEnabled(true);
 
@@ -115,18 +120,19 @@ OutputStreamDriver::SetImage(const RefPt
 
 // ----------------------------------------------------------------------
 
 class TimerDriver : public OutputStreamDriver
 {
 public:
   explicit TimerDriver(SourceMediaStream* aSourceStream,
                        const double& aFPS,
-                       const TrackID& aTrackId)
-    : OutputStreamDriver(aSourceStream, aTrackId)
+                       const TrackID& aTrackId,
+                       const PrincipalHandle& aPrincipalHandle)
+    : OutputStreamDriver(aSourceStream, aTrackId, aPrincipalHandle)
     , mFPS(aFPS)
     , mTimer(nullptr)
   {
     if (mFPS == 0.0) {
       return;
     }
 
     mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
@@ -173,18 +179,19 @@ private:
 };
 
 // ----------------------------------------------------------------------
 
 class AutoDriver : public OutputStreamDriver
 {
 public:
   explicit AutoDriver(SourceMediaStream* aSourceStream,
-                      const TrackID& aTrackId)
-    : OutputStreamDriver(aSourceStream, aTrackId) {}
+                      const TrackID& aTrackId,
+                      const PrincipalHandle& aPrincipalHandle)
+    : OutputStreamDriver(aSourceStream, aTrackId, aPrincipalHandle) {}
 
   void NewFrame(already_AddRefed<Image> aImage) override
   {
     // Don't reset `mFrameCaptureRequested` since AutoDriver shall always have
     // `mFrameCaptureRequested` set to true.
     // This also means we should accept every frame as NewFrame is called only
     // after something changed.
 
@@ -234,28 +241,31 @@ CanvasCaptureMediaStream::RequestFrame()
   MOZ_ASSERT(mOutputStreamDriver);
   if (mOutputStreamDriver) {
     mOutputStreamDriver->RequestFrameCapture();
   }
 }
 
 nsresult
 CanvasCaptureMediaStream::Init(const dom::Optional<double>& aFPS,
-                               const TrackID& aTrackId)
+                               const TrackID& aTrackId,
+                               nsIPrincipal* aPrincipal)
 {
+  PrincipalHandle principalHandle = MakePrincipalHandle(aPrincipal);
+
   if (!aFPS.WasPassed()) {
     mOutputStreamDriver =
-      new AutoDriver(GetInputStream()->AsSourceStream(), aTrackId);
+      new AutoDriver(GetInputStream()->AsSourceStream(), aTrackId, principalHandle);
   } else if (aFPS.Value() < 0) {
     return NS_ERROR_ILLEGAL_VALUE;
   } else {
     // Cap frame rate to 60 FPS for sanity
     double fps = std::min(60.0, aFPS.Value());
     mOutputStreamDriver =
-      new TimerDriver(GetInputStream()->AsSourceStream(), fps, aTrackId);
+      new TimerDriver(GetInputStream()->AsSourceStream(), fps, aTrackId, principalHandle);
   }
   return NS_OK;
 }
 
 already_AddRefed<CanvasCaptureMediaStream>
 CanvasCaptureMediaStream::CreateSourceStream(nsPIDOMWindowInner* aWindow,
                                              HTMLCanvasElement* aCanvas)
 {
--- a/dom/media/CanvasCaptureMediaStream.h
+++ b/dom/media/CanvasCaptureMediaStream.h
@@ -5,16 +5,18 @@
 
 #ifndef mozilla_dom_CanvasCaptureMediaStream_h_
 #define mozilla_dom_CanvasCaptureMediaStream_h_
 
 #include "DOMMediaStream.h"
 #include "mozilla/dom/HTMLCanvasElement.h"
 #include "StreamBuffer.h"
 
+class nsIPrincipal;
+
 namespace mozilla {
 class DOMMediaStream;
 class MediaStreamListener;
 class SourceMediaStream;
 
 namespace layers {
 class Image;
 } // namespace layers
@@ -63,17 +65,18 @@ class OutputStreamFrameListener;
  * Base class for drivers of the output stream.
  * It is up to each sub class to implement the NewFrame() callback of
  * FrameCaptureListener.
  */
 class OutputStreamDriver : public FrameCaptureListener
 {
 public:
   OutputStreamDriver(SourceMediaStream* aSourceStream,
-                     const TrackID& aTrackId);
+                     const TrackID& aTrackId,
+                     const PrincipalHandle& aPrincipalHandle);
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamDriver);
 
   /*
    * Sub classes can SetImage() to update the image being appended to the
    * output stream. It will be appended on the next NotifyPull from MSG.
    */
   void SetImage(const RefPtr<layers::Image>& aImage);
@@ -96,17 +99,18 @@ private:
 class CanvasCaptureMediaStream : public DOMMediaStream
 {
 public:
   CanvasCaptureMediaStream(nsPIDOMWindowInner* aWindow, HTMLCanvasElement* aCanvas);
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(CanvasCaptureMediaStream, DOMMediaStream)
 
-  nsresult Init(const dom::Optional<double>& aFPS, const TrackID& aTrackId);
+  nsresult Init(const dom::Optional<double>& aFPS, const TrackID& aTrackId,
+                nsIPrincipal* aPrincipal);
 
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   // WebIDL
   HTMLCanvasElement* Canvas() const { return mCanvas; }
   void RequestFrame();
   dom::FrameCaptureListener* FrameCaptureListener();
 
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -1251,16 +1251,30 @@ DOMAudioNodeMediaStream::CreateTrackUnio
   RefPtr<DOMAudioNodeMediaStream> stream = new DOMAudioNodeMediaStream(aWindow, aNode);
   stream->InitTrackUnionStream(aGraph);
   return stream.forget();
 }
 
 DOMHwMediaStream::DOMHwMediaStream(nsPIDOMWindowInner* aWindow)
   : DOMLocalMediaStream(aWindow, nullptr)
 {
+#ifdef MOZ_WIDGET_GONK
+  if (!mWindow) {
+    NS_ERROR("Expected window here.");
+    mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
+    return;
+  }
+  nsIDocument* doc = mWindow->GetDoc();
+  if (!doc) {
+    NS_ERROR("Expected document here.");
+    mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
+    return;
+  }
+  mPrincipalHandle = MakePrincipalHandle(doc->NodePrincipal());
+#endif
 }
 
 DOMHwMediaStream::~DOMHwMediaStream()
 {
 }
 
 already_AddRefed<DOMHwMediaStream>
 DOMHwMediaStream::CreateHwStream(nsPIDOMWindowInner* aWindow,
@@ -1300,17 +1314,17 @@ DOMHwMediaStream::Init(MediaStream* stre
     VideoSegment segment;
 #ifdef MOZ_WIDGET_GONK
     const StreamTime delta = STREAM_TIME_MAX; // Because MediaStreamGraph will run out frames in non-autoplay mode,
                                               // we must give it bigger frame length to cover this situation.
 
     RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
     mozilla::gfx::IntSize size = image->GetSize();
 
-    segment.AppendFrame(image.forget(), delta, size);
+    segment.AppendFrame(image.forget(), delta, size, mPrincipalHandle);
 #endif
     srcStream->AddTrack(TRACK_VIDEO_PRIMARY, 0, new VideoSegment());
     srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
     srcStream->FinishAddTracks();
     srcStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
   }
 }
 
@@ -1357,17 +1371,17 @@ DOMHwMediaStream::SetImageSize(uint32_t 
   track->GetSegment()->Clear();
 
   // Change the image size.
   const StreamTime delta = STREAM_TIME_MAX;
   RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
   mozilla::gfx::IntSize size = image->GetSize();
   VideoSegment segment;
 
-  segment.AppendFrame(image.forget(), delta, size);
+  segment.AppendFrame(image.forget(), delta, size, PRINCIPAL_HANDLE_NONE);
   srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
 #endif
 }
 
 void
 DOMHwMediaStream::SetOverlayImage(OverlayImage* aImage)
 {
   if (!aImage) {
@@ -1393,17 +1407,17 @@ DOMHwMediaStream::SetOverlayImage(Overla
   track->GetSegment()->Clear();
 
   // Change the image size.
   const StreamTime delta = STREAM_TIME_MAX;
   RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
   mozilla::gfx::IntSize size = image->GetSize();
   VideoSegment segment;
 
-  segment.AppendFrame(image.forget(), delta, size);
+  segment.AppendFrame(image.forget(), delta, size, PRINCIPAL_HANDLE_NONE);
   srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
 #endif
 }
 
 void
 DOMHwMediaStream::SetOverlayId(int32_t aOverlayId)
 {
 #ifdef MOZ_WIDGET_GONK
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -776,14 +776,15 @@ protected:
 private:
   void Init(MediaStream* aStream, OverlayImage* aImage);
 
 #ifdef MOZ_WIDGET_GONK
   const int DEFAULT_IMAGE_ID = 0x01;
   const int DEFAULT_IMAGE_WIDTH = 400;
   const int DEFAULT_IMAGE_HEIGHT = 300;
   RefPtr<OverlayImage> mOverlayImage;
+  PrincipalHandle mPrincipalHandle;
 #endif
 };
 
 } // namespace mozilla
 
 #endif /* NSDOMMEDIASTREAM_H_ */
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -289,24 +289,26 @@ public:
 
     switch (mType) {
       case MEDIA_START:
         {
           NS_ASSERTION(!NS_IsMainThread(), "Never call on main thread");
           nsresult rv;
 
           if (mAudioDevice) {
-            rv = mAudioDevice->GetSource()->Start(source, kAudioTrack);
+            rv = mAudioDevice->GetSource()->Start(source, kAudioTrack,
+                                                  mListener->GetPrincipalHandle());
             if (NS_FAILED(rv)) {
               ReturnCallbackError(rv, "Starting audio failed");
               return;
             }
           }
           if (mVideoDevice) {
-            rv = mVideoDevice->GetSource()->Start(source, kVideoTrack);
+            rv = mVideoDevice->GetSource()->Start(source, kVideoTrack,
+                                                  mListener->GetPrincipalHandle());
             if (NS_FAILED(rv)) {
               ReturnCallbackError(rv, "Starting video failed");
               return;
             }
           }
           // Start() queued the tracks to be added synchronously to avoid races
           source->FinishAddTracks();
 
@@ -1988,39 +1990,41 @@ MediaManager::GetUserMedia(nsPIDOMWindow
     }
   } else if (IsOn(c.mAudio)) {
    audioType = MediaSourceEnum::Microphone;
   }
 
   StreamListeners* listeners = AddWindowID(windowID);
 
   // Create a disabled listener to act as a placeholder
+  nsIPrincipal* principal = aWindow->GetExtantDoc()->NodePrincipal();
   RefPtr<GetUserMediaCallbackMediaStreamListener> listener =
-    new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowID);
+    new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowID,
+                                                MakePrincipalHandle(principal));
 
   // No need for locking because we always do this in the main thread.
   listeners->AppendElement(listener);
 
   if (!privileged) {
     // Check if this site has had persistent permissions denied.
     nsCOMPtr<nsIPermissionManager> permManager =
       do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
     NS_ENSURE_SUCCESS(rv, rv);
 
     uint32_t audioPerm = nsIPermissionManager::UNKNOWN_ACTION;
     if (IsOn(c.mAudio)) {
       rv = permManager->TestExactPermissionFromPrincipal(
-        aWindow->GetExtantDoc()->NodePrincipal(), "microphone", &audioPerm);
+        principal, "microphone", &audioPerm);
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
     uint32_t videoPerm = nsIPermissionManager::UNKNOWN_ACTION;
     if (IsOn(c.mVideo)) {
       rv = permManager->TestExactPermissionFromPrincipal(
-        aWindow->GetExtantDoc()->NodePrincipal(), "camera", &videoPerm);
+        principal, "camera", &videoPerm);
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
     if ((!IsOn(c.mAudio) || audioPerm == nsIPermissionManager::DENY_ACTION) &&
         (!IsOn(c.mVideo) || videoPerm == nsIPermissionManager::DENY_ACTION)) {
       RefPtr<MediaStreamError> error =
           new MediaStreamError(aWindow, NS_LITERAL_STRING("SecurityError"));
       onFailure->OnError(error);
@@ -2315,19 +2319,22 @@ MediaManager::EnumerateDevices(nsPIDOMWi
   MOZ_ASSERT(NS_IsMainThread());
   NS_ENSURE_TRUE(!sInShutdown, NS_ERROR_FAILURE);
   nsCOMPtr<nsIGetUserMediaDevicesSuccessCallback> onSuccess(aOnSuccess);
   nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onFailure(aOnFailure);
   uint64_t windowId = aWindow->WindowID();
 
   StreamListeners* listeners = AddWindowID(windowId);
 
+  nsIPrincipal* principal = aWindow->GetExtantDoc()->NodePrincipal();
+
   // Create a disabled listener to act as a placeholder
   RefPtr<GetUserMediaCallbackMediaStreamListener> listener =
-    new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowId);
+    new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowId,
+                                                MakePrincipalHandle(principal));
 
   // No need for locking because we always do this in the main thread.
   listeners->AppendElement(listener);
 
   bool fake = Preferences::GetBool("media.navigator.streams.fake");
 
   RefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowId,
                                                      MediaSourceEnum::Camera,
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -121,19 +121,21 @@ public:
  * to Start() and Stop() the underlying MediaEngineSource when MediaStreams
  * are assigned and deassigned in content.
  */
 class GetUserMediaCallbackMediaStreamListener : public MediaStreamListener
 {
 public:
   // Create in an inactive state
   GetUserMediaCallbackMediaStreamListener(base::Thread *aThread,
-    uint64_t aWindowID)
+    uint64_t aWindowID,
+    const PrincipalHandle& aPrincipalHandle)
     : mMediaThread(aThread)
     , mWindowID(aWindowID)
+    , mPrincipalHandle(aPrincipalHandle)
     , mStopped(false)
     , mFinished(false)
     , mRemoved(false)
     , mAudioStopped(false)
     , mVideoStopped(false) {}
 
   ~GetUserMediaCallbackMediaStreamListener()
   {
@@ -255,21 +257,21 @@ public:
   // Proxy NotifyPull() to sources
   void
   NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) override
   {
     // Currently audio sources ignore NotifyPull, but they could
     // watch it especially for fake audio.
     if (mAudioDevice) {
       mAudioDevice->GetSource()->NotifyPull(aGraph, mStream, kAudioTrack,
-                                            aDesiredTime);
+                                            aDesiredTime, mPrincipalHandle);
     }
     if (mVideoDevice) {
       mVideoDevice->GetSource()->NotifyPull(aGraph, mStream, kVideoTrack,
-                                            aDesiredTime);
+                                            aDesiredTime, mPrincipalHandle);
     }
   }
 
   void
   NotifyEvent(MediaStreamGraph* aGraph,
               MediaStreamListener::MediaStreamGraphEvent aEvent) override
   {
     switch (aEvent) {
@@ -296,20 +298,23 @@ public:
   NotifyFinished();
 
   void
   NotifyRemoved();
 
   void
   NotifyDirectListeners(MediaStreamGraph* aGraph, bool aHasListeners);
 
+  PrincipalHandle GetPrincipalHandle() const { return mPrincipalHandle; }
+
 private:
   // Set at construction
   base::Thread* mMediaThread;
   uint64_t mWindowID;
+  const PrincipalHandle mPrincipalHandle;
 
   // true after this listener has sent MEDIA_STOP. MainThread only.
   bool mStopped;
 
   // true after the stream this listener is listening to has finished in the
   // MediaStreamGraph. MainThread only.
   bool mFinished;
 
--- a/dom/media/MediaSegment.h
+++ b/dom/media/MediaSegment.h
@@ -2,16 +2,18 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_MEDIASEGMENT_H_
 #define MOZILLA_MEDIASEGMENT_H_
 
 #include "nsTArray.h"
+#include "nsIPrincipal.h"
+#include "nsProxyRelease.h"
 #ifdef MOZILLA_INTERNAL_API
 #include "mozilla/TimeStamp.h"
 #endif
 #include <algorithm>
 #include "Latency.h"
 
 namespace mozilla {
 
@@ -50,16 +52,60 @@ const StreamTime STREAM_TIME_MAX = MEDIA
 
 /**
  * Media time relative to the start of the graph timeline.
  */
 typedef MediaTime GraphTime;
 const GraphTime GRAPH_TIME_MAX = MEDIA_TIME_MAX;
 
 /**
+ * We pass the principal through the MediaStreamGraph by wrapping it in a
+ * thread-safe nsMainThreadPtrHandle, since it cannot be used directly off the
+ * main thread. We can compare two PrincipalHandles on any thread, but they can
+ * only be created and converted back to nsIPrincipal* on the main thread.
+ */
+typedef nsMainThreadPtrHandle<nsIPrincipal> PrincipalHandle;
+
+inline PrincipalHandle MakePrincipalHandle(nsIPrincipal* aPrincipal)
+{
+  RefPtr<nsMainThreadPtrHolder<nsIPrincipal>> holder =
+    new nsMainThreadPtrHolder<nsIPrincipal>(aPrincipal);
+  return PrincipalHandle(holder);
+}
+
+const PrincipalHandle PRINCIPAL_HANDLE_NONE(nullptr);
+
+inline nsIPrincipal* GetPrincipalFromHandle(const PrincipalHandle& aPrincipalHandle)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  return aPrincipalHandle.get();
+}
+
+inline bool PrincipalHandleMatches(const PrincipalHandle& aPrincipalHandle,
+                                   nsIPrincipal* aOther)
+{
+  if (!aOther) {
+    return false;
+  }
+
+  nsIPrincipal* principal = GetPrincipalFromHandle(aPrincipalHandle);
+  if (!principal) {
+    return false;
+  }
+
+  bool result;
+  if (NS_FAILED(principal->Equals(aOther, &result))) {
+    NS_ERROR("Principal check failed");
+    return false;
+  }
+
+  return result;
+}
+
+/**
  * A MediaSegment is a chunk of media data sequential in time. Different
  * types of data have different subclasses of MediaSegment, all inheriting
  * from MediaSegmentBase.
  * All MediaSegment data is timed using StreamTime. The actual tick rate
  * is defined on a per-track basis. For some track types, this can be
  * a fixed constant for all tracks of that type (e.g. 1MHz for video).
  *
  * Each media segment defines a concept of "null media data" (e.g. silence
@@ -81,16 +127,29 @@ public:
 
   /**
    * Gets the total duration of the segment.
    */
   StreamTime GetDuration() const { return mDuration; }
   Type GetType() const { return mType; }
 
   /**
+   * Gets the last principal handle that was appended to this segment.
+   */
+  PrincipalHandle GetLastPrincipalHandle() const { return mLastPrincipalHandle; }
+  /**
+   * Called by the MediaStreamGraph as it appends a chunk with a different
+   * principal handle than the current one.
+   */
+  void SetLastPrincipalHandle(PrincipalHandle aLastPrincipalHandle)
+  {
+    mLastPrincipalHandle = aLastPrincipalHandle;
+  }
+
+  /**
    * Create a MediaSegment of the same type.
    */
   virtual MediaSegment* CreateEmptyClone() const = 0;
   /**
    * Moves contents of aSource to the end of this segment.
    */
   virtual void AppendFrom(MediaSegment* aSource) = 0;
   /**
@@ -129,23 +188,28 @@ public:
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 protected:
-  explicit MediaSegment(Type aType) : mDuration(0), mType(aType)
+  explicit MediaSegment(Type aType)
+    : mDuration(0), mType(aType), mLastPrincipalHandle(PRINCIPAL_HANDLE_NONE)
   {
     MOZ_COUNT_CTOR(MediaSegment);
   }
 
   StreamTime mDuration; // total of mDurations of all chunks
   Type mType;
+
+  // The latest principal handle that the MediaStreamGraph has processed for
+  // this segment.
+  PrincipalHandle mLastPrincipalHandle;
 };
 
 /**
  * C is the implementation class subclassed from MediaSegmentBase.
  * C must contain a Chunk class.
  */
 template <class C, class Chunk> class MediaSegmentBase : public MediaSegment {
 public:
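The intended use of the helpers added above, for illustration only (assumes a
non-null aPrincipal; the function name is made up):

    void ExamplePrincipalHandleUsage(nsIPrincipal* aPrincipal)
    {
      MOZ_ASSERT(NS_IsMainThread());
      // Created on the main thread; the handle may then go to any thread.
      PrincipalHandle handle = MakePrincipalHandle(aPrincipal);

      // On any thread: handles can be copied and compared to each other.
      PrincipalHandle copy = handle;
      MOZ_ASSERT(copy == handle);

      // Back on the main thread: unwrap, or compare against an nsIPrincipal*.
      MOZ_ASSERT(GetPrincipalFromHandle(handle) == aPrincipal);
      MOZ_ASSERT(PrincipalHandleMatches(handle, aPrincipal));
    }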
--- a/dom/media/MediaStreamTrack.h
+++ b/dom/media/MediaStreamTrack.h
@@ -4,16 +4,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MEDIASTREAMTRACK_H_
 #define MEDIASTREAMTRACK_H_
 
 #include "mozilla/DOMEventTargetHelper.h"
 #include "nsError.h"
 #include "nsID.h"
+#include "nsIPrincipal.h"
 #include "StreamBuffer.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/CORSMode.h"
 #include "PrincipalChangeObserver.h"
 
 namespace mozilla {
 
 class DOMMediaStream;
--- a/dom/media/VideoSegment.cpp
+++ b/dom/media/VideoSegment.cpp
@@ -11,38 +11,41 @@
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 
 using namespace layers;
 
 VideoFrame::VideoFrame(already_AddRefed<Image>& aImage,
                        const gfx::IntSize& aIntrinsicSize)
-  : mImage(aImage), mIntrinsicSize(aIntrinsicSize), mForceBlack(false)
+  : mImage(aImage), mIntrinsicSize(aIntrinsicSize), mForceBlack(false),
+    mPrincipalHandle(PRINCIPAL_HANDLE_NONE)
 {}
 
 VideoFrame::VideoFrame()
-  : mIntrinsicSize(0, 0), mForceBlack(false)
+  : mIntrinsicSize(0, 0), mForceBlack(false), mPrincipalHandle(PRINCIPAL_HANDLE_NONE)
 {}
 
 VideoFrame::~VideoFrame()
 {}
 
 void
 VideoFrame::SetNull() {
   mImage = nullptr;
   mIntrinsicSize = gfx::IntSize(0, 0);
+  mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
 }
 
 void
 VideoFrame::TakeFrom(VideoFrame* aFrame)
 {
   mImage = aFrame->mImage.forget();
   mIntrinsicSize = aFrame->mIntrinsicSize;
   mForceBlack = aFrame->GetForceBlack();
+  mPrincipalHandle = aFrame->mPrincipalHandle;
 }
 
 /* static */ already_AddRefed<Image>
 VideoFrame::CreateBlackImage(const gfx::IntSize& aSize)
 {
   RefPtr<ImageContainer> container = LayerManager::CreateImageContainer();
   RefPtr<PlanarYCbCrImage> image = container->CreatePlanarYCbCrImage();
   if (!image) {
@@ -90,21 +93,23 @@ VideoChunk::VideoChunk()
 
 VideoChunk::~VideoChunk()
 {}
 
 void
 VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
                           StreamTime aDuration,
                           const IntSize& aIntrinsicSize,
+                          const PrincipalHandle& aPrincipalHandle,
                           bool aForceBlack)
 {
   VideoChunk* chunk = AppendChunk(aDuration);
   VideoFrame frame(aImage, aIntrinsicSize);
   frame.SetForceBlack(aForceBlack);
+  frame.SetPrincipalHandle(aPrincipalHandle);
   chunk->mFrame.TakeFrom(&frame);
 }
 
 VideoSegment::VideoSegment()
   : MediaSegmentBase<VideoSegment, VideoChunk>(VIDEO)
 {}
 
 VideoSegment::~VideoSegment()
--- a/dom/media/VideoSegment.h
+++ b/dom/media/VideoSegment.h
@@ -35,30 +35,35 @@ public:
   bool operator!=(const VideoFrame& aFrame) const
   {
     return !operator==(aFrame);
   }
 
   Image* GetImage() const { return mImage; }
   void SetForceBlack(bool aForceBlack) { mForceBlack = aForceBlack; }
   bool GetForceBlack() const { return mForceBlack; }
+  void SetPrincipalHandle(const PrincipalHandle& aPrincipalHandle) { mPrincipalHandle = aPrincipalHandle; }
+  PrincipalHandle GetPrincipalHandle() const { return mPrincipalHandle; }
   const gfx::IntSize& GetIntrinsicSize() const { return mIntrinsicSize; }
   void SetNull();
   void TakeFrom(VideoFrame* aFrame);
 
   // Create a planar YCbCr black image.
   static already_AddRefed<Image> CreateBlackImage(const gfx::IntSize& aSize);
 
 protected:
   // mImage can be null to indicate "no video" (aka "empty frame"). It can
   // still have an intrinsic size in this case.
   RefPtr<Image> mImage;
   // The desired size to render the video frame at.
   gfx::IntSize mIntrinsicSize;
   bool mForceBlack;
+  // principalHandle for the image in this frame.
+  // This can be compared to an nsIPrincipal when back on main thread.
+  PrincipalHandle mPrincipalHandle;
 };
 
 struct VideoChunk {
   VideoChunk();
   ~VideoChunk();
   void SliceTo(StreamTime aStart, StreamTime aEnd)
   {
     NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
@@ -81,32 +86,35 @@ struct VideoChunk {
 
   size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
   {
     // Future:
     // - mFrame
     return 0;
   }
 
+  PrincipalHandle GetPrincipalHandle() const { return mFrame.GetPrincipalHandle(); }
+
   StreamTime mDuration;
   VideoFrame mFrame;
   mozilla::TimeStamp mTimeStamp;
 };
 
 class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
 public:
   typedef mozilla::layers::Image Image;
   typedef mozilla::gfx::IntSize IntSize;
 
   VideoSegment();
   ~VideoSegment();
 
   void AppendFrame(already_AddRefed<Image>&& aImage,
                    StreamTime aDuration,
                    const IntSize& aIntrinsicSize,
+                   const PrincipalHandle& aPrincipalHandle,
                    bool aForceBlack = false);
   const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
   {
     VideoChunk* c = GetLastChunk();
     if (!c) {
       return nullptr;
     }
     if (aStart) {
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -258,16 +258,17 @@ VideoTrackEncoder::AppendVideoSegment(co
       // Canonically incorrect - the duration should go to the previous frame
       // - but that would require delaying until the next frame arrives.
       // Best would be to do like OMXEncoder and pass an effective timestamp
       // in with each frame (don't zero mTotalFrameDuration)
       if (image) {
         mRawSegment.AppendFrame(image.forget(),
                                 mTotalFrameDuration,
                                 chunk.mFrame.GetIntrinsicSize(),
+                                PRINCIPAL_HANDLE_NONE,
                                 chunk.mFrame.GetForceBlack());
         mTotalFrameDuration = 0;
       }
     }
     mLastFrame.TakeFrom(&chunk.mFrame);
     iter.Next();
   }
 
--- a/dom/media/gtest/TestVideoSegment.cpp
+++ b/dom/media/gtest/TestVideoSegment.cpp
@@ -16,16 +16,17 @@ namespace mozilla {
 TEST(VideoSegment, TestAppendFrameForceBlack)
 {
   RefPtr<layers::Image> testImage = nullptr;
 
   VideoSegment segment;
   segment.AppendFrame(testImage.forget(),
                       mozilla::StreamTime(90000),
                       mozilla::gfx::IntSize(640, 480),
+                      PRINCIPAL_HANDLE_NONE,
                       true);
 
   VideoSegment::ChunkIterator iter(segment);
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
     EXPECT_TRUE(chunk.mFrame.GetForceBlack());
     iter.Next();
   }
@@ -33,17 +34,18 @@ TEST(VideoSegment, TestAppendFrameForceB
 
 TEST(VideoSegment, TestAppendFrameNotForceBlack)
 {
   RefPtr<layers::Image> testImage = nullptr;
 
   VideoSegment segment;
   segment.AppendFrame(testImage.forget(),
                       mozilla::StreamTime(90000),
-                      mozilla::gfx::IntSize(640, 480));
+                      mozilla::gfx::IntSize(640, 480),
+                      PRINCIPAL_HANDLE_NONE);
 
   VideoSegment::ChunkIterator iter(segment);
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
     EXPECT_FALSE(chunk.mFrame.GetForceBlack());
     iter.Next();
   }
 }
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -266,17 +266,20 @@ TEST(VP8VideoTrackEncoder, FrameEncode)
   generator.Generate(images);
 
   // Put generated YUV frame into video segment.
   // Duration of each frame is 1 second.
   VideoSegment segment;
   for (nsTArray<RefPtr<Image>>::size_type i = 0; i < images.Length(); i++)
   {
     RefPtr<Image> image = images[i];
-    segment.AppendFrame(image.forget(), mozilla::StreamTime(90000), generator.GetSize());
+    segment.AppendFrame(image.forget(),
+                        mozilla::StreamTime(90000),
+                        generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE);
   }
 
   // track change notification.
   encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);
 
   // Pull Encoded Data back from encoder.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
--- a/dom/media/gtest/TestVorbisTrackEncoder.cpp
+++ b/dom/media/gtest/TestVorbisTrackEncoder.cpp
@@ -181,17 +181,17 @@ TEST(VorbisTrackEncoder, EncodedFrame)
     mozilla::SharedBuffer::Create(rate * sizeof(AudioDataValue));
   AudioDataValue* data = static_cast<AudioDataValue*>(samples->Data());
   for (int i = 0; i < rate; i++) {
     data[i] = ((i%8)*4000) - (7*4000)/2;
   }
   AutoTArray<const AudioDataValue*,1> channelData;
   channelData.AppendElement(data);
   AudioSegment segment;
-  segment.AppendFrames(samples.forget(), channelData, 44100);
+  segment.AppendFrames(samples.forget(), channelData, 44100, PRINCIPAL_HANDLE_NONE);
 
   // Track change notification.
   encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);
 
   // Pull Encoded data back from encoder and verify encoded samples.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
   // Should have some encoded data.
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -487,17 +487,17 @@ SendStreamAudio(DecodedStreamData* aStre
   // DecodedAudioDataSink::PlayFromAudioQueue()
   audio->EnsureAudioBuffer();
   RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
   AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
   AutoTArray<const AudioDataValue*, 2> channels;
   for (uint32_t i = 0; i < audio->mChannels; ++i) {
     channels.AppendElement(bufferData + i * audio->mFrames);
   }
-  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames);
+  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, PRINCIPAL_HANDLE_NONE /* Fixed in later patch */);
   aStream->mAudioFramesWritten += audio->mFrames;
   aOutput->ApplyVolume(aVolume);
 
   aStream->mNextAudioTime = audio->GetEndTime();
 }
 
 void
 DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin)
@@ -545,17 +545,17 @@ WriteVideoToMediaStream(MediaStream* aSt
                         int64_t aStartMicroseconds,
                         const mozilla::gfx::IntSize& aIntrinsicSize,
                         VideoSegment* aOutput)
 {
   RefPtr<layers::Image> image = aImage;
   StreamTime duration =
       aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
       aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
-  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize);
+  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize, PRINCIPAL_HANDLE_NONE /* Fixed in later patch */);
 }
 
 static bool
 ZeroDurationAtLastChunk(VideoSegment& aInput)
 {
   // Get the last video frame's start time in VideoSegment aInput.
   // If the start time is equal to the duration of aInput, means the last video
   // frame's duration is zero.
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -116,26 +116,27 @@ public:
   virtual void GetUUID(nsACString&) = 0;
 
   /* Release the device back to the system. */
   virtual nsresult Deallocate() = 0;
 
   /* Start the device and add the track to the provided SourceMediaStream, with
    * the provided TrackID. You may start appending data to the track
    * immediately after. */
-  virtual nsresult Start(SourceMediaStream*, TrackID) = 0;
+  virtual nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) = 0;
 
   /* tell the source if there are any direct listeners attached */
   virtual void SetDirectListeners(bool) = 0;
 
   /* Called when the stream wants more data */
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
-                          StreamTime aDesiredTime) = 0;
+                          StreamTime aDesiredTime,
+                          const PrincipalHandle& aPrincipalHandle) = 0;
 
   /* Stop the device and release the corresponding MediaStream */
   virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
 
   /* Restart with new capability */
   virtual nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                            const MediaEnginePrefs &aPrefs,
                            const nsString& aDeviceId) = 0;
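An implementation of the extended interface threads the handle into the data
it appends, roughly as below (a sketch modelled on MediaEngineDefaultVideoSource
further down; MyVideoSource and its members are assumptions):

    void MyVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                   SourceMediaStream* aSource,
                                   TrackID aID,
                                   StreamTime aDesiredTime,
                                   const PrincipalHandle& aPrincipalHandle)
    {
      MonitorAutoLock lock(mMonitor);
      StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
      if (delta > 0) {
        VideoSegment segment;
        RefPtr<layers::Image> image = mImage; // nullptr images are allowed
        IntSize size(image ? mWidth : 0, image ? mHeight : 0);
        // Tag the appended chunk with the principal of the pulling stream.
        segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
        aSource->AppendToTrack(aID, &segment);
      }
    }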
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -14,24 +14,25 @@ using namespace mozilla::dom;
 extern LogModule* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOGFRAME(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 // guts for appending data to the MSG track
 bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                                  layers::Image* aImage,
                                                  TrackID aID,
-                                                 StreamTime delta)
+                                                 StreamTime delta,
+                                                 const PrincipalHandle& aPrincipalHandle)
 {
   MOZ_ASSERT(aSource);
 
   VideoSegment segment;
   RefPtr<layers::Image> image = aImage;
   IntSize size(image ? mWidth : 0, image ? mHeight : 0);
-  segment.AppendFrame(image.forget(), delta, size);
+  segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
 
   // This is safe from any thread, and is safe if the track is Finished
   // or Destroyed.
   // This can fail if either a) we haven't added the track yet, or b)
   // we've removed or finished the track.
   return aSource->AppendToTrack(aID, &(segment));
 }
 
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.h
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.h
@@ -65,17 +65,18 @@ protected:
   typedef nsTArray<CapabilityCandidate> CapabilitySet;
 
   ~MediaEngineCameraVideoSource() {}
 
   // guts for appending data to the MSG track
   virtual bool AppendToTrack(SourceMediaStream* aSource,
                              layers::Image* aImage,
                              TrackID aID,
-                             StreamTime delta);
+                             StreamTime delta,
+                             const PrincipalHandle& aPrincipalHandle);
   uint32_t GetFitnessDistance(const webrtc::CaptureCapability& aCandidate,
                               const dom::MediaTrackConstraintSet &aConstraints,
                               bool aAdvanced,
                               const nsString& aDeviceId);
   static void TrimLessFitCandidates(CapabilitySet& set);
   static void LogConstraints(const dom::MediaTrackConstraintSet& aConstraints,
                              bool aAdvanced);
   static void LogCapability(const char* aHeader,
@@ -90,22 +91,23 @@ protected:
   void SetUUID(const char* aUUID);
   const nsCString& GetUUID(); // protected access
 
   // Engine variables.
 
   // mMonitor protects mImage access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack() and
   // image changes).
-  // mMonitor also protects mSources[] access/changes.
-  // mSources[] is accessed from webrtc threads.
+  // mMonitor also protects mSources[] and mPrincipalHandles[] access/changes.
+  // mSources[] and mPrincipalHandles[] are accessed from webrtc threads.
 
   // All the mMonitor accesses are from the child classes.
   Monitor mMonitor; // Monitor for processing Camera frames.
   nsTArray<RefPtr<SourceMediaStream>> mSources; // When this goes empty, we shut down HW
+  nsTArray<PrincipalHandle> mPrincipalHandles; // Directly mapped to mSources.
   RefPtr<layers::Image> mImage;
   RefPtr<layers::ImageContainer> mImageContainer;
   int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
   // end of data protected by mMonitor
 
 
   bool mInitDone;
   bool mHasDirectListeners;
--- a/dom/media/webrtc/MediaEngineDefault.cpp
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -140,17 +140,18 @@ static void AllocateSolidColorFrame(laye
 }
 
 static void ReleaseFrame(layers::PlanarYCbCrData& aData)
 {
   PR_Free(aData.mYChannel);
 }
 
 nsresult
-MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                     const PrincipalHandle& aPrincipalHandle)
 {
   if (mState != kAllocated) {
     return NS_ERROR_FAILURE;
   }
 
   mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
   if (!mTimer) {
     return NS_ERROR_FAILURE;
@@ -267,33 +268,34 @@ MediaEngineDefaultVideoSource::Notify(ns
 
   return NS_OK;
 }
 
 void
 MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                           SourceMediaStream *aSource,
                                           TrackID aID,
-                                          StreamTime aDesiredTime)
+                                          StreamTime aDesiredTime,
+                                          const PrincipalHandle& aPrincipalHandle)
 {
   // AddTrack takes ownership of segment
   VideoSegment segment;
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted) {
     return;
   }
 
   // Note: we're not giving up mImage here
   RefPtr<layers::Image> image = mImage;
   StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
 
   if (delta > 0) {
     // nullptr images are allowed
     IntSize size(image ? mOpts.mWidth : 0, image ? mOpts.mHeight : 0);
-    segment.AppendFrame(image.forget(), delta, size);
+    segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     aSource->AppendToTrack(aID, &segment);
     // Generate null data for fake tracks.
     if (mHasFakeTracks) {
       for (int i = 0; i < kFakeVideoTrackCount; ++i) {
         VideoSegment nullSegment;
         nullSegment.AppendNullData(delta);
@@ -353,16 +355,17 @@ private:
 
 /**
  * Default audio source.
  */
 NS_IMPL_ISUPPORTS(MediaEngineDefaultAudioSource, nsITimerCallback)
 
 MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
   : MediaEngineAudioSource(kReleased)
+  , mPrincipalHandle(PRINCIPAL_HANDLE_NONE)
   , mTimer(nullptr)
 {
 }
 
 MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
 {}
 
 void
@@ -417,17 +420,18 @@ MediaEngineDefaultAudioSource::Deallocat
   if (mState != kStopped && mState != kAllocated) {
     return NS_ERROR_FAILURE;
   }
   mState = kReleased;
   return NS_OK;
 }
 
 nsresult
-MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                     const PrincipalHandle& aPrincipalHandle)
 {
   if (mState != kAllocated) {
     return NS_ERROR_FAILURE;
   }
 
   mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
   if (!mTimer) {
     return NS_ERROR_FAILURE;
@@ -451,16 +455,19 @@ MediaEngineDefaultAudioSource::Start(Sou
       mSource->AddAudioTrack(kTrackCount + kFakeVideoTrackCount+i,
                              AUDIO_RATE, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
     }
   }
 
   // Remember TrackID so we can finish later
   mTrackID = aID;
 
+  // Remember PrincipalHandle since we don't append in NotifyPull.
+  mPrincipalHandle = aPrincipalHandle;
+
   mLastNotify = TimeStamp::Now();
 
   // 1 Audio frame per 10ms
 #if defined(MOZ_WIDGET_GONK) && defined(DEBUG)
 // B2G emulator debug is very, very slow and has problems dealing with realtime audio inputs
   mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS*10,
                            nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
 #else
@@ -509,17 +516,17 @@ MediaEngineDefaultAudioSource::AppendToS
                                                TrackTicks aSamples)
 {
   RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
 
   mSineGenerator->generate(dest, aSamples);
   AutoTArray<const int16_t*,1> channels;
   channels.AppendElement(dest);
-  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
+  aSegment.AppendFrames(buffer.forget(), channels, aSamples, mPrincipalHandle);
 }
 
 NS_IMETHODIMP
 MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
 {
   TimeStamp now = TimeStamp::Now();
   TimeDuration timeSinceLastNotify = now - mLastNotify;
   mLastNotify = now;
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -43,26 +43,27 @@ public:
   void GetName(nsAString&) override;
   void GetUUID(nsACString&) override;
 
   nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
                     const MediaEnginePrefs &aPrefs,
                     const nsString& aDeviceId,
                     const nsACString& aOrigin) override;
   nsresult Deallocate() override;
-  nsresult Start(SourceMediaStream*, TrackID) override;
+  nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
   nsresult Stop(SourceMediaStream*, TrackID) override;
   nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                    const MediaEnginePrefs &aPrefs,
                    const nsString& aDeviceId) override;
   void SetDirectListeners(bool aHasDirectListeners) override {};
   void NotifyPull(MediaStreamGraph* aGraph,
                   SourceMediaStream *aSource,
                   TrackID aId,
-                  StreamTime aDesiredTime) override;
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
   uint32_t GetBestFitnessDistance(
       const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
       const nsString& aDeviceId) override;
 
   bool IsFake() override {
     return true;
   }
 
@@ -113,27 +114,29 @@ public:
   void GetName(nsAString&) override;
   void GetUUID(nsACString&) override;
 
   nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
                     const MediaEnginePrefs &aPrefs,
                     const nsString& aDeviceId,
                     const nsACString& aOrigin) override;
   nsresult Deallocate() override;
-  nsresult Start(SourceMediaStream*, TrackID) override;
+  nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
   nsresult Stop(SourceMediaStream*, TrackID) override;
   nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                    const MediaEnginePrefs &aPrefs,
                    const nsString& aDeviceId) override;
   void SetDirectListeners(bool aHasDirectListeners) override {};
   void AppendToSegment(AudioSegment& aSegment, TrackTicks aSamples);
   void NotifyPull(MediaStreamGraph* aGraph,
                   SourceMediaStream *aSource,
                   TrackID aId,
-                  StreamTime aDesiredTime) override
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override
   {
 #ifdef DEBUG
     StreamBuffer::Track* data = aSource->FindTrack(aId);
     NS_WARN_IF_FALSE(!data || data->IsEnded() ||
                      aDesiredTime <= aSource->GetEndOfAppendedData(aId),
                      "MediaEngineDefaultAudioSource data underrun");
 #endif
   }
@@ -165,16 +168,17 @@ public:
 
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSITIMERCALLBACK
 
 protected:
   ~MediaEngineDefaultAudioSource();
 
   TrackID mTrackID;
+  PrincipalHandle mPrincipalHandle;
   nsCOMPtr<nsITimer> mTimer;
 
   TimeStamp mLastNotify;
   TrackTicks mBufferSize;
 
   SourceMediaStream* mSource;
   nsAutoPtr<SineWaveGenerator> mSineGenerator;
 };
--- a/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -62,17 +62,18 @@ NS_IMPL_RELEASE_INHERITED(MediaEngineGon
 
 // Called if the graph thinks it's running out of buffered video; repeat
 // the last frame for whatever minimum period it think it needs. Note that
 // this means that no *real* frame can be inserted during this period.
 void
 MediaEngineGonkVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                        SourceMediaStream* aSource,
                                        TrackID aID,
-                                       StreamTime aDesiredTime)
+                                       StreamTime aDesiredTime,
+                                       const PrincipalHandle& aPrincipalHandle)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   // B2G does AddTrack, but holds kStarted until the hardware changes state.
   // So mState could be kReleased here. We really don't care about the state,
   // though.
 
@@ -90,20 +91,20 @@ MediaEngineGonkVideoSource::NotifyPull(M
   // light; we should consider surfacing this so that we can switch to a
   // lower resolution (which may up the frame rate)
 
   // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
   // Doing so means a negative delta and thus messes up handling of the graph
   if (delta > 0) {
     // nullptr images are allowed
     IntSize size(image ? mWidth : 0, image ? mHeight : 0);
-    segment.AppendFrame(image.forget(), delta, size);
+    segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     aSource->AppendToTrack(aID, &(segment));
   }
 }
 
 size_t
 MediaEngineGonkVideoSource::NumCapabilities()
 {
   // TODO: Stop hardcoding. Use GetRecorderProfiles+GetProfileInfo (Bug 1128550)
   //
@@ -878,17 +879,17 @@ MediaEngineGonkVideoSource::OnNewMediaBu
     for (uint32_t i = 0; i < len; i++) {
       if (mSources[i]) {
         // Duration is 1 here.
         // Ideally, it should be camera timestamp here and the MSG will have
         // enough sample duration without calling NotifyPull() anymore.
         // Unfortunately, clock in gonk camera looks like is a different one
         // comparing to MSG. As result, it causes time inaccurate. (frames be
         // queued in MSG longer and longer as time going by in device like Frame)
-        AppendToTrack(mSources[i], mImage, mTrackID, 1);
+        AppendToTrack(mSources[i], mImage, mTrackID, 1, mPrincipalHandles[i]);
       }
     }
     if (mImage->AsGrallocImage()) {
       GonkCameraImage* cameraImage = static_cast<GonkCameraImage*>(mImage.get());
       // Clear MediaBuffer immediately, it prevents MediaBuffer is kept in
       // MediaStreamGraph thread.
       cameraImage->ClearMediaBuffer();
     }
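
OnNewMediaBuffer() pushes frames through the camera-source AppendToTrack() helper, which now takes the handle recorded for each stream at Start(). The helper is defined elsewhere in the patch; assuming it simply forwards the handle onto the chunk it appends, it plausibly looks like this:

    void
    MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                                layers::Image* aImage,
                                                TrackID aID,
                                                StreamTime aDuration,
                                                const PrincipalHandle& aPrincipalHandle)
    {
      MOZ_ASSERT(aSource);
      VideoSegment segment;
      RefPtr<layers::Image> image = aImage;
      IntSize size(image ? mWidth : 0, image ? mHeight : 0);
      // The handle travels inside the chunk; SourceMediaStream::AppendToTrack()
      // itself keeps its old signature.
      segment.AppendFrame(image.forget(), aDuration, size, aPrincipalHandle);
      aSource->AppendToTrack(aID, &segment);
    }
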
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -72,16 +72,17 @@ MediaEngineRemoteVideoSource::Shutdown()
     SourceMediaStream *source;
     bool empty;
 
     while (1) {
       {
         MonitorAutoLock lock(mMonitor);
         empty = mSources.IsEmpty();
         if (empty) {
+          MOZ_ASSERT(mPrincipalHandles.IsEmpty());
           break;
         }
         source = mSources[0];
       }
       Stop(source, kVideoTrack); // XXX change to support multiple tracks
     }
     MOZ_ASSERT(mState == kStopped);
   }
@@ -125,16 +126,17 @@ MediaEngineRemoteVideoSource::Allocate(c
       return NS_ERROR_FAILURE;
     }
     mState = kAllocated;
     LOG(("Video device %d allocated for %s", mCaptureIndex,
          PromiseFlatCString(aOrigin).get()));
   } else if (MOZ_LOG_TEST(GetMediaManagerLog(), mozilla::LogLevel::Debug)) {
     MonitorAutoLock lock(mMonitor);
     if (mSources.IsEmpty()) {
+      MOZ_ASSERT(mPrincipalHandles.IsEmpty());
       LOG(("Video device %d reallocated", mCaptureIndex));
     } else {
       LOG(("Video device %d allocated shared", mCaptureIndex));
     }
   }
 
   ++mNrAllocations;
 
@@ -161,28 +163,31 @@ MediaEngineRemoteVideoSource::Deallocate
     LOG(("Video device %d deallocated", mCaptureIndex));
   } else {
     LOG(("Video device %d deallocated but still in use", mCaptureIndex));
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineRemoteVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineRemoteVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                    const PrincipalHandle& aPrincipalHandle)
 {
   LOG((__PRETTY_FUNCTION__));
   AssertIsOnOwningThread();
   if (!mInitDone || !aStream) {
     LOG(("No stream or init not done"));
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
+    mPrincipalHandles.AppendElement(aPrincipalHandle);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
   }
 
   aStream->AddTrack(aID, 0, new VideoSegment(), SourceMediaStream::ADDTRACK_QUEUED);
 
   if (mState == kStarted) {
     return NS_OK;
   }
   mImageContainer = layers::LayerManager::CreateImageContainer();
@@ -204,21 +209,26 @@ nsresult
 MediaEngineRemoteVideoSource::Stop(mozilla::SourceMediaStream* aSource,
                                    mozilla::TrackID aID)
 {
   LOG((__PRETTY_FUNCTION__));
   AssertIsOnOwningThread();
   {
     MonitorAutoLock lock(mMonitor);
 
-    if (!mSources.RemoveElement(aSource)) {
+    size_t i = mSources.IndexOf(aSource);
+    if (i == mSources.NoIndex) {
       // Already stopped - this is allowed
       return NS_OK;
     }
 
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
+    mSources.RemoveElementAt(i);
+    mPrincipalHandles.RemoveElementAt(i);
+
     aSource->EndTrack(aID);
 
     if (!mSources.IsEmpty()) {
       return NS_OK;
     }
     if (mState != kStarted) {
       return NS_ERROR_FAILURE;
     }
@@ -263,26 +273,32 @@ MediaEngineRemoteVideoSource::Restart(co
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 void
 MediaEngineRemoteVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream* aSource,
-                                         TrackID aID, StreamTime aDesiredTime)
+                                         TrackID aID, StreamTime aDesiredTime,
+                                         const PrincipalHandle& aPrincipalHandle)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
 
   if (delta > 0) {
+    size_t i = mSources.IndexOf(aSource);
+    if (i == mSources.NoIndex) {
+      NS_ERROR("aSource not in mSources");
+      return;
+    }
     // nullptr images are allowed
-    AppendToTrack(aSource, mImage, aID, delta);
+    AppendToTrack(aSource, mImage, aID, delta, aPrincipalHandle);
   }
 }
 
 int
 MediaEngineRemoteVideoSource::FrameSizeChange(unsigned int w, unsigned int h,
                                               unsigned int streams)
 {
   mWidth = w;
@@ -356,17 +372,18 @@ MediaEngineRemoteVideoSource::DeliverFra
 
   // XXX The timestamp for the frame should be based on the Capture time,
   // not the MSG time, and MSG should never, ever block on a (realtime)
   // video frame (or even really for streaming - audio yes, video probably no).
   // Note that MediaPipeline currently ignores the timestamps from MSG
   uint32_t len = mSources.Length();
   for (uint32_t i = 0; i < len; i++) {
     if (mSources[i]) {
-      AppendToTrack(mSources[i], mImage, mTrackID, 1); // shortest possible duration
+      // shortest possible duration
+      AppendToTrack(mSources[i], mImage, mTrackID, 1, mPrincipalHandles[i]);
     }
   }
 
   return 0;
 }
 
 size_t
 MediaEngineRemoteVideoSource::NumCapabilities()
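
Start() and Stop() above keep mSources and mPrincipalHandles index-aligned under mMonitor, which is what makes per-stream lookups like mPrincipalHandles[i] in DeliverFrame() safe. A hypothetical accessor, not part of the patch, that captures the invariant:

    // Caller must hold mMonitor.
    PrincipalHandle
    MediaEngineRemoteVideoSource::PrincipalHandleFor(SourceMediaStream* aSource)
    {
      MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
      size_t i = mSources.IndexOf(aSource);
      if (i == mSources.NoIndex) {
        // Already stopped; no principal is flowing for this stream.
        return PRINCIPAL_HANDLE_NONE;
      }
      return mPrincipalHandles[i];
    }
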
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.h
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -71,25 +71,26 @@ public:
                                dom::MediaSourceEnum aMediaSource,
                                const char* aMonitorName = "RemoteVideo.Monitor");
 
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
                     const nsACString& aOrigin) override;
   nsresult Deallocate() override;
-  nsresult Start(SourceMediaStream*, TrackID) override;
+  nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
   nsresult Stop(SourceMediaStream*, TrackID) override;
   nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                    const MediaEnginePrefs &aPrefs,
                    const nsString& aDeviceId) override;
   void NotifyPull(MediaStreamGraph* aGraph,
                   SourceMediaStream* aSource,
                   TrackID aId,
-                  StreamTime aDesiredTime) override;
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
   dom::MediaSourceEnum GetMediaSource() const override {
     return mMediaSource;
   }
 
   bool ChooseCapability(const dom::MediaTrackConstraints &aConstraints,
                         const MediaEnginePrefs &aPrefs,
                         const nsString& aDeviceId) override;
 
--- a/dom/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -170,44 +170,47 @@ MediaEngineTabVideoSource::Restart(const
 
 nsresult
 MediaEngineTabVideoSource::Deallocate()
 {
   return NS_OK;
 }
 
 nsresult
-MediaEngineTabVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineTabVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                 const PrincipalHandle& aPrincipalHandle)
 {
   nsCOMPtr<nsIRunnable> runnable;
   if (!mWindow)
     runnable = new InitRunnable(this);
   else
     runnable = new StartRunnable(this);
   NS_DispatchToMainThread(runnable);
   aStream->AddTrack(aID, 0, new VideoSegment());
 
   return NS_OK;
 }
 
 void
 MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                       SourceMediaStream* aSource,
-                                      TrackID aID, StreamTime aDesiredTime)
+                                      TrackID aID, StreamTime aDesiredTime,
+                                      const PrincipalHandle& aPrincipalHandle)
 {
   VideoSegment segment;
   MonitorAutoLock mon(mMonitor);
 
   // Note: we're not giving up mImage here
   RefPtr<layers::SourceSurfaceImage> image = mImage;
   StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
   if (delta > 0) {
     // nullptr images are allowed
     gfx::IntSize size = image ? image->GetSize() : IntSize(0, 0);
-    segment.AppendFrame(image.forget().downcast<layers::Image>(), delta, size);
+    segment.AppendFrame(image.forget().downcast<layers::Image>(), delta, size,
+                        aPrincipalHandle);
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     aSource->AppendToTrack(aID, &(segment));
   }
 }
 
 void
 MediaEngineTabVideoSource::Draw() {
--- a/dom/media/webrtc/MediaEngineTabVideoSource.h
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.h
@@ -22,19 +22,19 @@ class MediaEngineTabVideoSource : public
     void Shutdown() override {};
     void GetName(nsAString_internal&) override;
     void GetUUID(nsACString_internal&) override;
     nsresult Allocate(const dom::MediaTrackConstraints &,
                       const mozilla::MediaEnginePrefs&,
                       const nsString& aDeviceId,
                       const nsACString& aOrigin) override;
     nsresult Deallocate() override;
-    nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID) override;
+    nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID, const mozilla::PrincipalHandle&) override;
     void SetDirectListeners(bool aHasDirectListeners) override {};
-    void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime) override;
+    void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, const mozilla::PrincipalHandle& aPrincipalHandle) override;
     nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID) override;
     nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                      const mozilla::MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId) override;
     bool IsFake() override;
     dom::MediaSourceEnum GetMediaSource() const override {
       return dom::MediaSourceEnum::Browser;
     }
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -83,33 +83,38 @@ public:
   {
     // Nothing to do here, everything is managed in MediaManager.cpp
     return NS_OK;
   }
   void Shutdown() override
   {
     // Nothing to do here, everything is managed in MediaManager.cpp
   }
-  nsresult Start(SourceMediaStream* aMediaStream, TrackID aId) override;
+  nsresult Start(SourceMediaStream* aMediaStream,
+                 TrackID aId,
+                 const PrincipalHandle& aPrincipalHandle) override;
   nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
   nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                    const MediaEnginePrefs &aPrefs,
                    const nsString& aDeviceId) override;
   void SetDirectListeners(bool aDirect) override
   {}
   void NotifyOutputData(MediaStreamGraph* aGraph,
                         AudioDataValue* aBuffer, size_t aFrames,
                         TrackRate aRate, uint32_t aChannels) override
   {}
   void NotifyInputData(MediaStreamGraph* aGraph,
                        const AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override
   {}
-  void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
-                  TrackID aID, StreamTime aDesiredTime) override
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  SourceMediaStream* aSource,
+                  TrackID aID,
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override
   {}
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::AudioCapture;
   }
   bool IsFake() override
   {
     return false;
@@ -444,27 +449,30 @@ public:
   void GetName(nsAString& aName) override;
   void GetUUID(nsACString& aUUID) override;
 
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
                     const nsACString& aOrigin) override;
   nsresult Deallocate() override;
-  nsresult Start(SourceMediaStream* aStream, TrackID aID) override;
+  nsresult Start(SourceMediaStream* aStream,
+                 TrackID aID,
+                 const PrincipalHandle& aPrincipalHandle) override;
   nsresult Stop(SourceMediaStream* aSource, TrackID aID) override;
   nsresult Restart(const dom::MediaTrackConstraints& aConstraints,
                    const MediaEnginePrefs &aPrefs,
                    const nsString& aDeviceId) override;
   void SetDirectListeners(bool aHasDirectListeners) override {};
 
   void NotifyPull(MediaStreamGraph* aGraph,
                   SourceMediaStream* aSource,
                   TrackID aId,
-                  StreamTime aDesiredTime) override;
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
 
   // AudioDataListenerInterface methods
   void NotifyOutputData(MediaStreamGraph* aGraph,
                         AudioDataValue* aBuffer, size_t aFrames,
                         TrackRate aRate, uint32_t aChannels) override;
   void NotifyInputData(MediaStreamGraph* aGraph,
                        const AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override;
@@ -507,21 +515,23 @@ private:
 
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
   ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
 
   nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
 
-  // mMonitor protects mSources[] access/changes, and transitions of mState
-  // from kStarted to kStopped (which are combined with EndTrack()).
-  // mSources[] is accessed from webrtc threads.
+  // mMonitor protects mSources[] and mPrincipalHandles[] access/changes, and
+  // transitions of mState from kStarted to kStopped (which are combined with
+  // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
+  // threads.
   Monitor mMonitor;
   nsTArray<RefPtr<SourceMediaStream>> mSources;
+  nsTArray<PrincipalHandle> mPrincipalHandles; // Maps to mSources.
   nsCOMPtr<nsIThread> mThread;
   int mCapIndex;
   int mChannel;
   int mNrAllocations; // When this becomes 0, we shut down HW
   TrackID mTrackID;
   bool mInitDone;
   bool mStarted;
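
The microphone source in the next file follows the discipline this comment describes: webrtc callback threads take mMonitor and fan each block of samples out to every consuming stream, tagging the chunks with the handle recorded for that stream at Start(). In sketch form, with aSamples and aFrames as illustrative names:

    MonitorAutoLock lock(mMonitor);
    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
    for (size_t i = 0; i < mSources.Length(); ++i) {
      if (!mSources[i]) {
        continue;
      }
      // Each stream gets its own copy of the samples.
      RefPtr<SharedBuffer> buffer =
        SharedBuffer::Create(aFrames * sizeof(int16_t));
      memcpy(buffer->Data(), aSamples, aFrames * sizeof(int16_t));
      AutoTArray<const int16_t*, 1> channels;
      channels.AppendElement(static_cast<const int16_t*>(buffer->Data()));
      AudioSegment segment;
      // The chunk carries the principal recorded for this stream at Start().
      segment.AppendFrames(buffer.forget(), channels, aFrames,
                           mPrincipalHandles[i]);
      mSources[i]->AppendToTrack(mTrackID, &segment);
    }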
 
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -333,26 +333,29 @@ MediaEngineWebRTCMicrophoneSource::Deall
   } else {
     LOG(("Audio device %d deallocated but still in use", mCapIndex));
   }
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
-                                         TrackID aID)
+                                         TrackID aID,
+                                         const PrincipalHandle& aPrincipalHandle)
 {
   AssertIsOnOwningThread();
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
+    mPrincipalHandles.AppendElement(aPrincipalHandle);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
   }
 
   AudioSegment* segment = new AudioSegment();
   aStream->AddAudioTrack(aID, mSampleFrequency, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
 
   // XXX Make this based on the pref.
   aStream->RegisterForAudioMixing();
   LOG(("Start audio for stream %p", aStream));
@@ -393,20 +396,24 @@ MediaEngineWebRTCMicrophoneSource::Start
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   AssertIsOnOwningThread();
   {
     MonitorAutoLock lock(mMonitor);
 
-    if (!mSources.RemoveElement(aSource)) {
+    size_t sourceIndex = mSources.IndexOf(aSource);
+    if (sourceIndex == mSources.NoIndex) {
       // Already stopped - this is allowed
       return NS_OK;
     }
+    mSources.RemoveElementAt(sourceIndex);
+    mPrincipalHandles.RemoveElementAt(sourceIndex);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
 
     aSource->EndTrack(aID);
 
     if (!mSources.IsEmpty()) {
       mAudioInput->StopRecording(aSource);
       return NS_OK;
     }
     if (mState != kStarted) {
@@ -431,17 +438,18 @@ MediaEngineWebRTCMicrophoneSource::Stop(
   }
   return NS_OK;
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
                                               SourceMediaStream *aSource,
                                               TrackID aID,
-                                              StreamTime aDesiredTime)
+                                              StreamTime aDesiredTime,
+                                              const PrincipalHandle& aPrincipalHandle)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
                                                     AudioDataValue* aBuffer,
@@ -649,17 +657,18 @@ MediaEngineWebRTCMicrophoneSource::Proce
     RefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
 
     sample* dest = static_cast<sample*>(buffer->Data());
     memcpy(dest, audio10ms, length * sizeof(sample));
 
     nsAutoPtr<AudioSegment> segment(new AudioSegment());
     AutoTArray<const sample*,1> channels;
     channels.AppendElement(dest);
-    segment->AppendFrames(buffer.forget(), channels, length);
+    segment->AppendFrames(buffer.forget(), channels, length,
+                          mPrincipalHandles[i]);
     TimeStamp insertTime;
     segment->GetStartTime(insertTime);
 
     if (mSources[i]) {
       // Make sure we include the stream and the track.
       // The 0:1 is a flag to note when we've done the final insert for a given input block.
       LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
               (i+1 < len) ? 0 : 1, insertTime);
@@ -702,17 +711,18 @@ MediaEngineWebRTCAudioCaptureSource::Get
   asciiString.AssignASCII(uuidBuffer);
 
   // Remove {} and the null terminator
   aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
 }
 
 nsresult
 MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
-                                           TrackID aId)
+                                           TrackID aId,
+                                           const PrincipalHandle& aPrincipalHandle)
 {
   AssertIsOnOwningThread();
   aMediaStream->AddTrack(aId, 0, new AudioSegment());
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -155,17 +155,17 @@ SpeechRecognition::WrapObject(JSContext*
 {
   return SpeechRecognitionBinding::Wrap(aCx, this, aGivenProto);
 }
 
 bool
 SpeechRecognition::IsAuthorized(JSContext* aCx, JSObject* aGlobal)
 {
   nsCOMPtr<nsIPrincipal> principal = nsContentUtils::ObjectPrincipal(aGlobal);
-  
+
   nsresult rv;
   nsCOMPtr<nsIPermissionManager> mgr = do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return false;
   }
 
   uint32_t speechRecognition = nsIPermissionManager::UNKNOWN_ACTION;
   rv = mgr->TestExactPermissionFromPrincipal(principal, "speech-recognition", &speechRecognition);
@@ -929,17 +929,18 @@ SpeechRecognition::CreateAudioSegment(ns
 {
   AudioSegment* segment = new AudioSegment();
   for (uint32_t i = 0; i < aChunks.Length(); ++i) {
     RefPtr<SharedBuffer> buffer = aChunks[i];
     const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());
 
     AutoTArray<const int16_t*, 1> channels;
     channels.AppendElement(chunkData);
-    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
+    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk,
+                          PRINCIPAL_HANDLE_NONE);
   }
 
   return segment;
 }
 
 void
 SpeechRecognition::FeedAudioData(already_AddRefed<SharedBuffer> aSamples,
                                  uint32_t aDuration,
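
Segments built outside getUserMedia, like the one in CreateAudioSegment() above, tag their chunks with PRINCIPAL_HANDLE_NONE. The handle's final destination is the chunk itself: judging from the call sites, AppendFrames() under this patch plausibly does little more than stash the handle on the chunk it creates (the real declaration is in AudioSegment.h, earlier in this patch):

    void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                      const nsTArray<const int16_t*>& aChannelData,
                      int32_t aDuration,
                      const PrincipalHandle& aPrincipalHandle)
    {
      AudioChunk* chunk = AppendChunk(aDuration);
      chunk->mBuffer = aBuffer;
      for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
        chunk->mChannelData.AppendElement(aChannelData[channel]);
      }
      chunk->mBufferFormat = AUDIO_FORMAT_S16;
      // This is the point of the patch: the principal rides on the chunk.
      chunk->mPrincipalHandle = aPrincipalHandle;
    }
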
--- a/dom/media/webspeech/synth/nsSpeechTask.cpp
+++ b/dom/media/webspeech/synth/nsSpeechTask.cpp
@@ -327,17 +327,18 @@ nsSpeechTask::SendAudioImpl(RefPtr<mozil
   if (aDataLen == 0) {
     mStream->EndAllTrackAndFinish();
     return;
   }
 
   AudioSegment segment;
   AutoTArray<const int16_t*, 1> channelData;
   channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
-  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
+  segment.AppendFrames(aSamples.forget(), channelData, aDataLen,
+                       PRINCIPAL_HANDLE_NONE);
   mStream->AppendToTrack(1, &segment);
   mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 }
 
 NS_IMETHODIMP
 nsSpeechTask::DispatchStart()
 {
   if (!mIndirectAudio) {
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -1477,17 +1477,18 @@ NotifyPull(MediaStreamGraph* graph, Stre
 
     DeinterleaveAndConvertBuffer(scratch_buffer,
                                  frames,
                                  channelCount,
                                  channels.Elements());
 
     outputChannels.AppendElements(channels);
 
-    segment.AppendFrames(samples.forget(), outputChannels, frames);
+    segment.AppendFrames(samples.forget(), outputChannels, frames,
+                         PRINCIPAL_HANDLE_NONE /* Fixed in later patch */);
 
     // Handle track not actually added yet or removed/finished
     if (source_->AppendToTrack(track_id_, &segment)) {
       played_ticks_ += frames;
     } else {
       MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
       // we can't un-read the data, but that's ok since we don't want to
       // buffer - but don't i-loop!
@@ -1606,17 +1607,18 @@ NotifyPull(MediaStreamGraph* graph, Stre
 #if defined(MOZILLA_INTERNAL_API)
   StreamTime delta = desired_time - played_ticks_;
 
   // Don't append if we've already provided a frame that supposedly
   // goes past the current aDesiredTime. Doing so means a negative
   // delta and thus messes up handling of the graph.
   if (delta > 0) {
     VideoSegment segment;
-    segment.AppendFrame(image.forget(), delta, IntSize(width_, height_));
+    segment.AppendFrame(image.forget(), delta, IntSize(width_, height_),
+                        PRINCIPAL_HANDLE_NONE /* Fixed in later patch */);
     // Handle track not actually added yet or removed/finished
     if (source_->AppendToTrack(track_id_, &segment)) {
       played_ticks_ = desired_time;
     } else {
       MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
       return;
     }
   }
--- a/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
+++ b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
@@ -113,17 +113,20 @@ void Fake_AudioStreamSource::Periodic() 
     // sawtooth audio sample
     data[i] = ((mCount % 8) * 4000) - (7*4000)/2;
     mCount++;
   }
 
   mozilla::AudioSegment segment;
   AutoTArray<const int16_t *,1> channels;
   channels.AppendElement(data);
-  segment.AppendFrames(samples.forget(), channels, AUDIO_BUFFER_SIZE);
+  segment.AppendFrames(samples.forget(),
+                       channels,
+                       AUDIO_BUFFER_SIZE,
+                       mozilla::PRINCIPAL_HANDLE_NONE);
 
   for(std::set<RefPtr<Fake_MediaStreamListener>>::iterator it = mListeners.begin();
        it != mListeners.end(); ++it) {
     (*it)->NotifyQueuedTrackChanges(nullptr, // Graph
                                     0, // TrackID
                                     0, // Offset TODO(ekr@rtfm.com) fix
                                     0, // ???
                                     segment,