Bug 1404977 - Part 13 - Remove the useless mutex, and assert why it was useless, in WebRTCAudioDataListener. r?pehrsons
MozReview-Commit-ID: 2Mb5WZXbYgS
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -873,16 +873,20 @@ MediaStreamGraphImpl::CloseAudioInputImp
MOZ_ASSERT(aID.isSome(), "Closing an audio input that was not opened.");
}
nsTArray<RefPtr<AudioDataListener>>* listeners = mInputDeviceUsers.GetValue(aID.value());
MOZ_ASSERT(listeners);
DebugOnly<bool> wasPresent = listeners->RemoveElement(aListener);
MOZ_ASSERT(wasPresent);
+
+ // Breaks the cycle between the MSG and the listener.
+ aListener->Disconnect(this);
+
if (!listeners->IsEmpty()) {
// There is still a consumer for this audio input device
return;
}
mInputDeviceID = nullptr; // reset to default
mInputDeviceUsers.Remove(aID.value());
@@ -985,17 +989,17 @@ void MediaStreamGraphImpl::DeviceChanged
if (!mInputDeviceID) {
return;
}
nsTArray<RefPtr<AudioDataListener>>* listeners =
mInputDeviceUsers.GetValue(mInputDeviceID);
for (auto& listener : *listeners) {
- listener->DeviceChanged();
+ listener->DeviceChanged(this);
}
}
void MediaStreamGraphImpl::DeviceChanged()
{
// This is safe to be called from any thread: this message comes from an
// underlying platform API, and we don't have much guarantees. If it is not
// called from the main thread (and it probably will rarely be), it will post
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -101,36 +101,41 @@ protected:
public:
/* These are for cubeb audio input & output streams: */
/**
* Output data to speakers, for use as the "far-end" data for echo
* cancellation. This is not guaranteed to be in any particular size
* chunks.
*/
- virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+ virtual void NotifyOutputData(MediaStreamGraphImpl* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) = 0;
/**
* Input data from a microphone (or other audio source. This is not
* guaranteed to be in any particular size chunks.
*/
- virtual void NotifyInputData(MediaStreamGraph* aGraph,
+ virtual void NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) = 0;
/**
* Number of audio input channels.
*/
- virtual uint32_t InputChannelCount() = 0;
+ virtual uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) = 0;
/**
* Called when the underlying audio device has changed.
*/
- virtual void DeviceChanged() = 0;
+ virtual void DeviceChanged(MediaStreamGraphImpl* aGraph) = 0;
+
+ /**
+ * Called when the underlying audio device is being closed.
+ */
+ virtual void Disconnect(MediaStreamGraphImpl* aGraph) = 0;
};
class AudioDataListener : public AudioDataListenerInterface {
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~AudioDataListener() {}
public:
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -479,17 +479,17 @@ public:
uint32_t maxInputChannels = 0;
// When/if we decide to support multiple input device per graph, this needs
// loop over them.
nsTArray<RefPtr<AudioDataListener>>* listeners =
mInputDeviceUsers.GetValue(mInputDeviceID);
MOZ_ASSERT(listeners);
for (const auto& listener : *listeners) {
maxInputChannels =
- std::max(maxInputChannels, listener->InputChannelCount());
+ std::max(maxInputChannels, listener->RequestedInputChannelCount(this));
}
return maxInputChannels;
}
CubebUtils::AudioDeviceID InputDeviceID()
{
return mInputDeviceID;
}
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -155,49 +155,49 @@ private:
Mutex mMutex;
nsTArray<RefPtr<AudioDeviceInfo>> mDevices;
// If mManualInvalidation is true, then it is necessary to query the device
// list each time instead of relying on automatic invalidation of the cache by
// cubeb itself. Set in the constructor and then can be access on any thread.
bool mManualInvalidation;
};
+// This class is instantiated on the MediaManager thread, and is then sent to
+// the MediaStreamGraph thread, where it is only ever accessed again.
class WebRTCAudioDataListener : public AudioDataListener
{
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~WebRTCAudioDataListener() {}
public:
explicit WebRTCAudioDataListener(MediaEngineWebRTCMicrophoneSource* aAudioSource)
- : mMutex("WebRTCAudioDataListener::mMutex")
- , mAudioSource(aAudioSource)
+ : mAudioSource(aAudioSource)
{}
// AudioDataListenerInterface methods
- void NotifyOutputData(MediaStreamGraph* aGraph,
+ void NotifyOutputData(MediaStreamGraphImpl* aGraph,
AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels) override;
- void NotifyInputData(MediaStreamGraph* aGraph,
+ void NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels) override;
- uint32_t InputChannelCount() override;
+ uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) override;
- void DeviceChanged() override;
+ void DeviceChanged(MediaStreamGraphImpl* aGraph) override;
- void Shutdown();
+ void Disconnect(MediaStreamGraphImpl* aGraph) override;
private:
- Mutex mMutex;
RefPtr<MediaEngineWebRTCMicrophoneSource> mAudioSource;
};
class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource,
public AudioDataListenerInterface
{
public:
MediaEngineWebRTCMicrophoneSource(RefPtr<AudioDeviceInfo> aInfo,
@@ -242,30 +242,32 @@ public:
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream,
TrackID aTrackID,
StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
// AudioDataListenerInterface methods
- void NotifyOutputData(MediaStreamGraph* aGraph,
+ void NotifyOutputData(MediaStreamGraphImpl* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) override;
- void NotifyInputData(MediaStreamGraph* aGraph,
+ void NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) override;
- void DeviceChanged() override;
+ void DeviceChanged(MediaStreamGraphImpl* aGraph) override;
uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) override
{
return GetRequestedInputChannelCount(aGraph);
}
+ void Disconnect(MediaStreamGraphImpl* aGraph) override;
+
dom::MediaSourceEnum GetMediaSource() const override
{
return dom::MediaSourceEnum::Microphone;
}
nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
{
return NS_ERROR_NOT_IMPLEMENTED;
@@ -359,34 +361,36 @@ private:
bool HasEnabledTrack() const;
template<typename T>
void InsertInGraph(const T* aBuffer,
size_t aFrames,
uint32_t aChannels);
- void PacketizeAndProcess(MediaStreamGraph* aGraph,
+ void PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels);
// This is true when all processing is disabled, we can skip
// packetization, resampling and other processing passes.
// Graph thread only.
bool PassThrough(MediaStreamGraphImpl* aGraphImpl) const;
// Graph thread only.
void SetPassThrough(bool aPassThrough);
uint32_t GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl);
void SetRequestedInputChannelCount(uint32_t aRequestedInputChannelCount);
- // Owning thread only.
+ // mListener is created on the MediaManager thread, and then sent to the MSG
+ // thread. On shutdown, we send this pointer to the MSG thread again, telling
+ // it to clean up.
RefPtr<WebRTCAudioDataListener> mListener;
// Can be shared on any thread.
const RefPtr<AudioDeviceInfo> mDeviceInfo;
const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
// accessed from the GraphDriver thread except for deletion.
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -53,59 +53,62 @@ LogModule* AudioLogModule() {
void
-WebRTCAudioDataListener::NotifyOutputData(MediaStreamGraph* aGraph,
+WebRTCAudioDataListener::NotifyOutputData(MediaStreamGraphImpl* aGraph,
AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels)
{
- MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
if (mAudioSource) {
mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels);
}
}
void
-WebRTCAudioDataListener::NotifyInputData(MediaStreamGraph* aGraph,
+WebRTCAudioDataListener::NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels)
{
- MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
if (mAudioSource) {
mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels);
}
}
void
-WebRTCAudioDataListener::DeviceChanged()
+WebRTCAudioDataListener::DeviceChanged(MediaStreamGraphImpl* aGraph)
{
- MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
if (mAudioSource) {
- mAudioSource->DeviceChanged();
+ mAudioSource->DeviceChanged(aGraph);
}
}
uint32_t
-WebRTCAudioDataListener::InputChannelCount()
+WebRTCAudioDataListener::RequestedInputChannelCount(MediaStreamGraphImpl* aGraph)
{
- MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
if (mAudioSource) {
- return mAudioSource->InputChannelCount();
+ return mAudioSource->RequestedInputChannelCount(aGraph);
}
return 0;
}
void
-WebRTCAudioDataListener::Shutdown()
+WebRTCAudioDataListener::Disconnect(MediaStreamGraphImpl* aGraph)
{
- MutexAutoLock lock(mMutex);
- mAudioSource = nullptr;
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
+ if (mAudioSource) {
+ mAudioSource->Disconnect(aGraph);
+ mAudioSource = nullptr;
+ }
}
/**
* WebRTC Microphone MediaEngineSource.
*/
MediaEngineWebRTCMicrophoneSource::Allocation::Allocation(
const RefPtr<AllocationHandle>& aHandle)
@@ -524,31 +527,37 @@ MediaEngineWebRTCMicrophoneSource::SetPa
"Wrong calling pattern, don't call this before ::SetTrack.");
}
mSkipProcessing = aPassThrough;
}
uint32_t
MediaEngineWebRTCMicrophoneSource::GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl)
{
+  MOZ_ASSERT(aGraphImpl->CurrentDriver()->OnThread(),
+             "Must be called from the MediaStreamGraph's current driver thread.");
+
if (mState == kReleased) {
// This source has been released, and is waiting for collection. Simply
// return 0, this source won't contribute to the channel count decision.
// Again, this is temporary.
return 0;
}
return mRequestedInputChannelCount;
}
void
MediaEngineWebRTCMicrophoneSource::SetRequestedInputChannelCount(
uint32_t aRequestedInputChannelCount)
{
MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mAllocations.Length() <= 1);
+
if (mAllocations.IsEmpty()) {
return;
}
MOZ_ASSERT(mAllocations.Length() == 1 &&
mAllocations[0].mStream &&
mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread(),
"Wrong calling pattern, don't call this before ::SetTrack.");
mRequestedInputChannelCount = aRequestedInputChannelCount;
@@ -556,16 +565,22 @@ MediaEngineWebRTCMicrophoneSource::SetRe
}
void
MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
RefPtr<MediaStreamGraphImpl> aGraph)
{
AssertIsOnOwningThread();
MOZ_DIAGNOSTIC_ASSERT(aGraph);
+#ifdef DEBUG
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mAllocations.Length() <= 1);
+ }
+#endif
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(aGraph), aPrefs]() mutable {
that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
@@ -611,49 +626,59 @@ MediaEngineWebRTCMicrophoneSource::Alloc
AllocationHandle** aOutHandle,
const char** aOutBadConstraint)
{
AssertIsOnOwningThread();
MOZ_ASSERT(aOutHandle);
auto handle = MakeRefPtr<AllocationHandle>(aConstraints, aPrincipalInfo,
aDeviceId);
+#ifdef DEBUG
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mAllocations.Length() <= 1);
+ }
+#endif
LOG(("Mic source %p allocation %p Allocate()", this, handle.get()));
nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
aOutBadConstraint);
if (NS_FAILED(rv)) {
return rv;
}
{
MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mAllocations.IsEmpty(), "Only allocate once.");
mAllocations.AppendElement(Allocation(handle));
}
handle.forget(aOutHandle);
return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
{
AssertIsOnOwningThread();
+ MOZ_ASSERT(mState == kStopped);
+
size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
MOZ_DIAGNOSTIC_ASSERT(!mAllocations[i].mEnabled,
"Source should be stopped for the track before removing");
if (mAllocations[i].mStream && IsTrackIDExplicit(mAllocations[i].mTrackID)) {
mAllocations[i].mStream->EndTrack(mAllocations[i].mTrackID);
}
{
MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once.");
mAllocations.RemoveElementAt(i);
}
if (mAllocations.IsEmpty()) {
// If empty, no callbacks to deliver data should be occuring
MOZ_ASSERT(mState != kReleased, "Source not allocated");
MOZ_ASSERT(mState != kStarted, "Source not stopped");
@@ -677,16 +702,18 @@ MediaEngineWebRTCMicrophoneSource::SetTr
MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
if (mAllocations.Length() == 1 &&
mAllocations[0].mStream &&
mAllocations[0].mStream->Graph() != aStream->Graph()) {
return NS_ERROR_NOT_AVAILABLE;
}
+ MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once.");
+
size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
MOZ_ASSERT(!mAllocations[i].mStream);
MOZ_ASSERT(mAllocations[i].mTrackID == TRACK_NONE);
MOZ_ASSERT(mAllocations[i].mPrincipal == PRINCIPAL_HANDLE_NONE);
{
MutexAutoLock lock(mMutex);
mAllocations[i].mStream = aStream;
@@ -761,54 +788,51 @@ MediaEngineWebRTCMicrophoneSource::Start
ApplySettings(mNetPrefs, allocation.mStream->GraphImpl());
return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
{
+ MOZ_ASSERT(mAllocations.Length() <= 1);
AssertIsOnOwningThread();
LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex,
"Cannot stop track that we don't know about");
Allocation& allocation = mAllocations[i];
+ MOZ_ASSERT(allocation.mStream, "SetTrack must have been called before ::Stop");
if (!allocation.mEnabled) {
// Already stopped - this is allowed
return NS_OK;
}
{
// This spans setting both the enabled state and mState.
MutexAutoLock lock(mMutex);
allocation.mEnabled = false;
CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
allocation.mStream->CloseAudioInput(id, mListener);
+ mListener = nullptr;
if (HasEnabledTrack()) {
// Another track is keeping us from stopping
return NS_OK;
}
MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
mState = kStopped;
}
- if (mListener) {
- // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
- mListener->Shutdown();
- mListener = nullptr;
- }
-
return NS_OK;
}
void
MediaEngineWebRTCMicrophoneSource::GetSettings(dom::MediaTrackSettings& aOutSettings) const
{
MOZ_ASSERT(NS_IsMainThread());
aOutSettings = *mSettings;
@@ -879,22 +903,24 @@ MediaEngineWebRTCMicrophoneSource::Pull(
}
AudioSegment audio;
audio.AppendNullData(delta);
aStream->AppendToTrack(aTrackID, &audio);
}
void
-MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
+MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraphImpl* aGraph,
AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels)
{
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
+
if (!mPacketizerOutput ||
mPacketizerOutput->PacketSize() != aRate/100u ||
mPacketizerOutput->Channels() != aChannels) {
// It's ok to drop the audio still in the packetizer here: if this changes,
// we changed devices or something.
mPacketizerOutput =
new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
}
@@ -972,17 +998,17 @@ MediaEngineWebRTCMicrophoneSource::Notif
deinterleavedPacketDataChannelPointers.Elements());
MOZ_ASSERT(!err, "Could not process the reverse stream.");
}
}
// Only called if we're not in passthrough mode
void
-MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
+MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels)
{
MOZ_ASSERT(!PassThrough(aGraph), "This should be bypassed when in PassThrough mode.");
size_t offset = 0;
@@ -1197,22 +1223,23 @@ MediaEngineWebRTCMicrophoneSource::Inser
allocation.mStream->AppendToTrack(allocation.mTrackID, &segment);
}
}
// Called back on GraphDriver thread!
// Note this can be called back after ::Shutdown()
void
-MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
+MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels)
{
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
TRACE_AUDIO_CALLBACK();
{
MutexAutoLock lock(mMutex);
if (mAllocations.IsEmpty()) {
// This can happen because mAllocations is not yet using message passing, and
// is access both on the media manager thread and the MSG thread. This is to
// be fixed soon.
@@ -1248,25 +1275,34 @@ do {
#_processing " on device change."); \
return; \
} \
\
} \
} while(0)
void
-MediaEngineWebRTCMicrophoneSource::DeviceChanged()
+MediaEngineWebRTCMicrophoneSource::DeviceChanged(MediaStreamGraphImpl* aGraph)
{
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
// Reset some processing
ResetProcessingIfNeeded(gain_control);
ResetProcessingIfNeeded(echo_cancellation);
ResetProcessingIfNeeded(noise_suppression);
}
void
+MediaEngineWebRTCMicrophoneSource::Disconnect(MediaStreamGraphImpl* aGraph)
+{
+ // This method is just for asserts.
+ MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
+ MOZ_ASSERT(!mListener);
+}
+
+void
MediaEngineWebRTCMicrophoneSource::Shutdown()
{
AssertIsOnOwningThread();
if (mListener) {
// breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
mListener->Shutdown();
// Don't release the webrtc.org pointers yet until the Listener is (async) shutdown