Bug 1404997 - P23. Strongly enforced that our destination buffer is big enough. r?padenot draft
author       Jean-Yves Avenard <jyavenard@mozilla.com>
Wed, 13 Dec 2017 18:44:30 +0100
changeset 712543 80976e983656e79ca823206824e7cd27433791f9
parent 712542 4587bcde88c0d089c9168ccabc9e0e8c89ed704d
child 712544 67033baaf5d156672b8e43b87507b88021329a71
push id      93357
push user    bmo:jyavenard@mozilla.com
push date    Sun, 17 Dec 2017 09:29:04 +0000
reviewers    padenot
bugs         1404997
milestone    59.0a1
Bug 1404997 - P23. Strongly enforced that our destination buffer is big enough. r?padenot MozReview-Commit-ID: A1kLsH75SzX
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -710,28 +710,27 @@ WebrtcAudioConduit::SendAudioFrame(const
 MediaConduitErrorCode
 WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay,
                                   int& lengthSamples)
 {
 
   CSFLogDebug(LOGTAG,  "%s ", __FUNCTION__);
-  unsigned int numSamples = 0;
 
   //validate params
   if(!speechData )
   {
     CSFLogError(LOGTAG,"%s Null Audio Buffer Pointer", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   // Validate sample length
-  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0  )
+  if(GetNum10msSamplesForFrequency(samplingFreqHz) == 0)
   {
     CSFLogError(LOGTAG,"%s Invalid Sampling Frequency ", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   //validate capture time
   if(capture_delay < 0 )
@@ -744,35 +743,35 @@ WebrtcAudioConduit::GetAudioFrame(int16_
   //Conduit should have reception enabled before we ask for decoded
   // samples
   if(!mEngineReceiving)
   {
     CSFLogError(LOGTAG, "%s Engine not Receiving ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-
+  int lengthSamplesAllowed = lengthSamples;
   lengthSamples = 0;  //output parameter
 
   if (mPtrVoEXmedia->GetAudioFrame(mChannel,
                                    samplingFreqHz,
                                    &mAudioFrame) != 0) {
     int error = mPtrVoEBase->LastError();
     CSFLogError(LOGTAG,  "%s Getting audio data Failed %d", __FUNCTION__, error);
     if(error == VE_RUNTIME_PLAY_ERROR)
     {
       return kMediaConduitPlayoutError;
     }
     return kMediaConduitUnknownError;
   }
 
   // XXX Annoying, have to copy to our buffers -- refactor?
   lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
-  PodCopy(speechData, mAudioFrame.data_,
-          lengthSamples);
+  MOZ_RELEASE_ASSERT(lengthSamples <= lengthSamplesAllowed);
+  PodCopy(speechData, mAudioFrame.data_, lengthSamples);
 
   // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
   mSamples += lengthSamples;
   if (mSamples >= mLastSyncLog + samplingFreqHz) {
     int jitter_buffer_delay_ms;
     int playout_buffer_delay_ms;
     int avsync_offset_ms;
     if (GetAVStats(&jitter_buffer_delay_ms,
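
The change above makes GetAudioFrame() treat lengthSamples as an in/out parameter: the caller passes the destination capacity in, the decoded frame length comes back out, and a release assert guards the PodCopy. A minimal standalone sketch of that callee-side contract, with hypothetical stand-ins (kMaxSamples, FakeDecode(), GetFrame()) rather than the real conduit code:

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  static const int kMaxSamples = 1920;  // e.g. 10ms of stereo at 96kHz

  // Stand-in for the media engine: writes `produced` samples of silence.
  static int FakeDecode(int16_t* out, int produced) {
    memset(out, 0, produced * sizeof(int16_t));
    return produced;
  }

  // Mirrors the patched GetAudioFrame(): lengthSamples is the destination
  // capacity on entry and the actual frame length on return.
  static bool GetFrame(int16_t speechData[], int& lengthSamples) {
    const int lengthSamplesAllowed = lengthSamples;  // [in]: capacity
    int16_t decoded[kMaxSamples];
    lengthSamples = FakeDecode(decoded, 480);        // [out]: actual length
    if (lengthSamples > lengthSamplesAllowed) {      // MOZ_RELEASE_ASSERT stand-in
      fprintf(stderr, "destination buffer too small\n");
      abort();
    }
    memcpy(speechData, decoded, lengthSamples * sizeof(int16_t));
    return true;
  }

  int main() {
    int16_t buffer[kMaxSamples];
    int lengthSamples = kMaxSamples;  // tell the callee our capacity
    GetFrame(buffer, lengthSamples);
    printf("copied %d samples\n", lengthSamples);
    return 0;
  }

An undersized destination now fails loudly at the capacity check instead of silently overrunning the caller's buffer.
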
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -135,16 +135,17 @@ public:
 
   /**
    * Function to grab a decoded audio-sample from the media engine for rendering
   * / playout of length 10 milliseconds.
    *
   * @param speechData [in]: Pointer to an array to which a 10ms frame of audio will be copied
    * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
    * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
+   * @param lengthSamples [in]: Maximum length of the speechData array, in samples.
   * @param lengthSamples [out]: Will contain the length of the audio frame in samples on return.
                                 Ex: A value of 160 implies that 160 samples, each 16 bits wide,
                                     were copied into speechData
    * NOTE: This function should be invoked every 10 milliseconds for the best
   *          performance
    * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the decoded samples are ready for reading and playout is enabled.
    *
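
Note on the asserts used in this patch: MOZ_ASSERT compiles away outside debug builds, while MOZ_RELEASE_ASSERT (added in AudioConduit.cpp above and swapped in below in MediaPipeline.cpp) keeps checking in optimized, shipping builds. A rough standalone sketch of that difference, with hypothetical SKETCH_* macros standing in for the real ones from mozilla/Assertions.h:

  #include <cstdio>
  #include <cstdlib>

  // Rough stand-ins for the MFBT macros; only the debug/release
  // behavior is mirrored here.
  #ifdef DEBUG
  #  define SKETCH_ASSERT(cond) do { if (!(cond)) abort(); } while (0)
  #else
  #  define SKETCH_ASSERT(cond) do { } while (0)  // MOZ_ASSERT: no-op in opt
  #endif
  #define SKETCH_RELEASE_ASSERT(cond) \
    do { if (!(cond)) abort(); } while (0)        // checked in every build

  int main() {
    int samplesLength = 480;
    const int capacity = 960;
    SKETCH_ASSERT(samplesLength <= capacity);          // vanishes in opt builds
    SKETCH_RELEASE_ASSERT(samplesLength <= capacity);  // always enforced
    puts("length within capacity");
    return 0;
  }
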
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2216,19 +2216,21 @@ private:
   {
     uint32_t samplesPer10ms = mRate / 100;
     // Determine how many frames we need.
     // As we get frames from conduit_ at the same rate as the graph's rate,
     // the number of frames needed is straightforward to determine.
     TrackTicks framesNeeded = aDesiredTime - mPlayedTicks;
 
     while (framesNeeded >= 0) {
-      int16_t scratchBuffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+      const int scratchBufferLength =
+        AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t);
+      int16_t scratchBuffer[scratchBufferLength];
 
-      int samplesLength;
+      int samplesLength = scratchBufferLength;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
         static_cast<AudioSessionConduit*>(mConduit.get())
           ->GetAudioFrame(scratchBuffer,
                           mRate,
                           0, // TODO(ekr@rtfm.com): better estimate of "capture"
                              // (really playout) delay
@@ -2243,18 +2245,17 @@ private:
                     mPlayedTicks,
                     aDesiredTime,
                     mSource->StreamTimeToSeconds(aDesiredTime));
         // if this is not enough we'll loop and provide more
         samplesLength = samplesPer10ms;
         PodArrayZero(scratchBuffer);
       }
 
-      MOZ_ASSERT(samplesLength * sizeof(uint16_t) <=
-                 AUDIO_SAMPLE_BUFFER_MAX_BYTES);
+      MOZ_RELEASE_ASSERT(samplesLength <= scratchBufferLength);
 
       CSFLogDebug(
         LOGTAG, "Audio conduit returned buffer of length %u", samplesLength);
 
       RefPtr<SharedBuffer> samples =
         SharedBuffer::Create(samplesLength * sizeof(uint16_t));
       int16_t* samplesData = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
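
On the caller side, the pull loop now derives the scratch buffer size and the capacity it reports to the conduit from the same compile-time constant, so the two can never drift apart. A runnable sketch of that pattern, with kMaxBytes and PullOne10ms() as hypothetical stand-ins for AUDIO_SAMPLE_BUFFER_MAX_BYTES and the AudioSessionConduit call:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  static const int kMaxBytes = 3840;  // stand-in for AUDIO_SAMPLE_BUFFER_MAX_BYTES

  // Pretend conduit call: writes up to `capacity` samples, returns the count.
  static int PullOne10ms(int16_t* dst, int capacity) {
    const int produced = 480;  // 10ms of mono at 48kHz
    assert(produced <= capacity);
    memset(dst, 0, produced * sizeof(int16_t));
    return produced;
  }

  int main() {
    // Buffer size and the capacity handed to the conduit come from the
    // same constant, so they cannot disagree.
    const int scratchBufferLength = kMaxBytes / sizeof(int16_t);
    int16_t scratchBuffer[scratchBufferLength];

    int samplesLength = scratchBufferLength;  // [in]: capacity
    samplesLength = PullOne10ms(scratchBuffer, samplesLength);  // [out]: actual

    if (samplesLength > scratchBufferLength) {  // MOZ_RELEASE_ASSERT stand-in
      abort();
    }
    printf("pulled %d samples\n", samplesLength);
    return 0;
  }
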