--- a/dom/media/systemservices/CamerasChild.cpp
+++ b/dom/media/systemservices/CamerasChild.cpp
@@ -509,35 +509,45 @@ CamerasChild::RemoveCallback(const Captu
}
}
}
int
CamerasChild::StartCapture(CaptureEngine aCapEngine,
const int capture_id,
webrtc::VideoCaptureCapability& webrtcCaps,
+ webrtc::VideoCaptureCapability& webrtcConstraint,
FrameRelay* cb)
{
LOG((__PRETTY_FUNCTION__));
AddCallback(aCapEngine, capture_id, cb);
VideoCaptureCapability capCap(webrtcCaps.width,
webrtcCaps.height,
webrtcCaps.maxFPS,
webrtcCaps.expectedCaptureDelay,
webrtcCaps.rawType,
webrtcCaps.codecType,
webrtcCaps.interlaced);
+ VideoCaptureCapability capConstraint(webrtcConstraint.width,
+ webrtcConstraint.height,
+ webrtcConstraint.maxFPS,
+ webrtcConstraint.expectedCaptureDelay,
+ webrtcConstraint.rawType,
+ webrtcConstraint.codecType,
+ webrtcConstraint.interlaced);
nsCOMPtr<nsIRunnable> runnable = mozilla::
- NewNonOwningRunnableMethod<CaptureEngine, int, VideoCaptureCapability>(
+ NewNonOwningRunnableMethod<CaptureEngine, int,
+ VideoCaptureCapability, VideoCaptureCapability>(
"camera::PCamerasChild::SendStartCapture",
this,
&CamerasChild::SendStartCapture,
aCapEngine,
capture_id,
- capCap);
+ capCap,
+ capConstraint);
LockAndDispatch<> dispatcher(this, __func__, runnable);
return dispatcher.ReturnValue();
}
int
CamerasChild::StopCapture(CaptureEngine aCapEngine, const int capture_id)
{
LOG((__PRETTY_FUNCTION__));
--- a/dom/media/systemservices/CamerasChild.h
+++ b/dom/media/systemservices/CamerasChild.h
@@ -180,16 +180,17 @@ public:
// run on the MediaManager thread
int NumberOfCaptureDevices(CaptureEngine aCapEngine);
int NumberOfCapabilities(CaptureEngine aCapEngine,
const char* deviceUniqueIdUTF8);
int ReleaseCaptureDevice(CaptureEngine aCapEngine,
const int capture_id);
int StartCapture(CaptureEngine aCapEngine,
const int capture_id, webrtc::VideoCaptureCapability& capability,
+ webrtc::VideoCaptureCapability& constraint,
FrameRelay* func);
int StopCapture(CaptureEngine aCapEngine, const int capture_id);
int AllocateCaptureDevice(CaptureEngine aCapEngine,
const char* unique_idUTF8,
const unsigned int unique_idUTF8Length,
int& capture_id,
const mozilla::ipc::PrincipalInfo& aPrincipalInfo);
int GetCaptureCapability(CaptureEngine aCapEngine,
--- a/dom/media/systemservices/CamerasParent.cpp
+++ b/dom/media/systemservices/CamerasParent.cpp
@@ -786,43 +786,53 @@ CamerasParent::RecvReleaseCaptureDevice(
});
DispatchToVideoCaptureThread(webrtc_runnable);
return IPC_OK();
}
mozilla::ipc::IPCResult
CamerasParent::RecvStartCapture(const CaptureEngine& aCapEngine,
const int& capnum,
- const VideoCaptureCapability& ipcCaps)
+ const VideoCaptureCapability& ipcCaps,
+ const VideoCaptureCapability& ipcConstraint)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasParent> self(this);
RefPtr<Runnable> webrtc_runnable =
- media::NewRunnableFrom([self, aCapEngine, capnum, ipcCaps]() -> nsresult {
+ media::NewRunnableFrom([self, aCapEngine, capnum, ipcCaps, ipcConstraint]() -> nsresult {
LOG((__PRETTY_FUNCTION__));
CallbackHelper** cbh;
VideoEngine* engine = nullptr;
int error = -1;
if (self->EnsureInitialized(aCapEngine)) {
cbh = self->mCallbacks.AppendElement(
new CallbackHelper(static_cast<CaptureEngine>(aCapEngine), capnum, self));
engine = self->mEngines[aCapEngine];
- engine->WithEntry(capnum, [&engine, &error, &ipcCaps, &cbh](VideoEngine::CaptureEntry& cap) {
+ engine->WithEntry(capnum, [&engine, &error, &ipcCaps, &ipcConstraint, &cbh](VideoEngine::CaptureEntry& cap) {
error = 0;
webrtc::VideoCaptureCapability capability;
capability.width = ipcCaps.width();
capability.height = ipcCaps.height();
capability.maxFPS = ipcCaps.maxFPS();
capability.expectedCaptureDelay = ipcCaps.expectedCaptureDelay();
capability.rawType = static_cast<webrtc::RawVideoType>(ipcCaps.rawType());
capability.codecType = static_cast<webrtc::VideoCodecType>(ipcCaps.codecType());
capability.interlaced = ipcCaps.interlaced();
+ webrtc::VideoCaptureCapability constraint;
+ constraint.width = ipcConstraint.width();
+ constraint.height = ipcConstraint.height();
+ constraint.maxFPS = ipcConstraint.maxFPS();
+ constraint.expectedCaptureDelay = ipcConstraint.expectedCaptureDelay();
+ constraint.rawType = static_cast<webrtc::RawVideoType>(ipcConstraint.rawType());
+ constraint.codecType = static_cast<webrtc::VideoCodecType>(ipcConstraint.codecType());
+ constraint.interlaced = ipcConstraint.interlaced();
+ cap.VideoCapture()->SetConstraint(constraint);
if (!error) {
error = cap.VideoCapture()->StartCapture(capability);
}
if (!error) {
engine->Startup();
cap.VideoCapture()->RegisterCaptureDataCallback(static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(*cbh));
}
});
--- a/dom/media/systemservices/CamerasParent.h
+++ b/dom/media/systemservices/CamerasParent.h
@@ -91,16 +91,17 @@ public:
const int&) override;
virtual mozilla::ipc::IPCResult RecvNumberOfCaptureDevices(const CaptureEngine&) override;
virtual mozilla::ipc::IPCResult RecvNumberOfCapabilities(const CaptureEngine&,
const nsCString&) override;
virtual mozilla::ipc::IPCResult RecvGetCaptureCapability(const CaptureEngine&, const nsCString&,
const int&) override;
virtual mozilla::ipc::IPCResult RecvGetCaptureDevice(const CaptureEngine&, const int&) override;
virtual mozilla::ipc::IPCResult RecvStartCapture(const CaptureEngine&, const int&,
+ const VideoCaptureCapability&,
const VideoCaptureCapability&) override;
virtual mozilla::ipc::IPCResult RecvStopCapture(const CaptureEngine&, const int&) override;
virtual mozilla::ipc::IPCResult RecvReleaseFrame(mozilla::ipc::Shmem&&) override;
virtual mozilla::ipc::IPCResult RecvAllDone() override;
virtual void ActorDestroy(ActorDestroyReason aWhy) override;
virtual mozilla::ipc::IPCResult RecvEnsureInitialized(const CaptureEngine&) override;
nsIEventTarget* GetBackgroundEventTarget() { return mPBackgroundEventTarget; };
--- a/dom/media/systemservices/PCameras.ipdl
+++ b/dom/media/systemservices/PCameras.ipdl
@@ -75,17 +75,17 @@ parent:
async GetCaptureCapability(CaptureEngine engine, nsCString unique_idUTF8,
int capability_number);
async GetCaptureDevice(CaptureEngine engine, int num);
async AllocateCaptureDevice(CaptureEngine engine, nsCString unique_idUTF8,
PrincipalInfo principal);
async ReleaseCaptureDevice(CaptureEngine engine, int numdev);
- async StartCapture(CaptureEngine engine, int numdev, VideoCaptureCapability capability);
+ async StartCapture(CaptureEngine engine, int numdev, VideoCaptureCapability capability, VideoCaptureCapability constraint);
async StopCapture(CaptureEngine engine, int numdev);
// transfers frame back
async ReleaseFrame(Shmem s);
// Ask parent to delete us
async AllDone();
// setup camera engine
async EnsureInitialized(CaptureEngine engine);
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.h
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.h
@@ -114,16 +114,17 @@ protected:
bool mInitDone;
bool mHasDirectListeners;
int mCaptureIndex;
TrackID mTrackID;
webrtc::CaptureCapability mCapability;
+ webrtc::CaptureCapability mConstraint;
mutable nsTArray<webrtc::CaptureCapability> mHardcodedCapabilities;
private:
nsString mDeviceName;
nsCString mUniqueId;
nsString mFacingMode;
};
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -183,17 +183,17 @@ MediaEngineRemoteVideoSource::Start(Sour
mImageContainer =
layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
mState = kStarted;
mTrackID = aID;
if (mozilla::camera::GetChildAndCall(
&mozilla::camera::CamerasChild::StartCapture,
- mCapEngine, mCaptureIndex, mCapability, this)) {
+ mCapEngine, mCaptureIndex, mCapability, mConstraint, this)) {
LOG(("StartCapture failed"));
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
@@ -284,17 +284,17 @@ MediaEngineRemoteVideoSource::UpdateSing
break;
case kStarted:
if (mCapability != mLastCapability) {
camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
mCapEngine, mCaptureIndex);
if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
mCapEngine, mCaptureIndex, mCapability,
- this)) {
+ mConstraint, this)) {
LOG(("StartCapture failed"));
return NS_ERROR_FAILURE;
}
SetLastCapability(mCapability);
}
break;
default:
@@ -455,30 +455,27 @@ MediaEngineRemoteVideoSource::NumCapabil
bool
MediaEngineRemoteVideoSource::ChooseCapability(
const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId)
{
AssertIsOnOwningThread();
+  FlattenedConstraints c(aConstraints);
+  // The actual resolution to constrain around is hard to know ahead of time
+  // (and may change), so push ideal and max ((ideal << 16) | max) down to
+  // the capture module, which finishes the constraint algorithm there.
+  mConstraint.width = (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mWidth.mMax & 0xffff);
+  mConstraint.height = (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mHeight.mMax & 0xffff);
+  mConstraint.maxFPS = c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
switch(mMediaSource) {
case dom::MediaSourceEnum::Screen:
case dom::MediaSourceEnum::Window:
case dom::MediaSourceEnum::Application: {
- FlattenedConstraints c(aConstraints);
- // The actual resolution to constrain around is not easy to find ahead of
- // time (and may in fact change over time), so as a hack, we push ideal
- // and max constraints down to desktop_capture_impl.cc and finish the
- // algorithm there.
- mCapability.width = (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 |
- (c.mWidth.mMax & 0xffff);
- mCapability.height = (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 |
- (c.mHeight.mMax & 0xffff);
- mCapability.maxFPS = c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
return true;
}
default:
return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId);
}
}
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.h
@@ -154,16 +154,19 @@ class VideoCaptureModule: public rtc::Re
// Remove capture data callback
virtual void DeRegisterCaptureDataCallback() = 0;
// Start capture device
virtual int32_t StartCapture(
const VideoCaptureCapability& capability) = 0;
+ virtual void SetConstraint(
+ const VideoCaptureCapability& constraint) = 0;
+
virtual int32_t StopCapture() = 0;
// Returns the name of the device used by this module.
virtual const char* CurrentDeviceName() const = 0;
// Returns true if the capture device is running
virtual bool CaptureStarted() = 0;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
@@ -79,27 +79,33 @@ int32_t VideoCaptureImpl::RotationInDegr
return -1;
}
VideoCaptureImpl::VideoCaptureImpl()
: _deviceUniqueId(NULL),
_apiCs(*CriticalSectionWrapper::CreateCriticalSection()),
_captureDelay(0),
_requestedCapability(),
+ _constraint(),
_lastProcessTimeNanos(rtc::TimeNanos()),
_lastFrameRateCallbackTimeNanos(rtc::TimeNanos()),
_dataCallBack(NULL),
_lastProcessFrameTimeNanos(rtc::TimeNanos()),
_rotateFrame(kVideoRotation_0),
apply_rotation_(true) {
_requestedCapability.width = kDefaultWidth;
_requestedCapability.height = kDefaultHeight;
_requestedCapability.maxFPS = 30;
_requestedCapability.rawType = kVideoI420;
_requestedCapability.codecType = kVideoCodecUnknown;
+ _constraint.width = kDefaultWidth;
+ _constraint.height = kDefaultHeight;
+ _constraint.maxFPS = 30;
+ _constraint.rawType = kVideoI420;
+ _constraint.codecType = kVideoCodecUnknown;
memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
}
VideoCaptureImpl::~VideoCaptureImpl()
{
DeRegisterCaptureDataCallback();
delete &_apiCs;
@@ -190,17 +196,39 @@ int32_t VideoCaptureImpl::IncomingFrame(
return -1;
}
VideoFrame captureFrame(
buffer, 0, rtc::TimeMillis(),
!apply_rotation ? _rotateFrame : kVideoRotation_0);
captureFrame.set_ntp_time_ms(captureTime);
- DeliverCapturedFrame(captureFrame);
+ int32_t req_max_width = _constraint.width & 0xffff;
+ int32_t req_max_height = _constraint.height & 0xffff;
+ int32_t req_ideal_width = (_constraint.width >> 16) & 0xffff;
+ int32_t req_ideal_height = (_constraint.height >> 16) & 0xffff;
+
+ int32_t dest_max_width = std::min(req_max_width, target_width);
+ int32_t dest_max_height = std::min(req_max_height, target_height);
+ int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : target_width, dest_max_width);
+ int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : target_height, dest_max_height);
+
+ int dst_stride_y = dst_width;
+ int dst_stride_uv = (dst_width + 1) / 2;
+ if (dst_width == target_width && dst_height == target_height) {
+ DeliverCapturedFrame(captureFrame);
+ } else {
+      rtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer;
+      scaledBuffer = I420Buffer::Create(dst_width, dst_height, dst_stride_y,
+                                        dst_stride_uv, dst_stride_uv);
+      scaledBuffer->CropAndScaleFrom(*captureFrame.video_frame_buffer().get());
+      webrtc::VideoFrame scaledFrame(scaledBuffer, 0, rtc::TimeMillis(), captureFrame.rotation());
+      scaledFrame.set_ntp_time_ms(captureTime);
+      DeliverCapturedFrame(scaledFrame);
+ }
}
else // Encoded format
{
assert(false);
return -1;
}
return 0;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.h
@@ -77,30 +77,35 @@ public:
int64_t captureTime = 0) override;
// Platform dependent
int32_t StartCapture(const VideoCaptureCapability& capability) override
{
_requestedCapability = capability;
return -1;
}
+ void SetConstraint(const VideoCaptureCapability& constraint) override
+ {
+ _constraint = constraint;
+ }
int32_t StopCapture() override { return -1; }
bool CaptureStarted() override {return false; }
int32_t CaptureSettings(VideoCaptureCapability& /*settings*/) override
{ return -1; }
protected:
VideoCaptureImpl();
virtual ~VideoCaptureImpl();
int32_t DeliverCapturedFrame(VideoFrame& captureFrame);
char* _deviceUniqueId; // current Device unique name;
CriticalSectionWrapper& _apiCs;
int32_t _captureDelay; // Current capture delay. May be changed of platform dependent parts.
VideoCaptureCapability _requestedCapability; // Should be set by platform dependent code in StartCapture.
+ VideoCaptureCapability _constraint;
private:
void UpdateFrameCount();
uint32_t CalculateFrameRate(int64_t now_ns);
// last time the module process function was called.
int64_t _lastProcessTimeNanos;
// last time the frame rate callback function was called.
int64_t _lastFrameRateCallbackTimeNanos;
--- a/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
@@ -431,17 +431,17 @@ int32_t DesktopCaptureImpl::Init(const c
return 0;
}
DesktopCaptureImpl::DesktopCaptureImpl(const int32_t id)
: _id(id),
_deviceUniqueId(""),
_apiCs(*CriticalSectionWrapper::CreateCriticalSection()),
- _requestedCapability(),
+ _constraint(),
_callBackCs(*CriticalSectionWrapper::CreateCriticalSection()),
_dataCallBack(NULL),
_rotateFrame(kVideoRotation_0),
last_capture_time_(rtc::TimeNanos()/rtc::kNumNanosecsPerMillisec),
// XXX Note that this won't capture drift!
delta_ntp_internal_ms_(Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() -
last_capture_time_),
time_event_(EventWrapper::Create()),
@@ -449,21 +449,21 @@ DesktopCaptureImpl::DesktopCaptureImpl(c
#if defined(_WIN32)
capturer_thread_(new rtc::PlatformUIThread(Run, this, "ScreenCaptureThread")),
#else
capturer_thread_(new rtc::PlatformThread(Run, this, "ScreenCaptureThread")),
#endif
started_(false) {
//-> TODO @@NG why is this crashing (seen on Linux)
//-> capturer_thread_->SetPriority(rtc::kHighPriority);
- _requestedCapability.width = kDefaultWidth;
- _requestedCapability.height = kDefaultHeight;
- _requestedCapability.maxFPS = 30;
- _requestedCapability.rawType = kVideoI420;
- _requestedCapability.codecType = kVideoCodecUnknown;
+ _constraint.width = kDefaultWidth;
+ _constraint.height = kDefaultHeight;
+ _constraint.maxFPS = 30;
+ _constraint.rawType = kVideoI420;
+ _constraint.codecType = kVideoCodecUnknown;
memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
}
DesktopCaptureImpl::~DesktopCaptureImpl() {
time_event_->Set();
capturer_thread_->Stop();
DeRegisterCaptureDataCallback();
@@ -567,20 +567,20 @@ int32_t DesktopCaptureImpl::IncomingFram
webrtc::VideoFrame captureFrame(buffer, 0, 0, kVideoRotation_0);
if (conversionResult < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
"Failed to convert capture frame from type %d to I420",
frameInfo.rawType);
return -1;
}
- int32_t req_max_width = _requestedCapability.width & 0xffff;
- int32_t req_max_height = _requestedCapability.height & 0xffff;
- int32_t req_ideal_width = (_requestedCapability.width >> 16) & 0xffff;
- int32_t req_ideal_height = (_requestedCapability.height >> 16) & 0xffff;
+ int32_t req_max_width = _constraint.width & 0xffff;
+ int32_t req_max_height = _constraint.height & 0xffff;
+ int32_t req_ideal_width = (_constraint.width >> 16) & 0xffff;
+ int32_t req_ideal_height = (_constraint.height >> 16) & 0xffff;
int32_t dest_max_width = std::min(req_max_width, target_width);
int32_t dest_max_height = std::min(req_max_height, target_height);
int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : target_width, dest_max_width);
int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : target_height, dest_max_height);
// scale to average of portrait and landscape
float scale_width = (float)dst_width / (float)target_width;
@@ -677,30 +677,33 @@ uint32_t DesktopCaptureImpl::CalculateFr
{
return uint32_t((nrOfFrames * 1000.0f / diff) + 0.5f);
}
}
return nrOfFrames;
}
-int32_t DesktopCaptureImpl::StartCapture(const VideoCaptureCapability& capability) {
- _requestedCapability = capability;
+int32_t DesktopCaptureImpl::StartCapture(const VideoCaptureCapability& capability /*ignored*/) {
#if defined(_WIN32)
- uint32_t maxFPSNeeded = 1000/_requestedCapability.maxFPS;
+  uint32_t maxFPSNeeded = 1000 / std::max<int32_t>(_constraint.maxFPS, 1);
capturer_thread_->RequestCallbackTimer(maxFPSNeeded);
#endif
desktop_capturer_cursor_composer_->Start(this);
capturer_thread_->Start();
started_ = true;
return 0;
}
+void DesktopCaptureImpl::SetConstraint(const VideoCaptureCapability& constraint) {
+ _constraint = constraint;
+}
+
int32_t DesktopCaptureImpl::StopCapture() {
if (started_) {
capturer_thread_->Stop(); // thread is guaranteed stopped before this returns
desktop_capturer_cursor_composer_->Stop();
started_ = false;
return 0;
}
return -1;
@@ -739,16 +742,16 @@ void DesktopCaptureImpl::process() {
#endif
desktop_capturer_cursor_composer_->CaptureFrame();
#if !defined(_WIN32)
const uint32_t processTime =
((uint32_t)(rtc::TimeNanos() - startProcessTime))/rtc::kNumNanosecsPerMillisec;
// Use at most x% CPU or limit framerate
- const uint32_t maxFPSNeeded = 1000/_requestedCapability.maxFPS;
+      const uint32_t maxFPSNeeded = 1000 / std::max<int32_t>(_constraint.maxFPS, 1);
const float sleepTimeFactor = (100.0f / kMaxDesktopCaptureCpuUsage) - 1.0f;
const uint32_t sleepTime = sleepTimeFactor * processTime;
time_event_->Wait(std::max<uint32_t>(maxFPSNeeded, sleepTime));
#endif
}
} // namespace webrtc
--- a/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
+++ b/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
@@ -185,32 +185,33 @@ public:
// |capture_time| must be specified in the NTP time format in milliseconds.
virtual int32_t IncomingFrame(uint8_t* videoFrame,
size_t videoFrameLength,
const VideoCaptureCapability& frameInfo,
int64_t captureTime = 0) override;
// Platform dependent
virtual int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ virtual void SetConstraint(const VideoCaptureCapability& constraint) override;
virtual int32_t StopCapture() override;
virtual bool CaptureStarted() override;
virtual int32_t CaptureSettings(VideoCaptureCapability& settings) override;
protected:
DesktopCaptureImpl(const int32_t id);
virtual ~DesktopCaptureImpl();
int32_t DeliverCapturedFrame(webrtc::VideoFrame& captureFrame,
int64_t capture_time);
static const uint32_t kMaxDesktopCaptureCpuUsage = 50; // maximum CPU usage in %
int32_t _id; // Module ID
std::string _deviceUniqueId; // current Device unique name;
CriticalSectionWrapper& _apiCs;
- VideoCaptureCapability _requestedCapability; // Should be set by platform dependent code in StartCapture.
+ VideoCaptureCapability _constraint;
private:
void UpdateFrameCount();
uint32_t CalculateFrameRate(int64_t now_ns);
CriticalSectionWrapper& _callBackCs;
rtc::VideoSinkInterface<VideoFrame>* _dataCallBack;