--- a/gfx/layers/Layers.cpp
+++ b/gfx/layers/Layers.cpp
@@ -1112,17 +1112,18 @@ ContainerLayer::ContainerLayer(LayerMana
mInheritedYScale(1.0f),
mPresShellResolution(1.0f),
mScaleToResolution(false),
mUseIntermediateSurface(false),
mSupportsComponentAlphaChildren(false),
mMayHaveReadbackChild(false),
mChildrenChanged(false),
mEventRegionsOverride(EventRegionsOverride::NoOverride),
- mVRDeviceID(0)
+ mVRDeviceID(0),
+ mInputFrameID(0)
{
MOZ_COUNT_CTOR(ContainerLayer);
mContentFlags = 0; // Clear NO_TEXT, NO_TEXT_OVER_TRANSPARENT
}
ContainerLayer::~ContainerLayer()
{
MOZ_COUNT_DTOR(ContainerLayer);
@@ -1274,17 +1275,18 @@ ContainerLayer::RepositionChild(Layer* a
void
ContainerLayer::FillSpecificAttributes(SpecificLayerAttributes& aAttrs)
{
aAttrs = ContainerLayerAttributes(mPreXScale, mPreYScale,
mInheritedXScale, mInheritedYScale,
mPresShellResolution, mScaleToResolution,
mEventRegionsOverride,
- mVRDeviceID);
+ mVRDeviceID,
+ mInputFrameID);
}
bool
ContainerLayer::Creates3DContextWithExtendingChildren()
{
if (Extend3DContext()) {
return false;
}
@@ -2141,17 +2143,17 @@ ContainerLayer::PrintInfo(std::stringstr
}
if (mEventRegionsOverride & EventRegionsOverride::ForceDispatchToContent) {
aStream << " [force-dtc]";
}
if (mEventRegionsOverride & EventRegionsOverride::ForceEmptyHitRegion) {
aStream << " [force-ehr]";
}
if (mVRDeviceID) {
- aStream << nsPrintfCString(" [hmd=%lu]", mVRDeviceID).get();
+ aStream << nsPrintfCString(" [hmd=%u] [hmdframe=%d]", mVRDeviceID, mInputFrameID).get();
}
}
void
ContainerLayer::DumpPacket(layerscope::LayersPacket* aPacket, const void* aParent)
{
Layer::DumpPacket(aPacket, aParent);
// Get this layer data
--- a/gfx/layers/Layers.h
+++ b/gfx/layers/Layers.h
@@ -2138,20 +2138,28 @@ public:
return mEventRegionsOverride;
}
/**
* VR
*/
void SetVRDeviceID(uint32_t aVRDeviceID) {
mVRDeviceID = aVRDeviceID;
+ Mutated();
}
uint32_t GetVRDeviceID() {
return mVRDeviceID;
}
+ void SetInputFrameID(int32_t aInputFrameID) {
+ mInputFrameID = aInputFrameID;
+ Mutated();
+ }
+ int32_t GetInputFrameID() {
+ return mInputFrameID;
+ }
/**
* Replace the current effective transform with the given one,
* returning the old one. This is currently added as a hack for VR
* rendering, and might go away if we find a better way to do this.
* If you think you have a need for this method, talk with
* vlad/mstange/mwoodrow first.
*/
@@ -2218,16 +2226,17 @@ protected:
bool mUseIntermediateSurface;
bool mSupportsComponentAlphaChildren;
bool mMayHaveReadbackChild;
// This is updated by ComputeDifferences. This will be true if we need to invalidate
// the intermediate surface.
bool mChildrenChanged;
EventRegionsOverride mEventRegionsOverride;
uint32_t mVRDeviceID;
+ int32_t mInputFrameID;
};
/**
* A Layer which just renders a solid color in its visible region. It actually
* can fill any area that contains the visible region, so if you need to
* restrict the area filled, set a clip region on this layer.
*/
class ColorLayer : public Layer {
--- a/gfx/layers/client/CanvasClient.cpp
+++ b/gfx/layers/client/CanvasClient.cpp
@@ -20,16 +20,17 @@
#include "mozilla/layers/GrallocTextureClient.h"
#include "mozilla/layers/LayersTypes.h"
#include "mozilla/layers/TextureClient.h" // for TextureClient, etc
#include "mozilla/layers/TextureClientOGL.h"
#include "nsAutoPtr.h" // for nsRefPtr
#include "nsDebug.h" // for printf_stderr, NS_ASSERTION
#include "nsXULAppAPI.h" // for XRE_GetProcessType, etc
#include "TextureClientSharedSurface.h"
+#include "VRManagerChild.h"
using namespace mozilla::gfx;
using namespace mozilla::gl;
namespace mozilla {
namespace layers {
/* static */ already_AddRefed<CanvasClient>
@@ -120,16 +121,17 @@ CanvasClient2D::Update(gfx::IntSize aSiz
}
if (updated) {
AutoTArray<CompositableForwarder::TimedTextureClient,1> textures;
CompositableForwarder::TimedTextureClient* t = textures.AppendElement();
t->mTextureClient = mBuffer;
t->mPictureRect = nsIntRect(nsIntPoint(0, 0), mBuffer->GetSize());
t->mFrameID = mFrameID;
+ t->mInputFrameID = VRManagerChild::Get()->GetInputFrameID();
GetForwarder()->UseTextures(this, textures);
mBuffer->SyncWithObject(GetForwarder()->GetSyncObject());
}
}
already_AddRefed<TextureClient>
CanvasClient2D::CreateTextureClientForCanvas(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
@@ -476,16 +478,21 @@ CanvasClientSharedSurface::Updated()
// Add the new TexClient.
MOZ_ALWAYS_TRUE( AddTextureClient(mFront) );
AutoTArray<CompositableForwarder::TimedTextureClient,1> textures;
CompositableForwarder::TimedTextureClient* t = textures.AppendElement();
t->mTextureClient = mFront;
t->mPictureRect = nsIntRect(nsIntPoint(0, 0), mFront->GetSize());
t->mFrameID = mFrameID;
+ // XXX TODO - This reference to VRManagerChild will be moved with the
+ // implementation of the WebVR 1.0 API, which will enable
+ // the inputFrameID to be passed through Javascript with
+ // the new VRDisplay API.
+ t->mInputFrameID = VRManagerChild::Get()->GetInputFrameID();
forwarder->UseTextures(this, textures);
}
void
CanvasClientSharedSurface::ClearSurfaces()
{
mFront = nullptr;
mNewFront = nullptr;
--- a/gfx/layers/composite/CompositableHost.h
+++ b/gfx/layers/composite/CompositableHost.h
@@ -189,16 +189,17 @@ public:
virtual void PrintInfo(std::stringstream& aStream, const char* aPrefix) = 0;
struct TimedTexture {
RefPtr<TextureHost> mTexture;
TimeStamp mTimeStamp;
gfx::IntRect mPictureRect;
int32_t mFrameID;
int32_t mProducerID;
+ int32_t mInputFrameID;
};
virtual void UseTextureHost(const nsTArray<TimedTexture>& aTextures);
virtual void UseComponentAlphaTextures(TextureHost* aTextureOnBlack,
TextureHost* aTextureOnWhite);
virtual void UseOverlaySource(OverlaySource aOverlay,
const gfx::IntRect& aPictureRect) { }
virtual void RemoveTextureHost(TextureHost* aTexture);
@@ -230,16 +231,18 @@ public:
virtual bool Lock() { return false; }
virtual void Unlock() { }
virtual already_AddRefed<TexturedEffect> GenEffect(const gfx::Filter& aFilter) {
return nullptr;
}
+ virtual int32_t GetLastInputFrameID() const { return -1; }
+
protected:
TextureInfo mTextureInfo;
uint64_t mAsyncID;
uint64_t mCompositorID;
RefPtr<Compositor> mCompositor;
Layer* mLayer;
uint32_t mFlashCounter; // used when the pref "layers.flash-borders" is true.
bool mAttached;
--- a/gfx/layers/composite/ContainerLayerComposite.cpp
+++ b/gfx/layers/composite/ContainerLayerComposite.cpp
@@ -134,18 +134,21 @@ struct PreparedLayer
RenderTargetIntRect mClipRect;
};
template<class ContainerT> void
ContainerRenderVR(ContainerT* aContainer,
LayerManagerComposite* aManager,
const gfx::IntRect& aClipRect,
- RefPtr<gfx::VRHMDInfo> aHMD)
+ RefPtr<gfx::VRHMDInfo> aHMD,
+ int32_t aInputFrameID)
{
+ int32_t inputFrameID = aInputFrameID;
+
RefPtr<CompositingRenderTarget> surface;
Compositor* compositor = aManager->GetCompositor();
RefPtr<CompositingRenderTarget> previousTarget = compositor->GetCurrentRenderTarget();
float opacity = aContainer->GetEffectiveOpacity();
@@ -260,16 +263,24 @@ ContainerRenderVR(ContainerT* aContainer
}
// XXX these are both clip rects, which end up as scissor rects in the compositor. So we just
// pass the full target surface rect here.
layerToRender->Prepare(RenderTargetIntRect(surfaceRect.x, surfaceRect.y,
surfaceRect.width, surfaceRect.height));
layerToRender->RenderLayer(surfaceRect);
+ CompositableHost *ch = layerToRender->GetCompositableHost();
+ if (ch) {
+ int32_t compositableInputFrameID = ch->GetLastInputFrameID();
+ if (compositableInputFrameID != -1) {
+ inputFrameID = compositableInputFrameID;
+ }
+ }
+
if (restoreTransform) {
layer->ReplaceEffectiveTransform(childTransform);
}
} else {
// Gecko-rendered CSS VR -- not supported yet, so just don't render this layer!
}
}
@@ -277,17 +288,17 @@ ContainerRenderVR(ContainerT* aContainer
// Now put back the original transfom on this container
aContainer->ReplaceEffectiveTransform(origTransform);
// then bind the original target and draw with distortion
compositor->SetRenderTarget(previousTarget);
if (vrRendering) {
- vrRendering->SubmitFrame(aContainer->mVRRenderTargetSet);
+ vrRendering->SubmitFrame(aContainer->mVRRenderTargetSet, inputFrameID);
DUMP("<<< ContainerRenderVR [used vrRendering] [%p]\n", aContainer);
if (!gfxPrefs::VRMirrorTextures()) {
return;
}
}
gfx::Rect rect(surfaceRect.x, surfaceRect.y, surfaceRect.width, surfaceRect.height);
gfx::Rect clipRect(aClipRect.x, aClipRect.y, aClipRect.width, aClipRect.height);
@@ -691,17 +702,17 @@ template<class ContainerT> void
ContainerRender(ContainerT* aContainer,
LayerManagerComposite* aManager,
const gfx::IntRect& aClipRect)
{
MOZ_ASSERT(aContainer->mPrepared);
RefPtr<gfx::VRHMDInfo> hmdInfo = gfx::VRManager::Get()->GetDevice(aContainer->GetVRDeviceID());
if (hmdInfo && hmdInfo->GetConfiguration().IsValid()) {
- ContainerRenderVR(aContainer, aManager, aClipRect, hmdInfo);
+ ContainerRenderVR(aContainer, aManager, aClipRect, hmdInfo, aContainer->GetInputFrameID());
aContainer->mPrepared = nullptr;
return;
}
if (aContainer->UseIntermediateSurface()) {
RefPtr<CompositingRenderTarget> surface;
if (aContainer->mPrepared->mNeedsSurfaceCopy) {
--- a/gfx/layers/composite/ImageHost.cpp
+++ b/gfx/layers/composite/ImageHost.cpp
@@ -27,16 +27,17 @@ namespace layers {
class ISurfaceAllocator;
ImageHost::ImageHost(const TextureInfo& aTextureInfo)
: CompositableHost(aTextureInfo)
, mImageContainer(nullptr)
, mLastFrameID(-1)
, mLastProducerID(-1)
+ , mLastInputFrameID(-1)
, mBias(BIAS_NONE)
, mLocked(false)
{}
ImageHost::~ImageHost()
{
SetImageContainer(nullptr);
}
@@ -79,16 +80,17 @@ ImageHost::UseTextureHost(const nsTArray
mImages.RemoveElementAt(i);
break;
}
}
img.mTimeStamp = t.mTimeStamp;
img.mPictureRect = t.mPictureRect;
img.mFrameID = t.mFrameID;
img.mProducerID = t.mProducerID;
+ img.mInputFrameID = t.mInputFrameID;
}
// Recycle any leftover mTextureSources and call PrepareTextureSource on all
// images.
for (auto& img : newImages) {
if (!img.mTextureSource && !mImages.IsEmpty()) {
img.mTextureSource = mImages.LastElement().mTextureSource;
mImages.RemoveElementAt(mImages.Length() - 1);
}
@@ -352,16 +354,17 @@ ImageHost::Composite(LayerComposite* aLa
aLayer->GetLayerManager()->
AppendImageCompositeNotification(ImageCompositeNotification(
mImageContainer, nullptr,
img->mTimeStamp, GetCompositor()->GetCompositionTime(),
img->mFrameID, img->mProducerID));
}
mLastFrameID = img->mFrameID;
mLastProducerID = img->mProducerID;
+ mLastInputFrameID = img->mInputFrameID;
}
aEffectChain.mPrimaryEffect = effect;
gfx::Rect pictureRect(0, 0, img->mPictureRect.width, img->mPictureRect.height);
BigImageIterator* it = img->mTextureSource->AsBigImageIterator();
if (it) {
// This iteration does not work if we have multiple texture sources here
// (e.g. 3 YCbCr textures). There's nothing preventing the different
--- a/gfx/layers/composite/ImageHost.h
+++ b/gfx/layers/composite/ImageHost.h
@@ -95,16 +95,17 @@ public:
int32_t GetProducerID()
{
const TimedImage* img = ChooseImage();
return img ? img->mProducerID : -1;
}
int32_t GetLastFrameID() const { return mLastFrameID; }
int32_t GetLastProducerID() const { return mLastProducerID; }
+ virtual int32_t GetLastInputFrameID() const override { return mLastInputFrameID; }
enum Bias {
// Don't apply bias to frame times
BIAS_NONE,
// Apply a negative bias to frame times to keep them before the vsync time
BIAS_NEGATIVE,
// Apply a positive bias to frame times to keep them after the vsync time
BIAS_POSITIVE,
@@ -113,16 +114,17 @@ public:
protected:
struct TimedImage {
CompositableTextureHostRef mFrontBuffer;
CompositableTextureSourceRef mTextureSource;
TimeStamp mTimeStamp;
gfx::IntRect mPictureRect;
int32_t mFrameID;
int32_t mProducerID;
+ int32_t mInputFrameID;
};
/**
* ChooseImage is guaranteed to return the same TimedImage every time it's
* called during the same composition, up to the end of Composite() ---
* it depends only on mImages, mCompositor->GetCompositionTime(), and mBias.
* mBias is updated at the end of Composite().
*/
@@ -130,16 +132,17 @@ protected:
TimedImage* ChooseImage();
int ChooseImageIndex() const;
nsTArray<TimedImage> mImages;
// Weak reference, will be null if mImageContainer has been destroyed.
ImageContainerParent* mImageContainer;
int32_t mLastFrameID;
int32_t mLastProducerID;
+ int32_t mLastInputFrameID;
/**
* Bias to apply to the next frame.
*/
Bias mBias;
bool mLocked;
RefPtr<ImageHostOverlay> mImageHostOverlay;
--- a/gfx/layers/ipc/CompositableForwarder.h
+++ b/gfx/layers/ipc/CompositableForwarder.h
@@ -108,23 +108,24 @@ public:
* Only ImageBridge implements it.
*/
virtual void RemoveTextureFromCompositableAsync(AsyncTransactionTracker* aAsyncTransactionTracker,
CompositableClient* aCompositable,
TextureClient* aTexture) {}
struct TimedTextureClient {
TimedTextureClient()
- : mTextureClient(nullptr), mFrameID(0), mProducerID(0) {}
+ : mTextureClient(nullptr), mFrameID(0), mProducerID(0), mInputFrameID(0) {}
TextureClient* mTextureClient;
TimeStamp mTimeStamp;
nsIntRect mPictureRect;
int32_t mFrameID;
int32_t mProducerID;
+ int32_t mInputFrameID;
};
/**
* Tell the CompositableHost on the compositor side what textures to use for
* the next composition.
*/
virtual void UseTextures(CompositableClient* aCompositable,
const nsTArray<TimedTextureClient>& aTextures) = 0;
virtual void UseComponentAlphaTextures(CompositableClient* aCompositable,
--- a/gfx/layers/ipc/CompositableTransactionParent.cpp
+++ b/gfx/layers/ipc/CompositableTransactionParent.cpp
@@ -177,16 +177,17 @@ CompositableParentManager::ReceiveCompos
CompositableHost::TimedTexture* t = textures.AppendElement();
t->mTexture =
TextureHost::AsTextureHost(timedTexture.textureParent());
MOZ_ASSERT(t->mTexture);
t->mTimeStamp = timedTexture.timeStamp();
t->mPictureRect = timedTexture.picture();
t->mFrameID = timedTexture.frameID();
t->mProducerID = timedTexture.producerID();
+ t->mInputFrameID = timedTexture.inputFrameID();
MOZ_ASSERT(ValidatePictureRect(t->mTexture->GetSize(), t->mPictureRect));
MaybeFence maybeFence = timedTexture.fence();
if (maybeFence.type() == MaybeFence::TFenceHandle) {
FenceHandle fence = maybeFence.get_FenceHandle();
if (fence.IsValid()) {
t->mTexture->SetAcquireFenceHandle(fence);
}
--- a/gfx/layers/ipc/ImageBridgeChild.cpp
+++ b/gfx/layers/ipc/ImageBridgeChild.cpp
@@ -186,17 +186,17 @@ ImageBridgeChild::UseTextures(Compositab
if (!t.mTextureClient->IsSharedWithCompositor()) {
return;
}
FenceHandle fence = t.mTextureClient->GetAcquireFenceHandle();
textures.AppendElement(TimedTexture(nullptr, t.mTextureClient->GetIPDLActor(),
fence.IsValid() ? MaybeFence(fence) : MaybeFence(null_t()),
t.mTimeStamp, t.mPictureRect,
- t.mFrameID, t.mProducerID));
+ t.mFrameID, t.mProducerID, t.mInputFrameID));
}
mTxn->AddNoSwapEdit(OpUseTexture(nullptr, aCompositable->GetIPDLActor(),
textures));
}
void
ImageBridgeChild::UseComponentAlphaTextures(CompositableClient* aCompositable,
TextureClient* aTextureOnBlack,
--- a/gfx/layers/ipc/LayerTransactionParent.cpp
+++ b/gfx/layers/ipc/LayerTransactionParent.cpp
@@ -409,16 +409,17 @@ LayerTransactionParent::RecvUpdate(Infal
}
const ContainerLayerAttributes& attrs =
specific.get_ContainerLayerAttributes();
containerLayer->SetPreScale(attrs.preXScale(), attrs.preYScale());
containerLayer->SetInheritedScale(attrs.inheritedXScale(), attrs.inheritedYScale());
containerLayer->SetScaleToResolution(attrs.scaleToResolution(),
attrs.presShellResolution());
containerLayer->SetEventRegionsOverride(attrs.eventRegionsOverride());
+ containerLayer->SetInputFrameID(attrs.inputFrameID());
if (attrs.hmdDeviceID()) {
containerLayer->SetVRDeviceID(attrs.hmdDeviceID());
}
break;
}
case Specific::TColorLayerAttributes: {
--- a/gfx/layers/ipc/LayersMessages.ipdlh
+++ b/gfx/layers/ipc/LayersMessages.ipdlh
@@ -247,16 +247,17 @@ struct ContainerLayerAttributes {
float preXScale;
float preYScale;
float inheritedXScale;
float inheritedYScale;
float presShellResolution;
bool scaleToResolution;
EventRegionsOverride eventRegionsOverride;
uint32_t hmdDeviceID;
+ int32_t inputFrameID;
};
struct ColorLayerAttributes { LayerColor color; IntRect bounds; };
struct CanvasLayerAttributes { Filter filter; IntRect bounds; };
struct RefLayerAttributes {
int64_t id;
// TODO: Once bug 1132895 is fixed we shouldn't need to propagate the override
// explicitly here.
EventRegionsOverride eventRegionsOverride;
@@ -391,16 +392,17 @@ union MaybeFence {
struct TimedTexture {
PTexture texture;
MaybeFence fence;
TimeStamp timeStamp;
IntRect picture;
uint32_t frameID;
uint32_t producerID;
+ int32_t inputFrameID;
};
/**
* Tells the compositor-side which textures to use (for example, as front buffer
* if there are several textures for double buffering).
* This provides a list of textures with timestamps, ordered by timestamp.
* The newest texture whose timestamp is <= the current time is rendered
* (where null is considered less than every other timestamp). If there is no
--- a/gfx/layers/ipc/ShadowLayers.cpp
+++ b/gfx/layers/ipc/ShadowLayers.cpp
@@ -391,17 +391,17 @@ ShadowLayerForwarder::UseTextures(Compos
for (auto& t : aTextures) {
MOZ_ASSERT(t.mTextureClient);
MOZ_ASSERT(t.mTextureClient->GetIPDLActor());
FenceHandle fence = t.mTextureClient->GetAcquireFenceHandle();
textures.AppendElement(TimedTexture(nullptr, t.mTextureClient->GetIPDLActor(),
fence.IsValid() ? MaybeFence(fence) : MaybeFence(null_t()),
t.mTimeStamp, t.mPictureRect,
- t.mFrameID, t.mProducerID));
+ t.mFrameID, t.mProducerID, t.mInputFrameID));
if ((t.mTextureClient->GetFlags() & TextureFlags::IMMEDIATE_UPLOAD)
&& t.mTextureClient->HasInternalBuffer()) {
// We use IMMEDIATE_UPLOAD when we want to be sure that the upload cannot
// race with updates on the main thread. In this case we want the transaction
// to be synchronous.
mTxn->MarkSyncTransaction();
}
--- a/gfx/vr/gfxVR.h
+++ b/gfx/vr/gfxVR.h
@@ -156,16 +156,17 @@ struct VRDeviceInfo
return !(*this == other);
}
};
struct VRHMDSensorState {
double timestamp;
+ int32_t inputFrameID;
VRStateValidFlags flags;
float orientation[4];
float position[3];
float angularVelocity[3];
float angularAcceleration[3];
float linearVelocity[3];
float linearAcceleration[3];
@@ -239,17 +240,17 @@ public:
virtual already_AddRefed<layers::CompositingRenderTarget> GetNextRenderTarget() = 0;
protected:
virtual ~RenderTargetSet();
};
virtual already_AddRefed<RenderTargetSet> CreateRenderTargetSet(layers::Compositor *aCompositor, const IntSize& aSize) = 0;
virtual void DestroyRenderTargetSet(RenderTargetSet *aRTSet) = 0;
- virtual void SubmitFrame(RenderTargetSet *aRTSet) = 0;
+ virtual void SubmitFrame(RenderTargetSet *aRTSet, int32_t aInputFrameID) = 0;
protected:
VRHMDRenderingSupport() { }
};
class VRHMDInfo {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VRHMDInfo)
--- a/gfx/vr/gfxVROculus.cpp
+++ b/gfx/vr/gfxVROculus.cpp
@@ -241,16 +241,17 @@ FromFovPort(const ovrFovPort& aFOV)
return fovInfo;
}
} // namespace
HMDInfoOculus::HMDInfoOculus(ovrSession aSession)
: VRHMDInfo(VRHMDType::Oculus, false)
, mSession(aSession)
+ , mInputFrameID(0)
{
MOZ_ASSERT(sizeof(HMDInfoOculus::DistortionVertex) == sizeof(VRDistortionVertex),
"HMDInfoOculus::DistortionVertex must match the size of VRDistortionVertex");
MOZ_COUNT_CTOR_INHERITED(HMDInfoOculus, VRHMDInfo);
mDeviceInfo.mDeviceName.AssignLiteral("Oculus VR HMD");
@@ -340,17 +341,17 @@ HMDInfoOculus::KeepSensorTracking()
{
// Oculus PC SDK 0.8 and newer enable tracking by default
return true;
}
void
HMDInfoOculus::NotifyVsync(const mozilla::TimeStamp& aVsyncTimestamp)
{
-
+ ++mInputFrameID;
}
void
HMDInfoOculus::ZeroSensor()
{
ovr_RecenterPose(mSession);
}
@@ -513,17 +514,17 @@ HMDInfoOculus::CreateRenderTargetSet(lay
void
HMDInfoOculus::DestroyRenderTargetSet(RenderTargetSet *aRTSet)
{
RenderTargetSetOculus *rts = static_cast<RenderTargetSetOculus*>(aRTSet);
rts->Destroy();
}
void
-HMDInfoOculus::SubmitFrame(RenderTargetSet *aRTSet)
+HMDInfoOculus::SubmitFrame(RenderTargetSet *aRTSet, int32_t aInputFrameID)
{
RenderTargetSetOculus *rts = static_cast<RenderTargetSetOculus*>(aRTSet);
MOZ_ASSERT(rts->hmd != nullptr);
MOZ_ASSERT(rts->textureSet != nullptr);
ovrLayerEyeFov layer;
layer.Header.Type = ovrLayerType_EyeFov;
layer.Header.Flags = 0;
@@ -542,17 +543,17 @@ HMDInfoOculus::SubmitFrame(RenderTargetS
const Point3D& l = rts->hmd->mDeviceInfo.mEyeTranslation[0];
const Point3D& r = rts->hmd->mDeviceInfo.mEyeTranslation[1];
const ovrVector3f hmdToEyeViewOffset[2] = { { l.x, l.y, l.z },
{ r.x, r.y, r.z } };
do_CalcEyePoses(rts->hmd->mLastTrackingState.HeadPose.ThePose, hmdToEyeViewOffset, layer.RenderPose);
ovrLayerHeader *layers = &layer.Header;
- ovrResult orv = ovr_SubmitFrame(mSession, 0, nullptr, &layers, 1);
+ ovrResult orv = ovr_SubmitFrame(mSession, aInputFrameID, nullptr, &layers, 1);
//printf_stderr("Submitted frame %d, result: %d\n", rts->textureSet->CurrentIndex, orv);
if (orv != ovrSuccess) {
// not visible? failed?
}
}
/*static*/ already_AddRefed<VRHMDManagerOculus>
VRHMDManagerOculus::Create()
--- a/gfx/vr/gfxVROculus.h
+++ b/gfx/vr/gfxVROculus.h
@@ -40,17 +40,17 @@ public:
VRHMDRenderingSupport* GetRenderingSupport() override { return this; }
void Destroy();
/* VRHMDRenderingSupport */
already_AddRefed<RenderTargetSet> CreateRenderTargetSet(layers::Compositor *aCompositor, const IntSize& aSize) override;
void DestroyRenderTargetSet(RenderTargetSet *aRTSet) override;
- void SubmitFrame(RenderTargetSet *aRTSet) override;
+ void SubmitFrame(RenderTargetSet *aRTSet, int32_t aInputFrameID) override;
ovrSession GetOculusSession() const { return mSession; }
protected:
virtual ~HMDInfoOculus() {
Destroy();
MOZ_COUNT_DTOR_INHERITED(HMDInfoOculus, VRHMDInfo);
}
@@ -63,16 +63,17 @@ protected:
float texB[2];
float genericAttribs[4];
};
ovrSession mSession;
ovrHmdDesc mDesc;
ovrFovPort mFOVPort[2];
ovrTrackingState mLastTrackingState;
+ int32_t mInputFrameID;
};
} // namespace impl
class VRHMDManagerOculus : public VRHMDManager
{
public:
static already_AddRefed<VRHMDManagerOculus> Create();
--- a/gfx/vr/ipc/VRManagerChild.cpp
+++ b/gfx/vr/ipc/VRManagerChild.cpp
@@ -19,16 +19,17 @@ namespace gfx {
static StaticRefPtr<VRManagerChild> sVRManagerChildSingleton;
static StaticRefPtr<VRManagerParent> sVRManagerParentSingleton;
void ReleaseVRManagerParentSingleton() {
sVRManagerParentSingleton = nullptr;
}
VRManagerChild::VRManagerChild()
+ : mInputFrameID(-1)
{
MOZ_COUNT_CTOR(VRManagerChild);
MOZ_ASSERT(NS_IsMainThread());
}
VRManagerChild::~VRManagerChild()
{
MOZ_ASSERT(NS_IsMainThread());
@@ -152,16 +153,17 @@ bool
VRManagerChild::RecvUpdateDeviceSensors(nsTArray<VRSensorUpdate>&& aDeviceSensorUpdates)
{
// mDevices could be a hashed container for more scalability, but not worth
// it now as we expect < 10 entries.
for (auto& sensorUpdate: aDeviceSensorUpdates) {
for (auto& device: mDevices) {
if (device->GetDeviceInfo().GetDeviceID() == sensorUpdate.mDeviceID) {
device->UpdateSensorState(sensorUpdate.mSensorState);
+ mInputFrameID = sensorUpdate.mSensorState.inputFrameID;
break;
}
}
}
return true;
}
@@ -177,10 +179,16 @@ VRManagerChild::RefreshVRDevicesWithCall
{
bool success = SendRefreshDevices();
if (success) {
mNavigatorCallbacks.AppendElement(aNavigator);
}
return success;
}
+int
+VRManagerChild::GetInputFrameID()
+{
+ return mInputFrameID;
+}
+
} // namespace gfx
} // namespace mozilla
--- a/gfx/vr/ipc/VRManagerChild.h
+++ b/gfx/vr/ipc/VRManagerChild.h
@@ -20,16 +20,17 @@ namespace gfx {
class VRDeviceProxy;
class VRManagerChild : public PVRManagerChild
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING_WITH_MAIN_THREAD_DESTRUCTION(VRManagerChild)
+ int GetInputFrameID();
bool GetVRDevices(nsTArray<RefPtr<VRDeviceProxy> >& aDevices);
bool RefreshVRDevicesWithCallback(dom::Navigator* aNavigator);
static VRManagerChild* StartUpInChildProcess(Transport* aTransport,
ProcessId aOtherProcess);
static void StartUpSameProcess();
static void ShutDown();
@@ -47,14 +48,15 @@ protected:
friend class layers::CompositorChild;
private:
nsTArray<RefPtr<VRDeviceProxy> > mDevices;
nsTArray<dom::Navigator*> mNavigatorCallbacks;
+ int32_t mInputFrameID;
};
} // namespace mozilla
} // namespace gfx
#endif // MOZILLA_GFX_VR_VRMANAGERCHILD_H
\ No newline at end of file
--- a/gfx/vr/ipc/VRMessageUtils.h
+++ b/gfx/vr/ipc/VRMessageUtils.h
@@ -122,16 +122,17 @@ struct ParamTraits<mozilla::gfx::VRDevic
template <>
struct ParamTraits<mozilla::gfx::VRHMDSensorState>
{
typedef mozilla::gfx::VRHMDSensorState paramType;
static void Write(Message* aMsg, const paramType& aParam)
{
WriteParam(aMsg, aParam.timestamp);
+ WriteParam(aMsg, aParam.inputFrameID);
WriteParam(aMsg, aParam.flags);
WriteParam(aMsg, aParam.orientation[0]);
WriteParam(aMsg, aParam.orientation[1]);
WriteParam(aMsg, aParam.orientation[2]);
WriteParam(aMsg, aParam.orientation[3]);
WriteParam(aMsg, aParam.position[0]);
WriteParam(aMsg, aParam.position[1]);
WriteParam(aMsg, aParam.position[2]);
@@ -147,16 +148,17 @@ struct ParamTraits<mozilla::gfx::VRHMDSe
WriteParam(aMsg, aParam.linearAcceleration[0]);
WriteParam(aMsg, aParam.linearAcceleration[1]);
WriteParam(aMsg, aParam.linearAcceleration[2]);
}
static bool Read(const Message* aMsg, void** aIter, paramType* aResult)
{
if (!ReadParam(aMsg, aIter, &(aResult->timestamp)) ||
+ !ReadParam(aMsg, aIter, &(aResult->inputFrameID)) ||
!ReadParam(aMsg, aIter, &(aResult->flags)) ||
!ReadParam(aMsg, aIter, &(aResult->orientation[0])) ||
!ReadParam(aMsg, aIter, &(aResult->orientation[1])) ||
!ReadParam(aMsg, aIter, &(aResult->orientation[2])) ||
!ReadParam(aMsg, aIter, &(aResult->orientation[3])) ||
!ReadParam(aMsg, aIter, &(aResult->position[0])) ||
!ReadParam(aMsg, aIter, &(aResult->position[1])) ||
!ReadParam(aMsg, aIter, &(aResult->position[2])) ||
--- a/layout/base/nsDisplayList.cpp
+++ b/layout/base/nsDisplayList.cpp
@@ -6423,16 +6423,17 @@ nsDisplayVR::BuildLayer(nsDisplayListBui
ContainerLayerParameters newContainerParameters = aContainerParameters;
uint32_t flags = FrameLayerBuilder::CONTAINER_NOT_CLIPPED_BY_ANCESTORS |
FrameLayerBuilder::CONTAINER_ALLOW_PULL_BACKGROUND_COLOR;
RefPtr<ContainerLayer> container = aManager->GetLayerBuilder()->
BuildContainerLayerFor(aBuilder, aManager, mFrame, this, &mList,
newContainerParameters, nullptr, flags);
container->SetVRDeviceID(mHMD->GetDeviceInfo().GetDeviceID());
+ container->SetInputFrameID(mHMD->GetSensorState().inputFrameID);
container->SetUserData(nsIFrame::LayerIsPrerenderedDataKey(),
/*the value is irrelevant*/nullptr);
return container.forget();
}
nsRegion nsDisplaySVGEffects::GetOpaqueRegion(nsDisplayListBuilder* aBuilder,
bool* aSnap)
{