| author | Brad Davis <[email protected]> | 2014-04-14 21:25:09 -0700 |
|---|---|---|
| committer | Brad Davis <[email protected]> | 2014-04-14 21:25:09 -0700 |
| commit | 07d0f4d0bbf3477ac6a9584f726e8ec6ab285707 (patch) | |
| tree | 1854d0c690eff32e77b137567c88a52d56d8b660 /LibOVR/Src | |
| parent | f28388ff2af14b56ef2d973b2f4f9da021716d4c (diff) | |
Adding windows 0.3.1 SDK
Diffstat (limited to 'LibOVR/Src')
155 files changed, 25545 insertions, 9208 deletions
diff --git a/LibOVR/Src/CAPI/CAPI_DistortionRenderer.cpp b/LibOVR/Src/CAPI/CAPI_DistortionRenderer.cpp new file mode 100644 index 0000000..8c0f8b8 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_DistortionRenderer.cpp @@ -0,0 +1,63 @@ +/************************************************************************************ + +Filename : CAPI_DistortionRenderer.cpp +Content : Combines all of the rendering state associated with the HMD +Created : February 2, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_DistortionRenderer.h" + +// TBD: Move to separate config file that handles back-ends. +#define OVR_D3D_VERSION 11 +#include "D3D1X/CAPI_D3D1X_DistortionRenderer.h" +#undef OVR_D3D_VERSION + +#define OVR_D3D_VERSION 10 +#include "D3D1X/CAPI_D3D1X_DistortionRenderer.h" +#undef OVR_D3D_VERSION + +#define OVR_D3D_VERSION 9 +#include "D3D1X/CAPI_D3D9_DistortionRenderer.h" +#undef OVR_D3D_VERSION + +#include "GL/CAPI_GL_DistortionRenderer.h" + +namespace OVR { namespace CAPI { + +//------------------------------------------------------------------------------------- +// ***** DistortionRenderer + +// TBD: Move to separate config file that handles back-ends. + +DistortionRenderer::CreateFunc DistortionRenderer::APICreateRegistry[ovrRenderAPI_Count] = +{ + 0, // None + &GL::DistortionRenderer::Create, + 0, // Android_GLES + &D3D9::DistortionRenderer::Create, + &D3D10::DistortionRenderer::Create, + &D3D11::DistortionRenderer::Create +}; + + +}} // namespace OVR::CAPI + diff --git a/LibOVR/Src/CAPI/CAPI_DistortionRenderer.h b/LibOVR/Src/CAPI/CAPI_DistortionRenderer.h new file mode 100644 index 0000000..d1b8011 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_DistortionRenderer.h @@ -0,0 +1,100 @@ +/************************************************************************************ + +Filename : CAPI_DistortionRenderer.h +Content : Abstract interface for platform-specific rendering of distortion +Created : February 2, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_CAPI_DistortionRenderer_h +#define OVR_CAPI_DistortionRenderer_h + +#include "CAPI_HMDRenderState.h" +#include "CAPI_FrameTimeManager.h" + + +namespace OVR { namespace CAPI { + +//------------------------------------------------------------------------------------- +// ***** CAPI::DistortionRenderer + +// DistortionRenderer implements rendering of distortion and other overlay elements +// in platform-independent way. +// Platform-specific renderer back ends for CAPI are derived from this class. + +class DistortionRenderer : public RefCountBase<DistortionRenderer> +{ + // Quiet assignment compiler warning. + void operator = (const DistortionRenderer&) { } +public: + + DistortionRenderer(ovrRenderAPIType api, ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState) + : RenderAPI(api), HMD(hmd), TimeManager(timeManager), RState(renderState) + { } + virtual ~DistortionRenderer() + { } + + + // Configures the Renderer based on externally passed API settings. Must be + // called before use. + // Under D3D, apiConfig includes D3D Device pointer, back buffer and other + // needed structures. + virtual bool Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps) = 0; + + // Submits one eye texture for rendering. This is in the separate method to + // allow "submit as you render" scenarios on horizontal screens where one + // eye can be scanned out before the other. + virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture) = 0; + + // Finish the frame, optionally swapping buffers. + // Many implementations may actually apply the distortion here. + virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, + unsigned char* latencyTester2DrawColor) = 0; + + + + // *** Creation Factory logic + + ovrRenderAPIType GetRenderAPI() const { return RenderAPI; } + + // Creation function for this interface, registered for API. + typedef DistortionRenderer* (*CreateFunc)(ovrHmd hmd, + FrameTimeManager &timeManager, + const HMDRenderState& renderState); + + static CreateFunc APICreateRegistry[ovrRenderAPI_Count]; + +protected: + const ovrRenderAPIType RenderAPI; + const ovrHmd HMD; + FrameTimeManager& TimeManager; + const HMDRenderState& RState; +}; + +}} // namespace OVR::CAPI + + +#endif // OVR_CAPI_DistortionRenderer_h + + diff --git a/LibOVR/Src/CAPI/CAPI_FrameTimeManager.cpp b/LibOVR/Src/CAPI/CAPI_FrameTimeManager.cpp new file mode 100644 index 0000000..de7eeeb --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_FrameTimeManager.cpp @@ -0,0 +1,674 @@ +/************************************************************************************ + +Filename : CAPI_FrameTimeManager.cpp +Content : Manage frame timing and pose prediction for rendering +Created : November 30, 2013 +Authors : Volga Aksoy, Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_FrameTimeManager.h" + + +namespace OVR { namespace CAPI { + + +//------------------------------------------------------------------------------------- +// ***** FrameLatencyTracker + + +FrameLatencyTracker::FrameLatencyTracker() +{ + Reset(); +} + +void FrameLatencyTracker::Reset() +{ + TrackerEnabled = true; + WaitMode = SampleWait_Zeroes; + FrameIndex = 0; + MatchCount = 0; + RenderLatencySeconds = 0.0; + TimewarpLatencySeconds = 0.0; + + FrameDeltas.Clear(); +} + + +unsigned char FrameLatencyTracker::GetNextDrawColor() +{ + if (!TrackerEnabled || (WaitMode == SampleWait_Zeroes) || + (FrameIndex >= FramesTracked)) + { + return (unsigned char)Util::FrameTimeRecord::ReadbackIndexToColor(0); + } + + OVR_ASSERT(FrameIndex < FramesTracked); + return (unsigned char)Util::FrameTimeRecord::ReadbackIndexToColor(FrameIndex+1); +} + + +void FrameLatencyTracker::SaveDrawColor(unsigned char drawColor, double endFrameTime, + double renderIMUTime, double timewarpIMUTime ) +{ + if (!TrackerEnabled || (WaitMode == SampleWait_Zeroes)) + return; + + if (FrameIndex < FramesTracked) + { + OVR_ASSERT(Util::FrameTimeRecord::ReadbackIndexToColor(FrameIndex+1) == drawColor); + OVR_UNUSED(drawColor); + + // saves {color, endFrame time} + FrameEndTimes[FrameIndex].ReadbackIndex = FrameIndex + 1; + FrameEndTimes[FrameIndex].TimeSeconds = endFrameTime; + FrameEndTimes[FrameIndex].RenderIMUTimeSeconds = renderIMUTime; + FrameEndTimes[FrameIndex].TimewarpIMUTimeSeconds= timewarpIMUTime; + FrameEndTimes[FrameIndex].MatchedRecord = false; + FrameIndex++; + } + else + { + // If the request was outstanding for too long, switch to zero mode to restart. + if (endFrameTime > (FrameEndTimes[FrameIndex-1].TimeSeconds + 0.15)) + { + if (MatchCount == 0) + { + // If nothing was matched, we have no latency reading. + RenderLatencySeconds = 0.0; + TimewarpLatencySeconds = 0.0; + } + + WaitMode = SampleWait_Zeroes; + MatchCount = 0; + FrameIndex = 0; + } + } +} + + +void FrameLatencyTracker::MatchRecord(const Util::FrameTimeRecordSet &r) +{ + if (!TrackerEnabled) + return; + + if (WaitMode == SampleWait_Zeroes) + { + // Do we have all zeros? + if (r.IsAllZeroes()) + { + OVR_ASSERT(FrameIndex == 0); + WaitMode = SampleWait_Match; + MatchCount = 0; + } + return; + } + + // We are in Match Mode. Wait until all colors are matched or timeout, + // at which point we go back to zeros. + + for (int i = 0; i < FrameIndex; i++) + { + int recordIndex = 0; + int consecutiveMatch = 0; + + OVR_ASSERT(FrameEndTimes[i].ReadbackIndex != 0); + + if (r.FindReadbackIndex(&recordIndex, FrameEndTimes[i].ReadbackIndex)) + { + // Advance forward to see that we have several more matches. + int ri = recordIndex + 1; + int j = i + 1; + + consecutiveMatch++; + + for (; (j < FrameIndex) && (ri < Util::FrameTimeRecordSet::RecordCount); j++, ri++) + { + if (r[ri].ReadbackIndex != FrameEndTimes[j].ReadbackIndex) + break; + consecutiveMatch++; + } + + // Match at least 2 items in the row, to avoid accidentally matching color. 
+ if (consecutiveMatch > 1) + { + // Record latency values for all but last samples. Keep last 2 samples + // for the future to simplify matching. + for (int q = 0; q < consecutiveMatch; q++) + { + const Util::FrameTimeRecord &scanoutFrame = r[recordIndex+q]; + FrameTimeRecordEx &renderFrame = FrameEndTimes[i+q]; + + if (!renderFrame.MatchedRecord) + { + double deltaSeconds = scanoutFrame.TimeSeconds - renderFrame.TimeSeconds; + if (deltaSeconds > 0.0) + { + FrameDeltas.AddTimeDelta(deltaSeconds); + LatencyRecordTime = scanoutFrame.TimeSeconds; + RenderLatencySeconds = scanoutFrame.TimeSeconds - renderFrame.RenderIMUTimeSeconds; + TimewarpLatencySeconds = (renderFrame.TimewarpIMUTimeSeconds == 0.0) ? 0.0 : + (scanoutFrame.TimeSeconds - renderFrame.TimewarpIMUTimeSeconds); + } + + renderFrame.MatchedRecord = true; + MatchCount++; + } + } + + // Exit for. + break; + } + } + } // for ( i => FrameIndex ) + + + // If we matched all frames, start over. + if (MatchCount == FramesTracked) + { + WaitMode = SampleWait_Zeroes; + MatchCount = 0; + FrameIndex = 0; + } +} + + +void FrameLatencyTracker::GetLatencyTimings(float latencies[3]) +{ + if (ovr_GetTimeInSeconds() > (LatencyRecordTime + 2.0)) + { + latencies[0] = 0.0f; + latencies[1] = 0.0f; + latencies[2] = 0.0f; + } + else + { + latencies[0] = (float)RenderLatencySeconds; + latencies[1] = (float)TimewarpLatencySeconds; + latencies[2] = (float)FrameDeltas.GetMedianTimeDelta(); + } +} + + +//------------------------------------------------------------------------------------- + +FrameTimeManager::FrameTimeManager(bool vsyncEnabled) + : VsyncEnabled(vsyncEnabled), DynamicPrediction(true), SdkRender(false), + FrameTiming() +{ + RenderIMUTimeSeconds = 0.0; + TimewarpIMUTimeSeconds = 0.0; + + // HACK: SyncToScanoutDelay observed close to 1 frame in video cards. + // Overwritten by dynamic latency measurement on DK2. + VSyncToScanoutDelay = 0.013f; + NoVSyncToScanoutDelay = 0.004f; +} + +void FrameTimeManager::Init(HmdRenderInfo& renderInfo) +{ + // Set up prediction distances. + // With-Vsync timings. + RenderInfo = renderInfo; + + ScreenSwitchingDelay = RenderInfo.Shutter.PixelSettleTime * 0.5f + + RenderInfo.Shutter.PixelPersistence * 0.5f; +} + +void FrameTimeManager::ResetFrameTiming(unsigned frameIndex, + bool vsyncEnabled, bool dynamicPrediction, + bool sdkRender) +{ + VsyncEnabled = vsyncEnabled; + DynamicPrediction = dynamicPrediction; + SdkRender = sdkRender; + + FrameTimeDeltas.Clear(); + DistortionRenderTimes.Clear(); + ScreenLatencyTracker.Reset(); + + FrameTiming.FrameIndex = frameIndex; + FrameTiming.NextFrameTime = 0.0; + FrameTiming.ThisFrameTime = 0.0; + FrameTiming.Inputs.FrameDelta = calcFrameDelta(); + FrameTiming.Inputs.ScreenDelay = calcScreenDelay(); + FrameTiming.Inputs.TimewarpWaitDelta = 0.0f; + + LocklessTiming.SetState(FrameTiming); +} + + +double FrameTimeManager::calcFrameDelta() const +{ + // Timing difference between frame is tracked by FrameTimeDeltas, or + // is a hard-coded value of 1/FrameRate. 
+ double frameDelta; + + if (!VsyncEnabled) + { + frameDelta = 0.0; + } + else if (FrameTimeDeltas.GetCount() > 3) + { + frameDelta = FrameTimeDeltas.GetMedianTimeDelta(); + if (frameDelta > (RenderInfo.Shutter.VsyncToNextVsync + 0.001)) + frameDelta = RenderInfo.Shutter.VsyncToNextVsync; + } + else + { + frameDelta = RenderInfo.Shutter.VsyncToNextVsync; + } + + return frameDelta; +} + + +double FrameTimeManager::calcScreenDelay() const +{ + double screenDelay = ScreenSwitchingDelay; + double measuredVSyncToScanout; + + // Use real-time DK2 latency tester HW for prediction if its is working. + // Do sanity check under 60 ms + if (!VsyncEnabled) + { + screenDelay += NoVSyncToScanoutDelay; + } + else if ( DynamicPrediction && + (ScreenLatencyTracker.FrameDeltas.GetCount() > 3) && + (measuredVSyncToScanout = ScreenLatencyTracker.FrameDeltas.GetMedianTimeDelta(), + (measuredVSyncToScanout > 0.0001) && (measuredVSyncToScanout < 0.06)) ) + { + screenDelay += measuredVSyncToScanout; + } + else + { + screenDelay += VSyncToScanoutDelay; + } + + return screenDelay; +} + + +double FrameTimeManager::calcTimewarpWaitDelta() const +{ + // If timewarp timing hasn't been calculated, we should wait. + if (!VsyncEnabled) + return 0.0; + + if (SdkRender) + { + if (NeedDistortionTimeMeasurement()) + return 0.0; + return -(DistortionRenderTimes.GetMedianTimeDelta() + 0.002); + } + + // Just a hard-coded "high" value for game-drawn code. + // TBD: Just return 0 and let users calculate this themselves? + return -0.003; +} + + + +void FrameTimeManager::Timing::InitTimingFromInputs(const FrameTimeManager::TimingInputs& inputs, + HmdShutterTypeEnum shutterType, + double thisFrameTime, unsigned int frameIndex) +{ + // ThisFrameTime comes from the end of last frame, unless it it changed. + double nextFrameBase; + double frameDelta = inputs.FrameDelta; + + FrameIndex = frameIndex; + + ThisFrameTime = thisFrameTime; + NextFrameTime = ThisFrameTime + frameDelta; + nextFrameBase = NextFrameTime + inputs.ScreenDelay; + MidpointTime = nextFrameBase + frameDelta * 0.5; + TimewarpPointTime = (inputs.TimewarpWaitDelta == 0.0) ? + 0.0 : (NextFrameTime + inputs.TimewarpWaitDelta); + + // Calculate absolute points in time when eye rendering or corresponding time-warp + // screen edges will become visible. + // This only matters with VSync. + switch(shutterType) + { + case HmdShutter_RollingTopToBottom: + EyeRenderTimes[0] = MidpointTime; + EyeRenderTimes[1] = MidpointTime; + TimeWarpStartEndTimes[0][0] = nextFrameBase; + TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta; + TimeWarpStartEndTimes[1][0] = nextFrameBase; + TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta; + break; + case HmdShutter_RollingLeftToRight: + EyeRenderTimes[0] = nextFrameBase + frameDelta * 0.25; + EyeRenderTimes[1] = nextFrameBase + frameDelta * 0.75; + + /* + // TBD: MA: It is probably better if mesh sets it up per-eye. 
+ // Would apply if screen is 0 -> 1 for each eye mesh + TimeWarpStartEndTimes[0][0] = nextFrameBase; + TimeWarpStartEndTimes[0][1] = MidpointTime; + TimeWarpStartEndTimes[1][0] = MidpointTime; + TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta; + */ + + // Mesh is set up to vary from Edge of scree 0 -> 1 across both eyes + TimeWarpStartEndTimes[0][0] = nextFrameBase; + TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta; + TimeWarpStartEndTimes[1][0] = nextFrameBase; + TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta; + + break; + case HmdShutter_RollingRightToLeft: + + EyeRenderTimes[0] = nextFrameBase + frameDelta * 0.75; + EyeRenderTimes[1] = nextFrameBase + frameDelta * 0.25; + + // This is *Correct* with Tom's distortion mesh organization. + TimeWarpStartEndTimes[0][0] = nextFrameBase ; + TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta; + TimeWarpStartEndTimes[1][0] = nextFrameBase ; + TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta; + break; + case HmdShutter_Global: + // TBD + EyeRenderTimes[0] = MidpointTime; + EyeRenderTimes[1] = MidpointTime; + TimeWarpStartEndTimes[0][0] = MidpointTime; + TimeWarpStartEndTimes[0][1] = MidpointTime; + TimeWarpStartEndTimes[1][0] = MidpointTime; + TimeWarpStartEndTimes[1][1] = MidpointTime; + break; + } +} + + +double FrameTimeManager::BeginFrame(unsigned frameIndex) +{ + RenderIMUTimeSeconds = 0.0; + TimewarpIMUTimeSeconds = 0.0; + + // ThisFrameTime comes from the end of last frame, unless it it changed. + double thisFrameTime = (FrameTiming.NextFrameTime != 0.0) ? + FrameTiming.NextFrameTime : ovr_GetTimeInSeconds(); + + // We are starting to process a new frame... + FrameTiming.InitTimingFromInputs(FrameTiming.Inputs, RenderInfo.Shutter.Type, + thisFrameTime, frameIndex); + + return FrameTiming.ThisFrameTime; +} + + +void FrameTimeManager::EndFrame() +{ + // Record timing since last frame; must be called after Present & sync. + FrameTiming.NextFrameTime = ovr_GetTimeInSeconds(); + if (FrameTiming.ThisFrameTime > 0.0) + { + FrameTimeDeltas.AddTimeDelta(FrameTiming.NextFrameTime - FrameTiming.ThisFrameTime); + FrameTiming.Inputs.FrameDelta = calcFrameDelta(); + } + + // Write to Lock-less + LocklessTiming.SetState(FrameTiming); +} + + + +// Thread-safe function to query timing for a future frame + +FrameTimeManager::Timing FrameTimeManager::GetFrameTiming(unsigned frameIndex) +{ + Timing frameTiming = LocklessTiming.GetState(); + + if (frameTiming.ThisFrameTime != 0.0) + { + // If timing hasn't been initialized, starting based on "now" is the best guess. + frameTiming.InitTimingFromInputs(frameTiming.Inputs, RenderInfo.Shutter.Type, + ovr_GetTimeInSeconds(), frameIndex); + } + + else if (frameIndex > frameTiming.FrameIndex) + { + unsigned frameDelta = frameIndex - frameTiming.FrameIndex; + double thisFrameTime = frameTiming.NextFrameTime + + double(frameDelta-1) * frameTiming.Inputs.FrameDelta; + // Don't run away too far into the future beyond rendering. 
+ OVR_ASSERT(frameDelta < 6); + + frameTiming.InitTimingFromInputs(frameTiming.Inputs, RenderInfo.Shutter.Type, + thisFrameTime, frameIndex); + } + + return frameTiming; +} + + +double FrameTimeManager::GetEyePredictionTime(ovrEyeType eye) +{ + if (VsyncEnabled) + { + return FrameTiming.EyeRenderTimes[eye]; + } + + // No VSync: Best guess for the near future + return ovr_GetTimeInSeconds() + ScreenSwitchingDelay + NoVSyncToScanoutDelay; +} + +Posef FrameTimeManager::GetEyePredictionPose(ovrHmd hmd, ovrEyeType eye) +{ + double eyeRenderTime = GetEyePredictionTime(eye); + ovrSensorState eyeState = ovrHmd_GetSensorState(hmd, eyeRenderTime); + +// EyeRenderPoses[eye] = eyeState.Predicted.Pose; + + // Record view pose sampling time for Latency reporting. + if (RenderIMUTimeSeconds == 0.0) + RenderIMUTimeSeconds = eyeState.Recorded.TimeInSeconds; + + return eyeState.Predicted.Pose; +} + + +void FrameTimeManager::GetTimewarpPredictions(ovrEyeType eye, double timewarpStartEnd[2]) +{ + if (VsyncEnabled) + { + timewarpStartEnd[0] = FrameTiming.TimeWarpStartEndTimes[eye][0]; + timewarpStartEnd[1] = FrameTiming.TimeWarpStartEndTimes[eye][1]; + return; + } + + // Free-running, so this will be displayed immediately. + // Unfortunately we have no idea which bit of the screen is actually going to be displayed. + // TODO: guess which bit of the screen is being displayed! + // (e.g. use DONOTWAIT on present and see when the return isn't WASSTILLWAITING?) + + // We have no idea where scan-out is currently, so we can't usefully warp the screen spatially. + timewarpStartEnd[0] = ovr_GetTimeInSeconds() + ScreenSwitchingDelay + NoVSyncToScanoutDelay; + timewarpStartEnd[1] = timewarpStartEnd[0]; +} + + +void FrameTimeManager::GetTimewarpMatrices(ovrHmd hmd, ovrEyeType eyeId, + ovrPosef renderPose, ovrMatrix4f twmOut[2]) +{ + if (!hmd) + { + return; + } + + double timewarpStartEnd[2] = { 0.0, 0.0 }; + GetTimewarpPredictions(eyeId, timewarpStartEnd); + + ovrSensorState startState = ovrHmd_GetSensorState(hmd, timewarpStartEnd[0]); + ovrSensorState endState = ovrHmd_GetSensorState(hmd, timewarpStartEnd[1]); + + if (TimewarpIMUTimeSeconds == 0.0) + TimewarpIMUTimeSeconds = startState.Recorded.TimeInSeconds; + + Quatf quatFromStart = startState.Predicted.Pose.Orientation; + Quatf quatFromEnd = endState.Predicted.Pose.Orientation; + Quatf quatFromEye = renderPose.Orientation; //EyeRenderPoses[eyeId].Orientation; + quatFromEye.Invert(); + + Quatf timewarpStartQuat = quatFromEye * quatFromStart; + Quatf timewarpEndQuat = quatFromEye * quatFromEnd; + + Matrix4f timewarpStart(timewarpStartQuat); + Matrix4f timewarpEnd(timewarpEndQuat); + + + // The real-world orientations have: X=right, Y=up, Z=backwards. + // The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards. + // So we need to perform a similarity transform on this delta matrix. + // The verbose code would look like this: + /* + Matrix4f matBasisChange; + matBasisChange.SetIdentity(); + matBasisChange.M[0][0] = 1.0f; + matBasisChange.M[1][1] = -1.0f; + matBasisChange.M[2][2] = -1.0f; + Matrix4f matBasisChangeInv = matBasisChange.Inverted(); + matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange; + */ + // ...but of course all the above is a constant transform and much more easily done. 
+ // We flip the signs of the Y&Z row, then flip the signs of the Y&Z column, + // and of course most of the flips cancel: + // +++ +-- +-- + // +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++ + // +++ +-- -++ + timewarpStart.M[0][1] = -timewarpStart.M[0][1]; + timewarpStart.M[0][2] = -timewarpStart.M[0][2]; + timewarpStart.M[1][0] = -timewarpStart.M[1][0]; + timewarpStart.M[2][0] = -timewarpStart.M[2][0]; + + timewarpEnd .M[0][1] = -timewarpEnd .M[0][1]; + timewarpEnd .M[0][2] = -timewarpEnd .M[0][2]; + timewarpEnd .M[1][0] = -timewarpEnd .M[1][0]; + timewarpEnd .M[2][0] = -timewarpEnd .M[2][0]; + + twmOut[0] = timewarpStart; + twmOut[1] = timewarpEnd; +} + + +// Used by renderer to determine if it should time distortion rendering. +bool FrameTimeManager::NeedDistortionTimeMeasurement() const +{ + if (!VsyncEnabled) + return false; + return DistortionRenderTimes.GetCount() < 10; +} + + +void FrameTimeManager::AddDistortionTimeMeasurement(double distortionTimeSeconds) +{ + DistortionRenderTimes.AddTimeDelta(distortionTimeSeconds); + + // If timewarp timing changes based on this sample, update it. + double newTimewarpWaitDelta = calcTimewarpWaitDelta(); + if (newTimewarpWaitDelta != FrameTiming.Inputs.TimewarpWaitDelta) + { + FrameTiming.Inputs.TimewarpWaitDelta = newTimewarpWaitDelta; + LocklessTiming.SetState(FrameTiming); + } +} + + +void FrameTimeManager::UpdateFrameLatencyTrackingAfterEndFrame( + unsigned char frameLatencyTestColor, + const Util::FrameTimeRecordSet& rs) +{ + // FrameTiming.NextFrameTime in this context (after EndFrame) is the end frame time. + ScreenLatencyTracker.SaveDrawColor(frameLatencyTestColor, + FrameTiming.NextFrameTime, + RenderIMUTimeSeconds, + TimewarpIMUTimeSeconds); + + ScreenLatencyTracker.MatchRecord(rs); + + // If screen delay changed, update timing. + double newScreenDelay = calcScreenDelay(); + if (newScreenDelay != FrameTiming.Inputs.ScreenDelay) + { + FrameTiming.Inputs.ScreenDelay = newScreenDelay; + LocklessTiming.SetState(FrameTiming); + } +} + + +//----------------------------------------------------------------------------------- +// ***** TimeDeltaCollector + +void TimeDeltaCollector::AddTimeDelta(double timeSeconds) +{ + // avoid adding invalid timing values + if(timeSeconds < 0.0f) + return; + + if (Count == Capacity) + { + for(int i=0; i< Count-1; i++) + TimeBufferSeconds[i] = TimeBufferSeconds[i+1]; + Count--; + } + TimeBufferSeconds[Count++] = timeSeconds; +} + +double TimeDeltaCollector::GetMedianTimeDelta() const +{ + double SortedList[Capacity]; + bool used[Capacity]; + + memset(used, 0, sizeof(used)); + SortedList[0] = 0.0; // In case Count was 0... + + // Probably the slowest way to find median... 
+ for (int i=0; i<Count; i++) + { + double smallestDelta = 1000000.0; + int index = 0; + + for (int j = 0; j < Count; j++) + { + if (!used[j]) + { + if (TimeBufferSeconds[j] < smallestDelta) + { + smallestDelta = TimeBufferSeconds[j]; + index = j; + } + } + } + + // Mark as used + used[index] = true; + SortedList[i] = smallestDelta; + } + + return SortedList[Count/2]; +} + + +}} // namespace OVR::CAPI + diff --git a/LibOVR/Src/CAPI/CAPI_FrameTimeManager.h b/LibOVR/Src/CAPI/CAPI_FrameTimeManager.h new file mode 100644 index 0000000..07a2963 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_FrameTimeManager.h @@ -0,0 +1,264 @@ +/************************************************************************************ + +Filename : CAPI_FrameTimeManager.h +Content : Manage frame timing and pose prediction for rendering +Created : November 30, 2013 +Authors : Volga Aksoy, Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_CAPI_FrameTimeManager_h +#define OVR_CAPI_FrameTimeManager_h + +#include "../OVR_CAPI.h" +#include "../Kernel/OVR_Timer.h" +#include "../Kernel/OVR_Math.h" +#include "../Util/Util_Render_Stereo.h" +#include "../Util/Util_LatencyTest2.h" + +namespace OVR { namespace CAPI { + +//------------------------------------------------------------------------------------- + +// Helper class to collect median times between frames, so that we know +// how long to wait. +struct TimeDeltaCollector +{ + TimeDeltaCollector() : Count(0) { } + + void AddTimeDelta(double timeSeconds); + void Clear() { Count = 0; } + + double GetMedianTimeDelta() const; + + double GetCount() const { return Count; } + + enum { Capacity = 12 }; +private: + int Count; + double TimeBufferSeconds[Capacity]; +}; + + +//------------------------------------------------------------------------------------- +// ***** FrameLatencyTracker + +// FrameLatencyTracker tracks frame Present to display Scan-out timing, as reported by +// the DK2 internal latency tester pixel read-back. The computed value is used in +// FrameTimeManager for prediction. View Render and TimeWarp to scan-out latencies are +// also reported for debugging. +// +// The class operates by generating color values from GetNextDrawColor() that must +// be rendered on the back end and then looking for matching values in FrameTimeRecordSet +// structure as reported by HW. 
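The comment above describes a draw-color handshake: the tracker hands out a color index, the renderer draws it into the latency-test pixel, and the DK2 reports back which colors it actually scanned out so the two timelines can be matched. A minimal per-frame sketch of that loop, using only the interface declared below and hypothetical stand-ins for the back-end draw and readback calls, might look like this (not part of the SDK diff):

```cpp
// Illustrative sketch only. DrawLatencyTestPixel() and QueryHmdFrameTimeRecords()
// are hypothetical placeholders for the back-end draw call and the headset's
// latency-tester read-back.
void TrackFrameLatencySketch(OVR::CAPI::FrameLatencyTracker& tracker,
                             double renderIMUTime, double timewarpIMUTime)
{
    // Tracker hands out the color index to draw this frame (0 == idle/zero mode).
    unsigned char drawColor = tracker.GetNextDrawColor();
    DrawLatencyTestPixel(drawColor);

    // After Present, record when this frame (and its color) left the application,
    // together with the IMU sample times used for the render and timewarp poses.
    tracker.SaveDrawColor(drawColor, ovr_GetTimeInSeconds(),
                          renderIMUTime, timewarpIMUTime);

    // When the headset reports which colors it scanned out, match them against
    // the saved records; matched pairs feed the median filter and latency values.
    OVR::Util::FrameTimeRecordSet records = QueryHmdFrameTimeRecords();
    tracker.MatchRecord(records);

    float latencies[3]; // [render-to-scanout, timewarp-to-scanout, median frame delta]
    tracker.GetLatencyTimings(latencies);
}
```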
+ +class FrameLatencyTracker +{ +public: + + enum { FramesTracked = Util::LT2_IncrementCount-1 }; + + FrameLatencyTracker(); + + // DrawColor == 0 is special in that it doesn't need saving of timestamp + unsigned char GetNextDrawColor(); + + void SaveDrawColor(unsigned char drawColor, double endFrameTime, + double renderIMUTime, double timewarpIMUTime ); + + void MatchRecord(const Util::FrameTimeRecordSet &r); + + void GetLatencyTimings(float latencies[3]); + + void Reset(); + +public: + + struct FrameTimeRecordEx : public Util::FrameTimeRecord + { + bool MatchedRecord; + double RenderIMUTimeSeconds; + double TimewarpIMUTimeSeconds; + }; + + // True if rendering read-back is enabled. + bool TrackerEnabled; + + enum SampleWaitType { + SampleWait_Zeroes, // We are waiting for a record with all zeros. + SampleWait_Match // We are issuing & matching colors. + }; + + SampleWaitType WaitMode; + int MatchCount; + // Records of frame timings that we are trying to measure. + FrameTimeRecordEx FrameEndTimes[FramesTracked]; + int FrameIndex; + // Median filter for (ScanoutTimeSeconds - PostPresent frame time) + TimeDeltaCollector FrameDeltas; + // Latency reporting results + double RenderLatencySeconds; + double TimewarpLatencySeconds; + double LatencyRecordTime; +}; + + + +//------------------------------------------------------------------------------------- +// ***** FrameTimeManager + +// FrameTimeManager keeps track of rendered frame timing and handles predictions for +// orientations and time-warp. + +class FrameTimeManager +{ +public: + FrameTimeManager(bool vsyncEnabled = true); + + // Data that affects frame timing computation. + struct TimingInputs + { + // Hard-coded value or dynamic as reported by FrameTimeDeltas.GetMedianTimeDelta(). + double FrameDelta; + // Screen delay from present to scan-out, as potentially reported by ScreenLatencyTracker. + double ScreenDelay; + // Negative value of how many seconds before EndFrame we start timewarp. 0.0 if not used. + double TimewarpWaitDelta; + + TimingInputs() + : FrameDelta(0), ScreenDelay(0), TimewarpWaitDelta(0) + { } + }; + + // Timing values for a specific frame. + struct Timing + { + TimingInputs Inputs; + + // Index of a frame that started at ThisFrameTime. + unsigned int FrameIndex; + // Predicted absolute times for when this frame will show up on screen. + // Generally, all values will be >= NextFrameTime, since that's the time we expect next + // vsync to succeed. + double ThisFrameTime; + double TimewarpPointTime; + double NextFrameTime; + double MidpointTime; + double EyeRenderTimes[2]; + double TimeWarpStartEndTimes[2][2]; + + Timing() + { + memset(this, 0, sizeof(Timing)); + } + + void InitTimingFromInputs(const TimingInputs& inputs, HmdShutterTypeEnum shutterType, + double thisFrameTime, unsigned int frameIndex); + }; + + + // Called on startup to provided data on HMD timing. + void Init(HmdRenderInfo& renderInfo); + + // Called with each new ConfigureRendering. + void ResetFrameTiming(unsigned frameIndex, + bool vsyncEnabled, bool dynamicPrediction, bool sdkRender); + + void SetVsync(bool enabled) { VsyncEnabled = enabled; } + + // BeginFrame returns time of the call + // TBD: Should this be a predicted time value instead ? 
+ double BeginFrame(unsigned frameIndex); + void EndFrame(); + + // Thread-safe function to query timing for a future frame + Timing GetFrameTiming(unsigned frameIndex); + + double GetEyePredictionTime(ovrEyeType eye); + Posef GetEyePredictionPose(ovrHmd hmd, ovrEyeType eye); + + void GetTimewarpPredictions(ovrEyeType eye, double timewarpStartEnd[2]); + void GetTimewarpMatrices(ovrHmd hmd, ovrEyeType eye, ovrPosef renderPose, ovrMatrix4f twmOut[2]); + + // Used by renderer to determine if it should time distortion rendering. + bool NeedDistortionTimeMeasurement() const; + void AddDistortionTimeMeasurement(double distortionTimeSeconds); + + + // DK2 Lateny test interface + + // Get next draw color for DK2 latency tester + unsigned char GetFrameLatencyTestDrawColor() + { return ScreenLatencyTracker.GetNextDrawColor(); } + + // Must be called after EndFrame() to update latency tester timings. + // Must pass color reported by NextFrameColor for this frame. + void UpdateFrameLatencyTrackingAfterEndFrame(unsigned char frameLatencyTestColor, + const Util::FrameTimeRecordSet& rs); + + void GetLatencyTimings(float latencies[3]) + { return ScreenLatencyTracker.GetLatencyTimings(latencies); } + + + const Timing& GetFrameTiming() const { return FrameTiming; } + +private: + + double calcFrameDelta() const; + double calcScreenDelay() const; + double calcTimewarpWaitDelta() const; + + + HmdRenderInfo RenderInfo; + // Timings are collected through a median filter, to avoid outliers. + TimeDeltaCollector FrameTimeDeltas; + TimeDeltaCollector DistortionRenderTimes; + FrameLatencyTracker ScreenLatencyTracker; + + // Timing changes if we have no Vsync (all prediction is reduced to fixed interval). + bool VsyncEnabled; + // Set if we are rendering via the SDK, so DistortionRenderTimes is valid. + bool DynamicPrediction; + // Set if SDk is doing teh rendering. + bool SdkRender; + + // Total frame delay due to VsyncToFirstScanline, persistence and settle time. + // Computed from RenderInfor.Shutter. + double VSyncToScanoutDelay; + double NoVSyncToScanoutDelay; + double ScreenSwitchingDelay; + + // Current (or last) frame timing info. Used as a source for LocklessTiming. + Timing FrameTiming; + // TBD: Don't we need NextFrame here as well? + LocklessUpdater<Timing> LocklessTiming; + + + // IMU Read timings + double RenderIMUTimeSeconds; + double TimewarpIMUTimeSeconds; +}; + + +}} // namespace OVR::CAPI + +#endif // OVR_CAPI_FrameTimeManager_h + + diff --git a/LibOVR/Src/CAPI/CAPI_GlobalState.cpp b/LibOVR/Src/CAPI/CAPI_GlobalState.cpp new file mode 100644 index 0000000..2ed1794 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_GlobalState.cpp @@ -0,0 +1,142 @@ +/************************************************************************************ + +Filename : CAPI_GlobalState.cpp +Content : Maintains global state of the CAPI +Created : January 24, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_GlobalState.h" + +namespace OVR { namespace CAPI { + + +//------------------------------------------------------------------------------------- +// Open Questions / Notes + +// 2. Detect HMDs. +// Challenge: If we do everything through polling, it would imply we want all the devices +// initialized. However, there may be multiple rifts, extra sensors, etc, +// which shouldn't be allocated. +// + +// How do you reset orientation Quaternion? +// Can you change IPD? + + + +//------------------------------------------------------------------------------------- +// ***** OVRGlobalState + +// Global instance +GlobalState* GlobalState::pInstance = 0; + + +GlobalState::GlobalState() +{ + pManager = *DeviceManager::Create(); + // Handle the DeviceManager's messages + pManager->AddMessageHandler( this ); + EnumerateDevices(); + + // PhoneSensors::Init(); +} + +GlobalState::~GlobalState() +{ + RemoveHandlerFromDevices(); + OVR_ASSERT(HMDs.IsEmpty()); +} + +int GlobalState::EnumerateDevices() +{ + // Need to use separate lock for device enumeration, as pManager->GetHandlerLock() + // would produce deadlocks here. + Lock::Locker lock(&EnumerationLock); + + EnumeratedDevices.Clear(); + + DeviceEnumerator<HMDDevice> e = pManager->EnumerateDevices<HMDDevice>(); + while(e.IsAvailable()) + { + EnumeratedDevices.PushBack(DeviceHandle(e)); + e.Next(); + } + + return (int)EnumeratedDevices.GetSize(); +} + + +HMDDevice* GlobalState::CreateDevice(int index) +{ + Lock::Locker lock(&EnumerationLock); + + if (index >= (int)EnumeratedDevices.GetSize()) + return 0; + return EnumeratedDevices[index].CreateDeviceTyped<HMDDevice>(); +} + + +void GlobalState::AddHMD(HMDState* hmd) +{ + Lock::Locker lock(pManager->GetHandlerLock()); + HMDs.PushBack(hmd); +} +void GlobalState::RemoveHMD(HMDState* hmd) +{ + Lock::Locker lock(pManager->GetHandlerLock()); + hmd->RemoveNode(); +} + +void GlobalState::NotifyHMDs_AddDevice(DeviceType deviceType) +{ + Lock::Locker lock(pManager->GetHandlerLock()); + for(HMDState* hmd = HMDs.GetFirst(); !HMDs.IsNull(hmd); hmd = hmd->pNext) + hmd->NotifyAddDevice(deviceType); +} + +void GlobalState::OnMessage(const Message& msg) +{ + if (msg.Type == Message_DeviceAdded || msg.Type == Message_DeviceRemoved) + { + if (msg.pDevice == pManager) + { + const MessageDeviceStatus& statusMsg = + static_cast<const MessageDeviceStatus&>(msg); + + if (msg.Type == Message_DeviceAdded) + { + //LogText("OnMessage DeviceAdded.\n"); + + // We may have added a sensor/other device; notify any HMDs that might + // need it to check for it later. 
+ NotifyHMDs_AddDevice(statusMsg.Handle.GetType()); + } + else + { + //LogText("OnMessage DeviceRemoved.\n"); + } + } + } +} + + +}} // namespace OVR::CAPI diff --git a/LibOVR/Src/CAPI/CAPI_GlobalState.h b/LibOVR/Src/CAPI/CAPI_GlobalState.h new file mode 100644 index 0000000..54ab8cc --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_GlobalState.h @@ -0,0 +1,84 @@ +/************************************************************************************ + +Filename : CAPI_GlobalState.h +Content : Maintains global state of the CAPI +Created : January 24, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_CAPI_GlobalState_h +#define OVR_CAPI_GlobalState_h + +#include "../OVR_CAPI.h" +#include "../OVR_Device.h" +#include "../Kernel/OVR_Timer.h" +#include "../Kernel/OVR_Math.h" + +#include "CAPI_HMDState.h" + +namespace OVR { namespace CAPI { + +//------------------------------------------------------------------------------------- +// ***** OVRGlobalState + +// Global DeviceManager state - singleton instance of this is created +// by ovr_Initialize(). +class GlobalState : public MessageHandler, public NewOverrideBase +{ +public: + GlobalState(); + ~GlobalState(); + + static GlobalState *pInstance; + + int EnumerateDevices(); + HMDDevice* CreateDevice(int index); + + // MessageHandler implementation + void OnMessage(const Message& msg); + + // Helpers used to keep track of HMDs and notify them of sensor changes. + void AddHMD(HMDState* hmd); + void RemoveHMD(HMDState* hmd); + void NotifyHMDs_AddDevice(DeviceType deviceType); + + const char* GetLastError() + { + return 0; + } + + DeviceManager* GetManager() { return pManager; } + +protected: + + Ptr<DeviceManager> pManager; + Lock EnumerationLock; + Array<DeviceHandle> EnumeratedDevices; + + // Currently created hmds; protected by Manager lock. + List<HMDState> HMDs; +}; + +}} // namespace OVR::CAPI + +#endif + + diff --git a/LibOVR/Src/CAPI/CAPI_HMDRenderState.cpp b/LibOVR/Src/CAPI/CAPI_HMDRenderState.cpp new file mode 100644 index 0000000..bdfa0c7 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_HMDRenderState.cpp @@ -0,0 +1,147 @@ +/************************************************************************************ + +Filename : OVR_CAPI_HMDRenderState.cpp +Content : Combines all of the rendering state associated with the HMD +Created : February 2, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
+ +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + + + + +#include "CAPI_HMDRenderState.h" + + +namespace OVR { namespace CAPI { + + + +//------------------------------------------------------------------------------------- +// ***** HMDRenderState + + +HMDRenderState::HMDRenderState(ovrHmd hmd, Profile* userProfile, const OVR::HMDInfo& hmdInfo) + : HMD(hmd), HMDInfo(hmdInfo) +{ + RenderInfo = GenerateHmdRenderInfoFromHmdInfo( HMDInfo, userProfile ); + + Distortion[0] = CalculateDistortionRenderDesc(StereoEye_Left, RenderInfo, 0); + Distortion[1] = CalculateDistortionRenderDesc(StereoEye_Right, RenderInfo, 0); + + ClearColor[0] = ClearColor[1] = ClearColor[2] = ClearColor[3] =0.0f; +} + +HMDRenderState::~HMDRenderState() +{ + +} + + +ovrHmdDesc HMDRenderState::GetDesc() +{ + ovrHmdDesc d; + memset(&d, 0, sizeof(d)); + + d.Type = ovrHmd_Other; + + d.ProductName = HMDInfo.ProductName; + d.Manufacturer = HMDInfo.Manufacturer; + d.Resolution.w = HMDInfo.ResolutionInPixels.w; + d.Resolution.h = HMDInfo.ResolutionInPixels.h; + d.WindowsPos.x = HMDInfo.DesktopX; + d.WindowsPos.y = HMDInfo.DesktopY; + d.DisplayDeviceName = HMDInfo.DisplayDeviceName; + d.DisplayId = HMDInfo.DisplayId; + + d.Caps = ovrHmdCap_YawCorrection | ovrHmdCap_Orientation | ovrHmdCap_Present; + + if (strstr(HMDInfo.ProductName, "DK1")) + { + d.Type = ovrHmd_DK1; + } + else if (strstr(HMDInfo.ProductName, "DK2")) + { + d.Type = ovrHmd_DK2; + d.Caps |= ovrHmdCap_Position | ovrHmdCap_LowPersistence; + } + + DistortionRenderDesc& leftDistortion = Distortion[0]; + DistortionRenderDesc& rightDistortion = Distortion[1]; + + // The suggested FOV (assuming eye rotation) + d.DefaultEyeFov[0] = CalculateFovFromHmdInfo(StereoEye_Left, leftDistortion, RenderInfo, OVR_DEFAULT_EXTRA_EYE_ROTATION); + d.DefaultEyeFov[1] = CalculateFovFromHmdInfo(StereoEye_Right, rightDistortion, RenderInfo, OVR_DEFAULT_EXTRA_EYE_ROTATION); + + // FOV extended across the entire screen + d.MaxEyeFov[0] = GetPhysicalScreenFov(StereoEye_Left, leftDistortion); + d.MaxEyeFov[1] = GetPhysicalScreenFov(StereoEye_Right, rightDistortion); + + if (HMDInfo.Shutter.Type == HmdShutter_RollingRightToLeft) + { + d.EyeRenderOrder[0] = ovrEye_Right; + d.EyeRenderOrder[1] = ovrEye_Left; + } + else + { + d.EyeRenderOrder[0] = ovrEye_Left; + d.EyeRenderOrder[1] = ovrEye_Right; + } + + return d; +} + + +ovrSizei HMDRenderState::GetFOVTextureSize(int eye, ovrFovPort fov, float pixelsPerDisplayPixel) +{ + OVR_ASSERT((unsigned)eye < 2); + StereoEye seye = (eye == ovrEye_Left) ? 
StereoEye_Left : StereoEye_Right; + return CalculateIdealPixelSize(seye, Distortion[eye], fov, pixelsPerDisplayPixel); +} + +ovrEyeRenderDesc HMDRenderState::calcRenderDesc(const ovrEyeDesc& eyeDesc) +{ + HmdRenderInfo& hmdri = RenderInfo; + StereoEye eye = (eyeDesc.Eye == ovrEye_Left) ? StereoEye_Left : StereoEye_Right; + ovrEyeRenderDesc e0; + + e0.Desc = eyeDesc; + e0.ViewAdjust = CalculateEyeVirtualCameraOffset(hmdri, eye, false); + e0.DistortedViewport = GetFramebufferViewport(eye, hmdri); + e0.PixelsPerTanAngleAtCenter = Distortion[0].PixelsPerTanAngleAtCenter; + + // If RenderViewport is uninitialized, set it to texture size. + if (Sizei(e0.Desc.RenderViewport.Size) == Sizei(0)) + e0.Desc.RenderViewport.Size = e0.Desc.TextureSize; + + return e0; +} + + +void HMDRenderState::setupRenderDesc( ovrEyeRenderDesc eyeRenderDescOut[2], + const ovrEyeDesc eyeDescIn[2] ) +{ + eyeRenderDescOut[0] = EyeRenderDesc[0] = calcRenderDesc(eyeDescIn[0]); + eyeRenderDescOut[1] = EyeRenderDesc[1] = calcRenderDesc(eyeDescIn[1]); +} + + +}} // namespace OVR::CAPI + diff --git a/LibOVR/Src/CAPI/CAPI_HMDRenderState.h b/LibOVR/Src/CAPI/CAPI_HMDRenderState.h new file mode 100644 index 0000000..408af51 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_HMDRenderState.h @@ -0,0 +1,93 @@ +/************************************************************************************ + +Filename : CAPI_HMDRenderState.h +Content : Combines all of the rendering state associated with the HMD +Created : February 2, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_CAPI_HMDRenderState_h +#define OVR_CAPI_HMDRenderState_h + +#include "../OVR_CAPI.h" +#include "../Kernel/OVR_Math.h" +#include "../Util/Util_Render_Stereo.h" + + +namespace OVR { namespace CAPI { + +using namespace OVR::Util::Render; + +//------------------------------------------------------------------------------------- +// ***** HMDRenderState + +// Combines all of the rendering setup information about one HMD. + +class HMDRenderState : public NewOverrideBase +{ + // Quiet assignment compiler warning. + void operator = (const HMDRenderState&) { } +public: + + HMDRenderState(ovrHmd hmd, Profile* userProfile, const OVR::HMDInfo& hmdInfo); + virtual ~HMDRenderState(); + + + // *** Rendering Setup + + // Delegated access APIs + ovrHmdDesc GetDesc(); + ovrSizei GetFOVTextureSize(int eye, ovrFovPort fov, float pixelsPerDisplayPixel); + + ovrEyeRenderDesc calcRenderDesc(const ovrEyeDesc& eyeDesc); + + void setupRenderDesc(ovrEyeRenderDesc eyeRenderDescOut[2], + const ovrEyeDesc eyeDescIn[2]); +public: + + // HMDInfo shouldn't change, as its string pointers are passed out. 
+ ovrHmd HMD; + const OVR::HMDInfo& HMDInfo; + + //const char* pLastError; + + HmdRenderInfo RenderInfo; + DistortionRenderDesc Distortion[2]; + ovrEyeRenderDesc EyeRenderDesc[2]; + + // Clear color used for distortion + float ClearColor[4]; + + // Pose at which last time the eye was rendered, as submitted by EndEyeRender. + ovrPosef EyeRenderPoses[2]; + + // Capabilities passed to Configure. + unsigned HMDCaps; + unsigned DistortionCaps; +}; + + +}} // namespace OVR::CAPI + + +#endif // OVR_CAPI_HMDState_h + + diff --git a/LibOVR/Src/CAPI/CAPI_HMDState.cpp b/LibOVR/Src/CAPI/CAPI_HMDState.cpp new file mode 100644 index 0000000..156b84a --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_HMDState.cpp @@ -0,0 +1,774 @@ +/************************************************************************************ + +Filename : CAPI_HMDState.cpp +Content : State associated with a single HMD +Created : January 24, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_HMDState.h" +#include "CAPI_GlobalState.h" +#include "../OVR_Profile.h" + +namespace OVR { namespace CAPI { + +//------------------------------------------------------------------------------------- +// ***** HMDState + + +HMDState::HMDState(HMDDevice* device) + : pHMD(device), HMDInfoW(device), HMDInfo(HMDInfoW.h), + SensorStarted(0), SensorCreated(0), SensorCaps(0), + AddSensorCount(0), AddLatencyTestCount(0), AddLatencyTestDisplayCount(0), + RenderState(getThis(), pHMD->GetProfile(), HMDInfoW.h), + LastFrameTimeSeconds(0.0f), LastGetFrameTimeSeconds(0.0), + LatencyTestActive(false), + LatencyTest2Active(false) +{ + pLastError = 0; + GlobalState::pInstance->AddHMD(this); + + // Should be in renderer? + TimeManager.Init(RenderState.RenderInfo); + + EyeRenderActive[0] = false; + EyeRenderActive[1] = false; + + LatencyTestDrawColor[0] = 0; + LatencyTestDrawColor[1] = 0; + LatencyTestDrawColor[2] = 0; + + OVR_CAPI_VISION_CODE( pPoseTracker = 0; ) + + RenderingConfigured = false; + BeginFrameCalled = false; + BeginFrameThreadId = 0; + BeginFrameTimingCalled = false; +} + +HMDState::HMDState(ovrHmdType hmdType) + : pHMD(0), HMDInfoW(hmdType), HMDInfo(HMDInfoW.h), + SensorStarted(0), SensorCreated(0), SensorCaps(0), + AddSensorCount(0), AddLatencyTestCount(0), AddLatencyTestDisplayCount(0), + RenderState(getThis(), 0, HMDInfoW.h), // No profile. + LastFrameTimeSeconds(0.0), LastGetFrameTimeSeconds(0.0) +{ + // TBD: We should probably be looking up the default profile for the given + // device type + user. + + pLastError = 0; + GlobalState::pInstance->AddHMD(this); + + // Should be in renderer? 
+ TimeManager.Init(RenderState.RenderInfo); + + EyeRenderActive[0] = false; + EyeRenderActive[1] = false; + + OVR_CAPI_VISION_CODE( pPoseTracker = 0; ) + + RenderingConfigured = false; + BeginFrameCalled = false; + BeginFrameThreadId = 0; + BeginFrameTimingCalled = false; +} + + +HMDState::~HMDState() +{ + OVR_ASSERT(GlobalState::pInstance); + + StopSensor(); + ConfigureRendering(0,0,0,0,0); + + OVR_CAPI_VISION_CODE( OVR_ASSERT(pPoseTracker == 0); ) + + GlobalState::pInstance->RemoveHMD(this); +} + + +//------------------------------------------------------------------------------------- +// *** Sensor + +bool HMDState::StartSensor(unsigned supportedCaps, unsigned requiredCaps) +{ + Lock::Locker lockScope(&DevicesLock); + + // TBD: Implement an optimized path that allows you to change caps such as yaw. + if (SensorStarted) + { + + if ((SensorCaps ^ ovrHmdCap_LowPersistence) == supportedCaps) + { + // TBD: Fast persistance switching; redesign to make this better. + if (HMDInfo.HmdType == HmdType_CrystalCoveProto || HMDInfo.HmdType == HmdType_DK2) + { + // switch to full persistence + updateLowPersistenceMode((supportedCaps & ovrHmdCap_LowPersistence) != 0); + SensorCaps = supportedCaps; + return true; + } + } + + if ((SensorCaps ^ ovrHmdCap_DynamicPrediction) == supportedCaps) + { + // TBD: Fast persistance switching; redesign to make this better. + if (HMDInfo.HmdType == HmdType_DK2) + { + // switch to full persistence + TimeManager.ResetFrameTiming(TimeManager.GetFrameTiming().FrameIndex, + (supportedCaps & ovrHmdCap_NoVSync) ? false : true, + (supportedCaps & ovrHmdCap_DynamicPrediction) ? true : false, + RenderingConfigured); + SensorCaps = supportedCaps; + return true; + } + } + + StopSensor(); + } + + supportedCaps |= requiredCaps; + + // TBD: In case of sensor not being immediately available, it would be good to check + // yaw config availability to match it with ovrHmdCap_YawCorrection requirement. + // + + if (requiredCaps & ovrHmdCap_Position) + { + if (HMDInfo.HmdType != HmdType_CrystalCoveProto && HMDInfo.HmdType != HmdType_DK2) + { + pLastError = "ovrHmdCap_Position not supported on this HMD."; + return false; + } + } + if (requiredCaps & ovrHmdCap_LowPersistence) + { + if (HMDInfo.HmdType != HmdType_CrystalCoveProto && HMDInfo.HmdType != HmdType_DK2) + { + pLastError = "ovrHmdCap_LowPersistence not supported on this HMD."; + return false; + } + } + + + SensorCreated = false; + pSensor.Clear(); + if (pHMD) + { + // Zero AddSensorCount before creation, in case it fails (or succeeds but then + // immediately gets disconnected) followed by another Add notification. + AddSensorCount = 0; + pSensor = *pHMD->GetSensor(); + } + + if (!pSensor) + { + if (requiredCaps & ovrHmdCap_Orientation) + { + pLastError = "Failed to create sensor."; + return false; + } + // Succeed, waiting for sensor become available later. 
+ LogText("StartSensor succeeded - waiting for sensor.\n"); + } + else + { + pSensor->SetReportRate(500); + SFusion.AttachToSensor(pSensor); + applyProfileToSensorFusion(); + + if (requiredCaps & ovrHmdCap_YawCorrection) + { + if (!SFusion.HasMagCalibration()) + { + pLastError = "ovrHmdCap_YawCorrection not available."; + SFusion.AttachToSensor(0); + SFusion.Reset(); + pSensor.Clear(); + return false; + } + } + + SFusion.SetYawCorrectionEnabled((supportedCaps & ovrHmdCap_YawCorrection) != 0); + LogText("Sensor created.\n"); + + if (supportedCaps & ovrHmdCap_LowPersistence) + { + updateLowPersistenceMode(true); + } + else + { + if (HMDInfo.HmdType == HmdType_CrystalCoveProto || HMDInfo.HmdType == HmdType_DK2) + { + // switch to full persistence + updateLowPersistenceMode(false); + } + } + + if (HMDInfo.HmdType == HmdType_DK2) + { + updateLatencyTestForHmd((supportedCaps & ovrHmdCap_LatencyTest) != 0); + } + +#ifdef OVR_CAPI_VISIONSUPPORT + if (supportedCaps & ovrHmdCap_Position) + { + pPoseTracker = new Vision::PoseTracker(SFusion); + if (pPoseTracker) + { + pPoseTracker->AssociateHMD(pSensor); + } + LogText("Sensor Pose tracker created.\n"); + } + // TBD: How do we verify that position tracking is actually available + // i.e. camera is plugged in? + +#endif // OVR_CAPI_VISIONSUPPORT + + SensorCreated = true; + } + + SensorCaps = supportedCaps; + SensorStarted = true; + + return true; +} + + +// Stops sensor sampling, shutting down internal resources. +void HMDState::StopSensor() +{ + Lock::Locker lockScope(&DevicesLock); + + if (SensorStarted) + { +#ifdef OVR_CAPI_VISIONSUPPORT + if (pPoseTracker) + { + // TBD: Internals not thread safe - must fix!! + delete pPoseTracker; + pPoseTracker = 0; + LogText("Sensor Pose tracker destroyed.\n"); + } +#endif // OVR_CAPI_VISION_CODE + + SFusion.AttachToSensor(0); + SFusion.Reset(); + pSensor.Clear(); + AddSensorCount = 0; + SensorCaps = 0; + SensorCreated = false; + SensorStarted = false; + + LogText("StopSensor succeeded.\n"); + } +} + +// Resets sensor orientation. +void HMDState::ResetSensor() +{ + SFusion.Reset(); +} + + +// Returns prediction for time. +ovrSensorState HMDState::PredictedSensorState(double absTime) +{ + SensorState ss; + + // We are trying to keep this path lockless unless we are notified of new device + // creation while not having a sensor yet. It's ok to check SensorCreated volatile + // flag here, since GetSensorStateAtTime() is internally lockless and safe. + + if (SensorCreated || checkCreateSensor()) + { + ss = SFusion.GetSensorStateAtTime(absTime); + + if (!(ss.StatusFlags & ovrStatus_OrientationTracked)) + { + Lock::Locker lockScope(&DevicesLock); + +#ifdef OVR_CAPI_VISIONSUPPORT + if (pPoseTracker) + { + // TBD: Internals not thread safe - must fix!! + delete pPoseTracker; + pPoseTracker = 0; + LogText("Sensor Pose tracker destroyed.\n"); + } +#endif // OVR_CAPI_VISION_CODE + // Not needed yet; SFusion.AttachToSensor(0); + // This seems to reset orientation anyway... + pSensor.Clear(); + SensorCreated = false; + } + } + else + { + // SensorState() defaults to 0s. + // ss.Pose.Orientation = Quatf(); + // .. + + // John: + // We still want valid times so frames will get a delta-time + // and allow operation with a joypad when the sensor isn't + // connected. 
+ ss.Recorded.TimeInSeconds = absTime; + ss.Predicted.TimeInSeconds = absTime; + } + + ss.StatusFlags |= ovrStatus_HmdConnected; + return ss; +} + + +bool HMDState::checkCreateSensor() +{ + if (!(SensorStarted && !SensorCreated && AddSensorCount)) + return false; + + Lock::Locker lockScope(&DevicesLock); + + // Re-check condition once in the lock, in case the state changed. + if (SensorStarted && !SensorCreated && AddSensorCount) + { + if (pHMD) + { + AddSensorCount = 0; + pSensor = *pHMD->GetSensor(); + } + + if (pSensor) + { + pSensor->SetReportRate(500); + SFusion.AttachToSensor(pSensor); + SFusion.SetYawCorrectionEnabled((SensorCaps & ovrHmdCap_YawCorrection) != 0); + applyProfileToSensorFusion(); + +#ifdef OVR_CAPI_VISIONSUPPORT + if (SensorCaps & ovrHmdCap_Position) + { + pPoseTracker = new Vision::PoseTracker(SFusion); + if (pPoseTracker) + { + pPoseTracker->AssociateHMD(pSensor); + } + LogText("Sensor Pose tracker created.\n"); + } +#endif // OVR_CAPI_VISION_CODE + + LogText("Sensor created.\n"); + + SensorCreated = true; + return true; + } + } + + return SensorCreated; +} + +bool HMDState::GetSensorDesc(ovrSensorDesc* descOut) +{ + Lock::Locker lockScope(&DevicesLock); + + if (SensorCreated) + { + OVR_ASSERT(pSensor); + OVR::SensorInfo si; + pSensor->GetDeviceInfo(&si); + descOut->VendorId = si.VendorId; + descOut->ProductId = si.ProductId; + OVR_ASSERT(si.SerialNumber.GetSize() <= sizeof(descOut->SerialNumber)); + OVR_strcpy(descOut->SerialNumber, sizeof(descOut->SerialNumber), si.SerialNumber.ToCStr()); + return true; + } + return false; +} + + +void HMDState::applyProfileToSensorFusion() +{ + Profile* profile = pHMD ? pHMD->GetProfile() : 0; + SFusion.SetUserHeadDimensions ( profile, RenderState.RenderInfo ); +} + +void HMDState::updateLowPersistenceMode(bool lowPersistence) const +{ + OVR_ASSERT(pSensor); + DisplayReport dr; + pSensor->GetDisplayReport(&dr); + + dr.Persistence = (UInt16) (dr.TotalRows * (lowPersistence ? 0.18f : 1.0f)); + dr.Brightness = lowPersistence ? 255 : 0; + + pSensor->SetDisplayReport(dr); +} + +void HMDState::updateLatencyTestForHmd(bool latencyTesting) +{ + if (pSensor.GetPtr()) + { + DisplayReport dr; + pSensor->GetDisplayReport(&dr); + + dr.ReadPixel = latencyTesting; + + pSensor->SetDisplayReport(dr); + } + + if (latencyTesting) + { + LatencyUtil2.SetSensorDevice(pSensor.GetPtr()); + } + else + { + LatencyUtil2.SetSensorDevice(NULL); + } +} + +//------------------------------------------------------------------------------------- +// ***** Property Access + +// TBD: This all needs to be cleaned up and organized into namespaces. 
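As a rough illustration (not part of this patch), a client built against the 0.3.x C API reaches the accessors below through string-keyed property calls. The entry-point names (ovrHmd_GetFloat, ovrHmd_SetFloat, ovrHmd_GetFloatArray) and the default values used here are assumptions inferred from the keys these functions handle:

    // Hypothetical client-side sketch, assuming the 0.3.x property entry points.
    #include "OVR_CAPI.h"

    static void queryHmdProperties(ovrHmd hmd)
    {
        // Routed to HMDState::getFloatValue(); unknown keys fall back to the
        // user profile and then to the supplied default.
        float lensSeparation = ovrHmd_GetFloat(hmd, "LensSeparation", 0.0635f);

        // Routed to HMDState::setFloatValue(); only "CenterPupilDepth" is writable here.
        ovrHmd_SetFloat(hmd, "CenterPupilDepth", 0.05f);

        // Routed to HMDState::getFloatArray(); "ScreenSize" yields width and height in meters.
        float screenSize[2] = { 0.0f, 0.0f };
        unsigned count = ovrHmd_GetFloatArray(hmd, "ScreenSize", screenSize, 2);

        (void)lensSeparation;
        (void)count;
    }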
+ +float HMDState::getFloatValue(const char* propertyName, float defaultVal) +{ + if (OVR_strcmp(propertyName, "LensSeparation") == 0) + { + return HMDInfo.LensSeparationInMeters; + } + else if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0) + { + return SFusion.GetCenterPupilDepth(); + } + else if (pHMD) + { + Profile* p = pHMD->GetProfile(); + if (p) + { + return p->GetFloatValue(propertyName, defaultVal); + } + } + return defaultVal; +} + +bool HMDState::setFloatValue(const char* propertyName, float value) +{ + if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0) + { + SFusion.SetCenterPupilDepth(value); + return true; + } + return false; +} + + +static unsigned CopyFloatArrayWithLimit(float dest[], unsigned destSize, + float source[], unsigned sourceSize) +{ + unsigned count = Alg::Min(destSize, sourceSize); + for (unsigned i = 0; i < count; i++) + dest[i] = source[i]; + return count; +} + + +unsigned HMDState::getFloatArray(const char* propertyName, float values[], unsigned arraySize) +{ + if (arraySize) + { + if (OVR_strcmp(propertyName, "ScreenSize") == 0) + { + float data[2] = { HMDInfo.ScreenSizeInMeters.w, HMDInfo.ScreenSizeInMeters.h }; + + return CopyFloatArrayWithLimit(values, arraySize, data, 2); + } + else if (OVR_strcmp(propertyName, "DistortionClearColor") == 0) + { + return CopyFloatArrayWithLimit(values, arraySize, RenderState.ClearColor, 4); + } + else if (OVR_strcmp(propertyName, "DK2Latency") == 0) + { + if (HMDInfo.HmdType != HmdType_DK2) + return 0; + + float data[3]; + TimeManager.GetLatencyTimings(data); + + return CopyFloatArrayWithLimit(values, arraySize, data, 3); + } + + /* + else if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0) + { + if (arraySize >= 1) + { + values[0] = SFusion.GetCenterPupilDepth(); + return 1; + } + return 0; + } */ + else if (pHMD) + { + Profile* p = pHMD->GetProfile(); + + // TBD: Not quite right. Should update profile interface, so that + // we can return 0 in all conditions if property doesn't exist. + if (p) + { + unsigned count = p->GetFloatValues(propertyName, values, arraySize); + return count; + } + } + } + + return 0; +} + +bool HMDState::setFloatArray(const char* propertyName, float values[], unsigned arraySize) +{ + if (!arraySize) + return false; + + if (OVR_strcmp(propertyName, "DistortionClearColor") == 0) + { + CopyFloatArrayWithLimit(RenderState.ClearColor, 4, values, arraySize); + return true; + } + return false; +} + + +const char* HMDState::getString(const char* propertyName, const char* defaultVal) +{ + if (pHMD) + { + // For now, just access the profile. + Profile* p = pHMD->GetProfile(); + + LastGetStringValue[0] = 0; + if (p && p->GetValue(propertyName, LastGetStringValue, sizeof(LastGetStringValue))) + { + return LastGetStringValue; + } + } + + return defaultVal; +} + +//------------------------------------------------------------------------------------- +// *** Latency Test + +bool HMDState::ProcessLatencyTest(unsigned char rgbColorOut[3]) +{ + bool result = false; + + // Check create. + if (pLatencyTester) + { + if (pLatencyTester->IsConnected()) + { + Color colorToDisplay; + + LatencyUtil.ProcessInputs(); + result = LatencyUtil.DisplayScreenColor(colorToDisplay); + rgbColorOut[0] = colorToDisplay.R; + rgbColorOut[1] = colorToDisplay.G; + rgbColorOut[2] = colorToDisplay.B; + } + else + { + // Disconnect. 
+ LatencyUtil.SetDevice(NULL); + pLatencyTester = 0; + LogText("LATENCY SENSOR disconnected.\n"); + } + } + else if (AddLatencyTestCount > 0) + { + // This might have some unlikely race condition issue which could cause us to miss a device... + AddLatencyTestCount = 0; + + pLatencyTester = *GlobalState::pInstance->GetManager()-> + EnumerateDevices<LatencyTestDevice>().CreateDevice(); + if (pLatencyTester) + { + LatencyUtil.SetDevice(pLatencyTester); + LogText("LATENCY TESTER connected\n"); + } + } + + return result; +} + +void HMDState::ProcessLatencyTest2(unsigned char rgbColorOut[3], double startTime) +{ + // Check create. + if (!(SensorCaps & ovrHmdCap_LatencyTest)) + return; + + if (pLatencyTesterDisplay && !LatencyUtil2.HasDisplayDevice()) + { + if (!pLatencyTesterDisplay->IsConnected()) + { + LatencyUtil2.SetDisplayDevice(NULL); + } + } + else if (AddLatencyTestDisplayCount > 0) + { + // This might have some unlikely race condition issue + // which could cause us to miss a device... + AddLatencyTestDisplayCount = 0; + + pLatencyTesterDisplay = *GlobalState::pInstance->GetManager()-> + EnumerateDevices<LatencyTestDevice>().CreateDevice(); + if (pLatencyTesterDisplay) + { + LatencyUtil2.SetDisplayDevice(pLatencyTesterDisplay); + } + } + + if (LatencyUtil2.HasDevice() && pSensor && pSensor->IsConnected()) + { + LatencyUtil2.BeginTest(startTime); + + Color colorToDisplay; + LatencyTest2Active = LatencyUtil2.DisplayScreenColor(colorToDisplay); + rgbColorOut[0] = colorToDisplay.R; + rgbColorOut[1] = colorToDisplay.G; + rgbColorOut[2] = colorToDisplay.B; + } + else + { + LatencyTest2Active = false; + } +} + +//------------------------------------------------------------------------------------- +// *** Rendering + +bool HMDState::ConfigureRendering(ovrEyeRenderDesc eyeRenderDescOut[2], + const ovrEyeDesc eyeDescIn[2], + const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, + unsigned distortionCaps) +{ + ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_ConfigureRendering"); + + // null -> shut down. + if (!apiConfig) + { + if (pRenderer) + pRenderer.Clear(); + RenderingConfigured = false; + return true; + } + + if (pRenderer && + (apiConfig->Header.API != pRenderer->GetRenderAPI())) + { + // Shutdown old renderer. + if (pRenderer) + pRenderer.Clear(); + } + + + // Step 1: do basic setup configuration + RenderState.setupRenderDesc(eyeRenderDescOut, eyeDescIn); + RenderState.HMDCaps = hmdCaps; // Any cleaner way? + RenderState.DistortionCaps = distortionCaps; + + TimeManager.ResetFrameTiming(0, + (hmdCaps & ovrHmdCap_NoVSync) ? false : true, + (hmdCaps & ovrHmdCap_DynamicPrediction) ? true : false, + true); + + LastFrameTimeSeconds = 0.0f; + + // Set RenderingConfigured early to avoid ASSERTs in renderer initialization. + RenderingConfigured = true; + + if (!pRenderer) + { + pRenderer = *DistortionRenderer::APICreateRegistry + [apiConfig->Header.API](this, TimeManager, RenderState); + } + + if (!pRenderer || + !pRenderer->Initialize(apiConfig, hmdCaps, distortionCaps)) + { + RenderingConfigured = false; + return false; + } + + return true; +} + + + +ovrPosef HMDState::BeginEyeRender(ovrEyeType eye) +{ + // Debug checks. 
+ checkBeginFrameScope("ovrHmd_BeginEyeRender"); + ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_BeginEyeRender"); + + // Unknown eyeId provided in ovrHmd_BeginEyeRender + OVR_ASSERT_LOG(eye == ovrEye_Left || eye == ovrEye_Right, + ("ovrHmd_BeginEyeRender eyeId out of range.")); + OVR_ASSERT_LOG(EyeRenderActive[eye] == false, + ("Multiple calls to ovrHmd_BeginEyeRender for the same eye.")); + + EyeRenderActive[eye] = true; + + // Only process latency tester for drawing the left eye (assumes left eye is drawn first) + if (pRenderer && eye == 0) + { + LatencyTestActive = ProcessLatencyTest(LatencyTestDrawColor); + } + + return ovrHmd_GetEyePose(this, eye); +} + + +void HMDState::EndEyeRender(ovrEyeType eye, ovrPosef renderPose, ovrTexture* eyeTexture) +{ + // Debug checks. + checkBeginFrameScope("ovrHmd_EndEyeRender"); + ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_EndEyeRender"); + + if (!EyeRenderActive[eye]) + { + OVR_ASSERT_LOG(false, + ("ovrHmd_EndEyeRender called without ovrHmd_BeginEyeRender.")); + return; + } + + RenderState.EyeRenderPoses[eye] = renderPose; + + if (pRenderer) + pRenderer->SubmitEye(eye, eyeTexture); + + EyeRenderActive[eye] = false; +} + +}} // namespace OVR::CAPI + diff --git a/LibOVR/Src/CAPI/CAPI_HMDState.h b/LibOVR/Src/CAPI/CAPI_HMDState.h new file mode 100644 index 0000000..d178042 --- /dev/null +++ b/LibOVR/Src/CAPI/CAPI_HMDState.h @@ -0,0 +1,334 @@ +/************************************************************************************ + +Filename : CAPI_HMDState.h +Content : State associated with a single HMD +Created : January 24, 2014 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_CAPI_HMDState_h +#define OVR_CAPI_HMDState_h + +#include "../Kernel/OVR_Math.h" +#include "../Kernel/OVR_List.h" +#include "../Kernel/OVR_Log.h" +#include "../OVR_CAPI.h" +#include "../OVR_SensorFusion.h" +#include "../Util/Util_LatencyTest.h" +#include "../Util/Util_LatencyTest2.h" + +#include "CAPI_FrameTimeManager.h" +#include "CAPI_HMDRenderState.h" +#include "CAPI_DistortionRenderer.h" + +// Define OVR_CAPI_VISIONSUPPORT to compile in vision support +#ifdef OVR_CAPI_VISIONSUPPORT + #define OVR_CAPI_VISION_CODE(c) c + #include "../Vision/Vision_PoseTracker.h" +#else + #define OVR_CAPI_VISION_CODE(c) +#endif + + +struct ovrHmdStruct { }; + +namespace OVR { namespace CAPI { + +using namespace OVR::Util::Render; + + +//------------------------------------------------------------------------------------- +// ***** ThreadChecker + +// This helper class is used to verify that the API is used according to supported +// thread safety constraints (is not re-entrant for this and related functions). 
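A minimal usage sketch, mirroring the ThreadChecker::Scope guards already visible in CAPI_HMDState.cpp above (the function name here is hypothetical):

    void HMDState::SomeRenderEntryPoint()   // hypothetical guarded entry point
    {
        // In debug builds this asserts if another thread is already inside a call
        // guarded by the same checker; in release builds it compiles away.
        ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "SomeRenderEntryPoint");

        // ... render-API work ...
    }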
+class ThreadChecker +{ +public: + +#ifndef OVR_BUILD_DEBUG + + // In release build, thread checks are disabled. + ThreadChecker() { } + void Begin(const char* functionName) { OVR_UNUSED1(functionName); } + void End() { } + + // Add thread-re-entrancy check for function scope + struct Scope + { + Scope(ThreadChecker*, const char *) { } + ~Scope() { } + }; + + +#else // OVR_BUILD_DEBUG + ThreadChecker() : pFunctionName(0), FirstThread(0) + { } + + void Begin(const char* functionName) + { + if (!pFunctionName) + { + pFunctionName = functionName; + FirstThread = GetCurrentThreadId(); + } + else + { + // pFunctionName may be not null here if function is called internally on the same thread. + OVR_ASSERT_LOG((FirstThread == GetCurrentThreadId()), + ("%s (threadId=%d) called at the same times as %s (threadId=%d)\n", + functionName, GetCurrentThreadId(), pFunctionName, FirstThread) ); + } + } + void End() + { + pFunctionName = 0; + FirstThread = 0; + } + + // Add thread-re-entrancy check for function scope. + struct Scope + { + Scope(ThreadChecker* threadChecker, const char *functionName) : pChecker(threadChecker) + { pChecker->Begin(functionName); } + ~Scope() + { pChecker->End(); } + private: + ThreadChecker* pChecker; + }; + +private: + // If not 0, contains the name of the function that first entered the scope. + const char * pFunctionName; + ThreadId FirstThread; + +#endif // OVR_BUILD_DEBUG +}; + + +//------------------------------------------------------------------------------------- +// ***** HMDState + +// Describes a single HMD. +class HMDState : public ListNode<HMDState>, + public ovrHmdStruct, public NewOverrideBase +{ +public: + + HMDState(HMDDevice* device); + HMDState(ovrHmdType hmdType); + virtual ~HMDState(); + + + // *** Sensor Setup + + bool StartSensor(unsigned supportedCaps, unsigned requiredCaps); + void StopSensor(); + void ResetSensor(); + ovrSensorState PredictedSensorState(double absTime); + bool GetSensorDesc(ovrSensorDesc* descOut); + + bool ProcessLatencyTest(unsigned char rgbColorOut[3]); + void ProcessLatencyTest2(unsigned char rgbColorOut[3], double startTime); + + + // *** Rendering Setup + + bool ConfigureRendering(ovrEyeRenderDesc eyeRenderDescOut[2], + const ovrEyeDesc eyeDescIn[2], + const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, + unsigned distortionCaps); + + ovrPosef BeginEyeRender(ovrEyeType eye); + void EndEyeRender(ovrEyeType eye, ovrPosef renderPose, ovrTexture* eyeTexture); + + + const char* GetLastError() + { + const char* p = pLastError; + pLastError = 0; + return p; + } + + void NotifyAddDevice(DeviceType deviceType) + { + if (deviceType == Device_Sensor) + AddSensorCount++; + else if (deviceType == Device_LatencyTester) + { + AddLatencyTestCount++; + AddLatencyTestDisplayCount++; + } + } + + bool checkCreateSensor(); + + void applyProfileToSensorFusion(); + + // INlines so that they can be easily compiled out. + // Does debug ASSERT checks for functions that require BeginFrame. + // Also verifies that we are on the right thread. + void checkBeginFrameScope(const char* functionName) + { + OVR_UNUSED1(functionName); // for Release build. + OVR_ASSERT_LOG(BeginFrameCalled == true, + ("%s called outside ovrHmd_BeginFrame.")); + OVR_ASSERT_LOG(BeginFrameThreadId == OVR::GetCurrentThreadId(), + ("%s called on a different thread then ovrHmd_BeginFrame.")); + } + + void checkRenderingConfigured(const char* functionName) + { + OVR_UNUSED1(functionName); // for Release build. 
+ OVR_ASSERT_LOG(RenderingConfigured == true, + ("%s called without ovrHmd_ConfigureRendering.")); + } + + void checkBeginFrameTimingScope(const char* functionName) + { + OVR_UNUSED1(functionName); // for Release build. + OVR_ASSERT_LOG(BeginFrameTimingCalled == true, + ("%s called outside ovrHmd_BeginFrameTiming.")); + } + + + HMDState* getThis() { return this; } + + void updateLowPersistenceMode(bool lowPersistence) const; + void updateLatencyTestForHmd(bool latencyTesting); + + // Get properties by name. + float getFloatValue(const char* propertyName, float defaultVal); + bool setFloatValue(const char* propertyName, float value); + unsigned getFloatArray(const char* propertyName, float values[], unsigned arraySize); + bool setFloatArray(const char* propertyName, float values[], unsigned arraySize); + const char* getString(const char* propertyName, const char* defaultVal); +public: + + // Wrapper to support 'const' + struct HMDInfoWrapper + { + HMDInfoWrapper(ovrHmdType hmdType) + { + HmdTypeEnum t = HmdType_None; + if (hmdType == ovrHmd_DK1) + t = HmdType_DK1; + else if (hmdType == ovrHmd_CrystalCoveProto) + t = HmdType_CrystalCoveProto; + else if (hmdType == ovrHmd_DK2) + t = HmdType_DK2; + h = CreateDebugHMDInfo(t); + } + HMDInfoWrapper(HMDDevice* device) { if (device) device->GetDeviceInfo(&h); } + OVR::HMDInfo h; + }; + + // Note: pHMD can be null if we are representing a virtualized debug HMD. + Ptr<HMDDevice> pHMD; + + // HMDInfo shouldn't change, as its string pointers are passed out. + const HMDInfoWrapper HMDInfoW; + const OVR::HMDInfo& HMDInfo; + + const char* pLastError; + + + // *** Sensor + + // Lock used to support thread-safe lifetime access to sensor. + Lock DevicesLock; + + // Atomic integer used as a flag that we should check the sensor device. + AtomicInt<int> AddSensorCount; + + // All of Sensor variables may be modified/used with DevicesLock, with exception that + // the {SensorStarted, SensorCreated} can be read outside the lock to see + // if device creation check is necessary. + // Whether we called StartSensor() and requested sensor caps. + volatile bool SensorStarted; + volatile bool SensorCreated; + // pSensor may still be null or non-running after start if it wasn't yet available + Ptr<SensorDevice> pSensor; // Head + unsigned SensorCaps; + + // SensorFusion state may be accessible without a lock. + SensorFusion SFusion; + + + // Vision pose tracker is currently new-allocated + OVR_CAPI_VISION_CODE( + Vision::PoseTracker* pPoseTracker; + ) + + // Latency tester + Ptr<LatencyTestDevice> pLatencyTester; + Util::LatencyTest LatencyUtil; + AtomicInt<int> AddLatencyTestCount; + + bool LatencyTestActive; + unsigned char LatencyTestDrawColor[3]; + + // Using latency tester as debug display + Ptr<LatencyTestDevice> pLatencyTesterDisplay; + AtomicInt<int> AddLatencyTestDisplayCount; + Util::LatencyTest2 LatencyUtil2; + + bool LatencyTest2Active; + unsigned char LatencyTest2DrawColor[3]; + //bool ReadbackColor; + + // Rendering part + FrameTimeManager TimeManager; + HMDRenderState RenderState; + Ptr<DistortionRenderer> pRenderer; + + // Last timing value reported by BeginFrame. + double LastFrameTimeSeconds; + // Last timing value reported by GetFrameTime. These are separate since the intended + // use is from different threads. TBD: Move to FrameTimeManager? Make atomic? + double LastGetFrameTimeSeconds; + + // Last cached value returned by ovrHmd_GetString/ovrHmd_GetStringArray. 
+ char LastGetStringValue[256]; + + + // Debug flag set after ovrHmd_ConfigureRendering succeeds. + bool RenderingConfigured; + // Set after BeginFrame succeeds, and its corresponding thread id for debug checks. + bool BeginFrameCalled; + ThreadId BeginFrameThreadId; + // Graphics functions are not re-entrant from other threads. + ThreadChecker RenderAPIThreadChecker; + // + bool BeginFrameTimingCalled; + + // Flags set when we've called BeginEyeRender on a given eye. + bool EyeRenderActive[2]; +}; + + +}} // namespace OVR::CAPI + + +#endif // OVR_CAPI_HMDState_h + + diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.cpp new file mode 100644 index 0000000..d1ea4dc --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.cpp @@ -0,0 +1,29 @@ +/************************************************************************************ + +Filename : CAPI_D3D10_DistortionRenderer.cpp +Content : Distortion renderer instantiation for D3D10 +Created : November 11, 2013 +Authors : Volga Aksoy, Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#define OVR_D3D_VERSION 10 +#include "CAPI_D3D1X_Util.cpp" +#include "CAPI_D3D1X_DistortionRenderer.cpp" diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.h b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.h new file mode 100644 index 0000000..bb56cb4 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D10_DistortionRenderer.h @@ -0,0 +1,34 @@ +/************************************************************************************ + +Filename : CAPI_D3D10_DistortionRenderer.h +Content : Distortion renderer header for D3D10 +Created : November 11, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +************************************************************************************/ + +#ifndef INC_CAPI_D3D10_DistortionRenderer_h +#define INC_CAPI_D3D10_DistortionRenderer_h + +#define OVR_D3D_VERSION 10 +#include "CAPI_D3D1X_DistortionRenderer.h" +#undef OVR_D3D_VERSION + +#endif
\ No newline at end of file diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.cpp new file mode 100644 index 0000000..1184df8 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.cpp @@ -0,0 +1,30 @@ +/************************************************************************************ + +Filename : CAPI_D3D11_DistortionRenderer.cpp +Content : Distortion renderer instantiation for D3D11 +Created : November 11, 2013 +Authors : Volga Aksoy, Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#define OVR_D3D_VERSION 11 +#include "CAPI_D3D1X_Util.cpp" +#include "CAPI_D3D1X_DistortionRenderer.cpp" + diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.h b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.h new file mode 100644 index 0000000..8b9863b --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D11_DistortionRenderer.h @@ -0,0 +1,34 @@ +/************************************************************************************ + +Filename : CAPI_D3D11_DistortionRenderer.h +Content : Distortion renderer header for D3D11 +Created : November 11, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef INC_CAPI_D3D11_DistortionRenderer_h +#define INC_CAPI_D3D11_DistortionRenderer_h + +#define OVR_D3D_VERSION 11 +#include "CAPI_D3D1X_DistortionRenderer.h" +#undef OVR_D3D_VERSION + +#endif
\ No newline at end of file diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.cpp new file mode 100644 index 0000000..53f8948 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.cpp @@ -0,0 +1,773 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_DistortionRenderer.cpp +Content : Experimental distortion renderer +Created : November 11, 2013 +Authors : Volga Aksoy, Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_D3D1X_DistortionRenderer.h" + +#include "../../OVR_CAPI_D3D.h" + +namespace OVR { namespace CAPI { namespace D3D_NS { + +#include "../Shaders/Distortion_vs.h" +#include "../Shaders/Distortion_vs_refl.h" +#include "../Shaders/Distortion_ps.h" +#include "../Shaders/Distortion_ps_refl.h" +#include "../Shaders/DistortionChroma_vs.h" +#include "../Shaders/DistortionChroma_vs_refl.h" +#include "../Shaders/DistortionChroma_ps.h" +#include "../Shaders/DistortionChroma_ps_refl.h" +#include "../Shaders/DistortionTimewarp_vs.h" +#include "../Shaders/DistortionTimewarp_vs_refl.h" +#include "../Shaders/DistortionTimewarpChroma_vs.h" +#include "../Shaders/DistortionTimewarpChroma_vs_refl.h" + +#include "../Shaders/SimpleQuad_vs.h" +#include "../Shaders/SimpleQuad_vs_refl.h" +#include "../Shaders/SimpleQuad_ps.h" +#include "../Shaders/SimpleQuad_ps_refl.h" + +// Distortion pixel shader lookup. 
+// Bit 0: Chroma Correction +// Bit 1: Timewarp + +enum { + DistortionVertexShaderBitMask = 3, + DistortionVertexShaderCount = DistortionVertexShaderBitMask + 1, + DistortionPixelShaderBitMask = 1, + DistortionPixelShaderCount = DistortionPixelShaderBitMask + 1 +}; + +struct PrecompiledShader +{ + const unsigned char* ShaderData; + size_t ShaderSize; + const ShaderBase::Uniform* ReflectionData; + size_t ReflectionSize; +}; + +// Do add a new distortion shader use these macros (with or w/o reflection) +#define PCS_NOREFL(shader) { shader, sizeof(shader), NULL, 0 } +#define PCS_REFL__(shader) { shader, sizeof(shader), shader ## _refl, sizeof( shader ## _refl )/sizeof(*(shader ## _refl)) } + + +static PrecompiledShader DistortionVertexShaderLookup[DistortionVertexShaderCount] = +{ + PCS_REFL__(Distortion_vs), + PCS_REFL__(DistortionChroma_vs), + PCS_REFL__(DistortionTimewarp_vs), + PCS_REFL__(DistortionTimewarpChroma_vs), +}; + +static PrecompiledShader DistortionPixelShaderLookup[DistortionPixelShaderCount] = +{ + PCS_NOREFL(Distortion_ps), + PCS_NOREFL(DistortionChroma_ps) +}; + +void DistortionShaderBitIndexCheck() +{ + OVR_COMPILER_ASSERT(ovrDistortion_Chromatic == 1); + OVR_COMPILER_ASSERT(ovrDistortion_TimeWarp == 2); +} + + + +struct DistortionVertex +{ + Vector2f Pos; + Vector2f TexR; + Vector2f TexG; + Vector2f TexB; + Color Col; +}; + + +// Vertex type; same format is used for all shapes for simplicity. +// Shapes are built by adding vertices to Model. +struct Vertex +{ + Vector3f Pos; + Color C; + float U, V; + Vector3f Norm; + + Vertex (const Vector3f& p, const Color& c = Color(64,0,0,255), + float u = 0, float v = 0, Vector3f n = Vector3f(1,0,0)) + : Pos(p), C(c), U(u), V(v), Norm(n) + {} + Vertex(float x, float y, float z, const Color& c = Color(64,0,0,255), + float u = 0, float v = 0) : Pos(x,y,z), C(c), U(u), V(v) + { } + + bool operator==(const Vertex& b) const + { + return Pos == b.Pos && C == b.C && U == b.U && V == b.V; + } +}; + + +//---------------------------------------------------------------------------- +// ***** D3D1X::DistortionRenderer + +DistortionRenderer::DistortionRenderer(ovrHmd hmd, FrameTimeManager& timeManager, + const HMDRenderState& renderState) + : CAPI::DistortionRenderer(ovrRenderAPI_D3D11, hmd, timeManager, renderState) +{ +} + +DistortionRenderer::~DistortionRenderer() +{ + destroy(); +} + +// static +CAPI::DistortionRenderer* DistortionRenderer::Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState) +{ + return new DistortionRenderer(hmd, timeManager, renderState); +} + + +bool DistortionRenderer::Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps) +{ + // TBD: Decide if hmdCaps are needed here or are a part of RenderState + OVR_UNUSED(hmdCaps); + + const ovrD3D1X(Config)* config = (const ovrD3D1X(Config)*)apiConfig; + + if (!config) + { + // Cleanup + pEyeTextures[0].Clear(); + pEyeTextures[1].Clear(); + memset(&RParams, 0, sizeof(RParams)); + return true; + } + + if (!config->D3D_NS.pDevice || !config->D3D_NS.pBackBufferRT) + return false; + + RParams.pDevice = config->D3D_NS.pDevice; + RParams.pContext = D3DSELECT_10_11(config->D3D_NS.pDevice, config->D3D_NS.pDeviceContext); + RParams.pBackBufferRT = config->D3D_NS.pBackBufferRT; + RParams.pSwapChain = config->D3D_NS.pSwapChain; + RParams.RTSize = config->D3D_NS.Header.RTSize; + RParams.Multisample = config->D3D_NS.Header.Multisample; + + DistortionCaps = distortionCaps; + + //DistortionWarper.SetVsync((hmdCaps & 
ovrHmdCap_NoVSync) ? false : true); + + pEyeTextures[0] = *new Texture(&RParams, Texture_RGBA, Sizei(0), + getSamplerState(Sample_Linear|Sample_ClampBorder)); + pEyeTextures[1] = *new Texture(&RParams, Texture_RGBA, Sizei(0), + getSamplerState(Sample_Linear|Sample_ClampBorder)); + + initBuffersAndShaders(); + + // Rasterizer state + D3D1X_(RASTERIZER_DESC) rs; + memset(&rs, 0, sizeof(rs)); + rs.AntialiasedLineEnable = true; + rs.CullMode = D3D1X_(CULL_BACK); + rs.DepthClipEnable = true; + rs.FillMode = D3D1X_(FILL_SOLID); + RParams.pDevice->CreateRasterizerState(&rs, &Rasterizer.GetRawRef()); + + // TBD: Blend state.. not used? + // We'll want to turn off blending + +#if (OVR_D3D_VERSION == 11) + GpuProfiler.Init(RParams.pDevice, RParams.pContext); +#endif + + return true; +} + + +void DistortionRenderer::SubmitEye(int eyeId, ovrTexture* eyeTexture) +{ + const ovrD3D1X(Texture)* tex = (const ovrD3D1X(Texture)*)eyeTexture; + + if (eyeTexture) + { + // Use tex->D3D_NS.Header.RenderViewport to update UVs for rendering in case they changed. + // TBD: This may be optimized through some caching. + ovrEyeDesc ed = RState.EyeRenderDesc[eyeId].Desc; + ed.TextureSize = tex->D3D_NS.Header.TextureSize; + ed.RenderViewport = tex->D3D_NS.Header.RenderViewport; + + ovrHmd_GetRenderScaleAndOffset(HMD, ed, DistortionCaps, UVScaleOffset[eyeId]); + + pEyeTextures[eyeId]->UpdatePlaceholderTexture(tex->D3D_NS.pTexture, tex->D3D_NS.pSRView, + tex->D3D_NS.Header.TextureSize); + } +} + +void DistortionRenderer::EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, + unsigned char* latencyTester2DrawColor) +{ + +#if 0 + + // MA: This causes orientation and positional stutter!! NOT USABLE. + if (!TimeManager.NeedDistortionTimeMeasurement() && + (RState.DistortionCaps & ovrDistortion_TimeWarp)) + { + // Wait for timewarp distortion if it is time + FlushGpuAndWaitTillTime(TimeManager.GetFrameTiming().TimewarpPointTime); + } + + // Always measure distortion time so that TimeManager can better + // estimate latency-reducing time-warp wait timing. + { + GpuProfiler.BeginQuery(); + + renderDistortion(pEyeTextures[0], pEyeTextures[1]); + + GpuProfiler.EndQuery(); + TimeManager.AddDistortionTimeMeasurement(GpuProfiler.GetTiming(false)); + } +#else + + if (!TimeManager.NeedDistortionTimeMeasurement()) + { + if (RState.DistortionCaps & ovrDistortion_TimeWarp) + { + // Wait for timewarp distortion if it is time and Gpu idle + FlushGpuAndWaitTillTime(TimeManager.GetFrameTiming().TimewarpPointTime); + } + + renderDistortion(pEyeTextures[0], pEyeTextures[1]); + } + else + { + // If needed, measure distortion time so that TimeManager can better estimate + // latency-reducing time-warp wait timing. + WaitUntilGpuIdle(); + double distortionStartTime = ovr_GetTimeInSeconds(); + + renderDistortion(pEyeTextures[0], pEyeTextures[1]); + + WaitUntilGpuIdle(); + TimeManager.AddDistortionTimeMeasurement(ovr_GetTimeInSeconds() - distortionStartTime); + } +#endif + + if(latencyTesterDrawColor) + { + renderLatencyQuad(latencyTesterDrawColor); + } + else if(latencyTester2DrawColor) + { + renderLatencyPixel(latencyTester2DrawColor); + } + + if (swapBuffers) + { + if (RParams.pSwapChain) + { + UINT swapInterval = (RState.HMDCaps & ovrHmdCap_NoVSync) ? 0 : 1; + RParams.pSwapChain->Present(swapInterval, 0); + + // Force GPU to flush the scene, resulting in the lowest possible latency. + // It's critical that this flush is *after* present. 
+ WaitUntilGpuIdle();
+ }
+ else
+ {
+ // TBD: Generate error - swapbuffer option used with null swapchain.
+ }
+ }
+}
+
+
+void DistortionRenderer::WaitUntilGpuIdle()
+{
+ // Flush and stall the CPU while waiting for the GPU to complete all queued draw calls.
+ D3D1x_QUERY_DESC queryDesc = { D3D1X_(QUERY_EVENT), 0 };
+ Ptr<ID3D1xQuery> query;
+ BOOL done = FALSE;
+
+ if (RParams.pDevice->CreateQuery(&queryDesc, &query.GetRawRef()) == S_OK)
+ {
+ D3DSELECT_10_11(query->End(),
+ RParams.pContext->End(query));
+
+ // GetData returns S_OK whether done is TRUE or FALSE.
+ // Exit on failure to avoid an infinite loop.
+ do { }
+ while(!done &&
+ !FAILED(D3DSELECT_10_11(query->GetData(&done, sizeof(BOOL), 0),
+ RParams.pContext->GetData(query, &done, sizeof(BOOL), 0)))
+ );
+ }
+}
+
+double DistortionRenderer::FlushGpuAndWaitTillTime(double absTime)
+{
+ double initialTime = ovr_GetTimeInSeconds();
+ if (initialTime >= absTime)
+ return 0.0;
+
+ // Flush and stall the CPU while waiting for the GPU to complete all queued draw calls.
+ D3D1x_QUERY_DESC queryDesc = { D3D1X_(QUERY_EVENT), 0 };
+ Ptr<ID3D1xQuery> query;
+ BOOL done = FALSE;
+ bool callGetData = false;
+
+ if (RParams.pDevice->CreateQuery(&queryDesc, &query.GetRawRef()) == S_OK)
+ {
+ D3DSELECT_10_11(query->End(),
+ RParams.pContext->End(query));
+ callGetData = true;
+ }
+
+ double newTime = initialTime;
+ volatile int i;
+
+ while (newTime < absTime)
+ {
+ if (callGetData)
+ {
+ // GetData returns S_OK whether done is TRUE or FALSE.
+ // Stop calling GetData on failure.
+ callGetData = !FAILED(D3DSELECT_10_11(query->GetData(&done, sizeof(BOOL), 0),
+ RParams.pContext->GetData(query, &done, sizeof(BOOL), 0))) && !done;
+ }
+ else
+ {
+ // No event query available - spin briefly before re-checking the clock.
+ for (int j = 0; j < 50; j++)
+ i = 0;
+ }
+ newTime = ovr_GetTimeInSeconds();
+ }
+
+ // Return how long we waited.
+ return newTime - initialTime;
+}
+
+void DistortionRenderer::initBuffersAndShaders()
+{
+ for ( int eyeNum = 0; eyeNum < 2; eyeNum++ )
+ {
+ // Allocate & generate distortion mesh vertices.
+ ovrDistortionMesh meshData; + +// double startT = ovr_GetTimeInSeconds(); + + if (!ovrHmd_CreateDistortionMesh( HMD, RState.EyeRenderDesc[eyeNum].Desc, + RState.DistortionCaps, + UVScaleOffset[eyeNum], &meshData) ) + { + OVR_ASSERT(false); + continue; + } + +// double deltaT = ovr_GetTimeInSeconds() - startT; +// LogText("GenerateDistortion time = %f\n", deltaT); + + // Now parse the vertex data and create a render ready vertex buffer from it + DistortionVertex * pVBVerts = (DistortionVertex*)OVR_ALLOC ( sizeof(DistortionVertex) * meshData.VertexCount ); + DistortionVertex * pCurVBVert = pVBVerts; + ovrDistortionVertex* pCurOvrVert = meshData.pVertexData; + + for ( unsigned vertNum = 0; vertNum < meshData.VertexCount; vertNum++ ) + { + pCurVBVert->Pos.x = pCurOvrVert->Pos.x; + pCurVBVert->Pos.y = pCurOvrVert->Pos.y; + pCurVBVert->TexR = (*(Vector2f*)&pCurOvrVert->TexR); + pCurVBVert->TexG = (*(Vector2f*)&pCurOvrVert->TexG); + pCurVBVert->TexB = (*(Vector2f*)&pCurOvrVert->TexB); + // Convert [0.0f,1.0f] to [0,255] + pCurVBVert->Col.R = (OVR::UByte)( pCurOvrVert->VignetteFactor * 255.99f ); + pCurVBVert->Col.G = pCurVBVert->Col.R; + pCurVBVert->Col.B = pCurVBVert->Col.R; + pCurVBVert->Col.A = (OVR::UByte)( pCurOvrVert->TimeWarpFactor * 255.99f );; + pCurOvrVert++; + pCurVBVert++; + } + + DistortionMeshVBs[eyeNum] = *new Buffer(&RParams); + DistortionMeshVBs[eyeNum]->Data ( Buffer_Vertex, pVBVerts, sizeof(DistortionVertex) * meshData.VertexCount ); + DistortionMeshIBs[eyeNum] = *new Buffer(&RParams); + DistortionMeshIBs[eyeNum]->Data ( Buffer_Index, meshData.pIndexData, ( sizeof(INT16) * meshData.IndexCount ) ); + + OVR_FREE ( pVBVerts ); + ovrHmd_DestroyDistortionMesh( &meshData ); + } + + // Uniform buffers + for(int i = 0; i < Shader_Count; i++) + { + UniformBuffers[i] = *new Buffer(&RParams); + //MaxTextureSet[i] = 0; + } + + initShaders(); +} + +void DistortionRenderer::renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture) +{ + RParams.pContext->RSSetState(Rasterizer); + + RParams.pContext->OMSetRenderTargets(1, &RParams.pBackBufferRT, 0); + + setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h)); + + // Not affected by viewport. + RParams.pContext->ClearRenderTargetView(RParams.pBackBufferRT, RState.ClearColor); + + for(int eyeNum = 0; eyeNum < 2; eyeNum++) + { + ShaderFill distortionShaderFill(DistortionShader); + distortionShaderFill.SetTexture(0, eyeNum == 0 ? 
leftEyeTexture : rightEyeTexture); + distortionShaderFill.SetInputLayout(DistortionVertexIL); + + DistortionShader->SetUniform2f("EyeToSourceUVScale", UVScaleOffset[eyeNum][0].x, UVScaleOffset[eyeNum][0].y); + DistortionShader->SetUniform2f("EyeToSourceUVOffset", UVScaleOffset[eyeNum][1].x, UVScaleOffset[eyeNum][1].y); + + if (DistortionCaps & ovrDistortion_TimeWarp) + { + ovrMatrix4f timeWarpMatrices[2]; + ovrHmd_GetEyeTimewarpMatrices(HMD, (ovrEyeType)eyeNum, + RState.EyeRenderPoses[eyeNum], timeWarpMatrices); + + // Feed identity like matrices in until we get proper timewarp calculation going on + DistortionShader->SetUniform4x4f("EyeRotationStart", Matrix4f(timeWarpMatrices[0])); + DistortionShader->SetUniform4x4f("EyeRotationEnd", Matrix4f(timeWarpMatrices[1])); + + renderPrimitives(&distortionShaderFill, DistortionMeshVBs[eyeNum], DistortionMeshIBs[eyeNum], + NULL, 0, (int)DistortionMeshVBs[eyeNum]->GetSize(), Prim_Triangles); + } + else + { + renderPrimitives(&distortionShaderFill, DistortionMeshVBs[eyeNum], DistortionMeshIBs[eyeNum], + NULL, 0, (int)DistortionMeshVBs[eyeNum]->GetSize(), Prim_Triangles); + } + } +} + +void DistortionRenderer::createDrawQuad() +{ + const int numQuadVerts = 4; + LatencyTesterQuadVB = *new Buffer(&RParams); + if(!LatencyTesterQuadVB) + { + return; + } + + LatencyTesterQuadVB->Data(Buffer_Vertex, NULL, numQuadVerts * sizeof(Vertex)); + Vertex* vertices = (Vertex*)LatencyTesterQuadVB->Map(0, numQuadVerts * sizeof(Vertex), Map_Discard); + if(!vertices) + { + OVR_ASSERT(false); // failed to lock vertex buffer + return; + } + + const float left = -1.0f; + const float top = -1.0f; + const float right = 1.0f; + const float bottom = 1.0f; + + vertices[0] = Vertex(Vector3f(left, top, 0.0f), Color(255, 255, 255, 255)); + vertices[1] = Vertex(Vector3f(left, bottom, 0.0f), Color(255, 255, 255, 255)); + vertices[2] = Vertex(Vector3f(right, top, 0.0f), Color(255, 255, 255, 255)); + vertices[3] = Vertex(Vector3f(right, bottom, 0.0f), Color(255, 255, 255, 255)); + + LatencyTesterQuadVB->Unmap(vertices); +} + +void DistortionRenderer::renderLatencyQuad(unsigned char* latencyTesterDrawColor) +{ + const int numQuadVerts = 4; + + if(!LatencyTesterQuadVB) + { + createDrawQuad(); + } + + ShaderFill quadFill(SimpleQuadShader); + quadFill.SetInputLayout(SimpleQuadVertexIL); + + setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h)); + + SimpleQuadShader->SetUniform2f("Scale", 0.2f, 0.2f); + SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterDrawColor[0] / 255.99f, + (float)latencyTesterDrawColor[0] / 255.99f, + (float)latencyTesterDrawColor[0] / 255.99f, + 1.0f); + + for(int eyeNum = 0; eyeNum < 2; eyeNum++) + { + SimpleQuadShader->SetUniform2f("PositionOffset", eyeNum == 0 ? 
-0.4f : 0.4f, 0.0f); + renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, NULL, 0, numQuadVerts, Prim_TriangleStrip); + } +} + +void DistortionRenderer::renderLatencyPixel(unsigned char* latencyTesterPixelColor) +{ + const int numQuadVerts = 4; + + if(!LatencyTesterQuadVB) + { + createDrawQuad(); + } + + ShaderFill quadFill(SimpleQuadShader); + quadFill.SetInputLayout(SimpleQuadVertexIL); + + setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h)); + + SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterPixelColor[0] / 255.99f, + (float)latencyTesterPixelColor[0] / 255.99f, + (float)latencyTesterPixelColor[0] / 255.99f, + 1.0f); + + Vector2f scale(2.0f / RParams.RTSize.w, 2.0f / RParams.RTSize.h); + SimpleQuadShader->SetUniform2f("Scale", scale.x, scale.y); + SimpleQuadShader->SetUniform2f("PositionOffset", 1.0f, 1.0f); + renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, NULL, 0, numQuadVerts, Prim_TriangleStrip); +} + +void DistortionRenderer::renderPrimitives( + const ShaderFill* fill, + Buffer* vertices, Buffer* indices, + Matrix4f* viewMatrix, int offset, int count, + PrimitiveType rprim) +{ + OVR_ASSERT(fill->GetInputLayout() != 0); + RParams.pContext->IASetInputLayout((ID3D1xInputLayout*)fill->GetInputLayout()); + + if (indices) + { + RParams.pContext->IASetIndexBuffer(indices->GetBuffer(), DXGI_FORMAT_R16_UINT, 0); + } + + ID3D1xBuffer* vertexBuffer = vertices->GetBuffer(); + UINT vertexStride = sizeof(Vertex); + UINT vertexOffset = offset; + RParams.pContext->IASetVertexBuffers(0, 1, &vertexBuffer, &vertexStride, &vertexOffset); + + ShaderSet* shaders = ((ShaderFill*)fill)->GetShaders(); + + ShaderBase* vshader = ((ShaderBase*)shaders->GetShader(Shader_Vertex)); + unsigned char* vertexData = vshader->UniformData; + if (vertexData) + { + // TODO: some VSes don't start with StandardUniformData! 
+ if ( viewMatrix ) + { + StandardUniformData* stdUniforms = (StandardUniformData*) vertexData; + stdUniforms->View = viewMatrix->Transposed(); + stdUniforms->Proj = StdUniforms.Proj; + } + UniformBuffers[Shader_Vertex]->Data(Buffer_Uniform, vertexData, vshader->UniformsSize); + vshader->SetUniformBuffer(UniformBuffers[Shader_Vertex]); + } + + for(int i = Shader_Vertex + 1; i < Shader_Count; i++) + { + if (shaders->GetShader(i)) + { + ((ShaderBase*)shaders->GetShader(i))->UpdateBuffer(UniformBuffers[i]); + ((ShaderBase*)shaders->GetShader(i))->SetUniformBuffer(UniformBuffers[i]); + } + } + + D3D1X_(PRIMITIVE_TOPOLOGY) prim; + switch(rprim) + { + case Prim_Triangles: + prim = D3D1X_(PRIMITIVE_TOPOLOGY_TRIANGLELIST); + break; + case Prim_Lines: + prim = D3D1X_(PRIMITIVE_TOPOLOGY_LINELIST); + break; + case Prim_TriangleStrip: + prim = D3D1X_(PRIMITIVE_TOPOLOGY_TRIANGLESTRIP); + break; + default: + OVR_ASSERT(0); + return; + } + RParams.pContext->IASetPrimitiveTopology(prim); + + fill->Set(rprim); + + if (indices) + { + RParams.pContext->DrawIndexed(count, 0, 0); + } + else + { + RParams.pContext->Draw(count, 0); + } +} + +void DistortionRenderer::setViewport(const Recti& vp) +{ + D3D1x_VIEWPORT d3dvp; + + d3dvp.Width = D3DSELECT_10_11(vp.w, (float)vp.w); + d3dvp.Height = D3DSELECT_10_11(vp.h, (float)vp.h); + d3dvp.TopLeftX = D3DSELECT_10_11(vp.x, (float)vp.x); + d3dvp.TopLeftY = D3DSELECT_10_11(vp.y, (float)vp.y); + d3dvp.MinDepth = 0; + d3dvp.MaxDepth = 1; + RParams.pContext->RSSetViewports(1, &d3dvp); +} + + + + +static D3D1X_(INPUT_ELEMENT_DESC) DistortionMeshVertexDesc[] = +{ + {"Position", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, + {"TexCoord", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 8, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, + {"TexCoord", 1, DXGI_FORMAT_R32G32_FLOAT, 0, 16, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, + {"TexCoord", 2, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, + {"Color", 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, 32, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, +}; + +static D3D1X_(INPUT_ELEMENT_DESC) SimpleQuadMeshVertexDesc[] = +{ + {"Position", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D1X_(INPUT_PER_VERTEX_DATA), 0}, +}; + +// TODO: this is D3D specific +void DistortionRenderer::initShaders() +{ + { + PrecompiledShader vsShaderByteCode = DistortionVertexShaderLookup[DistortionVertexShaderBitMask & DistortionCaps]; + Ptr<D3D_NS::VertexShader> vtxShader = *new D3D_NS::VertexShader( + &RParams, + (void*)vsShaderByteCode.ShaderData, vsShaderByteCode.ShaderSize, + vsShaderByteCode.ReflectionData, vsShaderByteCode.ReflectionSize); + + ID3D1xInputLayout** objRef = &DistortionVertexIL.GetRawRef(); + + HRESULT validate = RParams.pDevice->CreateInputLayout( + DistortionMeshVertexDesc, sizeof(DistortionMeshVertexDesc) / sizeof(DistortionMeshVertexDesc[0]), + vsShaderByteCode.ShaderData, vsShaderByteCode.ShaderSize, objRef); + OVR_UNUSED(validate); + + DistortionShader = *new ShaderSet; + DistortionShader->SetShader(vtxShader); + + PrecompiledShader psShaderByteCode = DistortionPixelShaderLookup[DistortionPixelShaderBitMask & DistortionCaps]; + + Ptr<D3D_NS::PixelShader> ps = *new D3D_NS::PixelShader( + &RParams, + (void*)psShaderByteCode.ShaderData, psShaderByteCode.ShaderSize, + psShaderByteCode.ReflectionData, psShaderByteCode.ReflectionSize); + + DistortionShader->SetShader(ps); + } + + { + Ptr<D3D_NS::VertexShader> vtxShader = *new D3D_NS::VertexShader( + &RParams, + (void*)SimpleQuad_vs, sizeof(SimpleQuad_vs), + SimpleQuad_vs_refl, sizeof(SimpleQuad_vs_refl) / 
sizeof(SimpleQuad_vs_refl[0])); + //NULL, 0); + + ID3D1xInputLayout** objRef = &SimpleQuadVertexIL.GetRawRef(); + + HRESULT validate = RParams.pDevice->CreateInputLayout( + SimpleQuadMeshVertexDesc, sizeof(SimpleQuadMeshVertexDesc) / sizeof(SimpleQuadMeshVertexDesc[0]), + (void*)SimpleQuad_vs, sizeof(SimpleQuad_vs), objRef); + OVR_UNUSED(validate); + + SimpleQuadShader = *new ShaderSet; + SimpleQuadShader->SetShader(vtxShader); + + Ptr<D3D_NS::PixelShader> ps = *new D3D_NS::PixelShader( + &RParams, + (void*)SimpleQuad_ps, sizeof(SimpleQuad_ps), + SimpleQuad_ps_refl, sizeof(SimpleQuad_ps_refl) / sizeof(SimpleQuad_ps_refl[0])); + + SimpleQuadShader->SetShader(ps); + } +} + + + +ID3D1xSamplerState* DistortionRenderer::getSamplerState(int sm) +{ + if (SamplerStates[sm]) + return SamplerStates[sm]; + + D3D1X_(SAMPLER_DESC) ss; + memset(&ss, 0, sizeof(ss)); + if (sm & Sample_Clamp) + ss.AddressU = ss.AddressV = ss.AddressW = D3D1X_(TEXTURE_ADDRESS_CLAMP); + else if (sm & Sample_ClampBorder) + ss.AddressU = ss.AddressV = ss.AddressW = D3D1X_(TEXTURE_ADDRESS_BORDER); + else + ss.AddressU = ss.AddressV = ss.AddressW = D3D1X_(TEXTURE_ADDRESS_WRAP); + + if (sm & Sample_Nearest) + { + ss.Filter = D3D1X_(FILTER_MIN_MAG_MIP_POINT); + } + else if (sm & Sample_Anisotropic) + { + ss.Filter = D3D1X_(FILTER_ANISOTROPIC); + ss.MaxAnisotropy = 8; + } + else + { + ss.Filter = D3D1X_(FILTER_MIN_MAG_MIP_LINEAR); + } + ss.MaxLOD = 15; + RParams.pDevice->CreateSamplerState(&ss, &SamplerStates[sm].GetRawRef()); + return SamplerStates[sm]; +} + + +void DistortionRenderer::destroy() +{ + for(int eyeNum = 0; eyeNum < 2; eyeNum++) + { + DistortionMeshVBs[eyeNum].Clear(); + DistortionMeshIBs[eyeNum].Clear(); + } + + DistortionVertexIL.Clear(); + + if (DistortionShader) + { + DistortionShader->UnsetShader(Shader_Vertex); + DistortionShader->UnsetShader(Shader_Pixel); + DistortionShader.Clear(); + } + + LatencyTesterQuadVB.Clear(); +} + +}}} // OVR::CAPI::D3D1X diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.h b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.h new file mode 100644 index 0000000..f151d73 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_DistortionRenderer.h @@ -0,0 +1,131 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_DistortionRenderer.h +Content : Experimental distortion renderer +Created : November 11, 2013 +Authors : Volga Aksoy + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +// No include guard, since this fill will be multiply-included. 
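This header, like the distortion renderer .cpp above, is compiled once per OVR_D3D_VERSION value by the thin D3D10/D3D11 wrapper files. The selection macros it relies on live in CAPI_D3D1X_Util.h, which is not reproduced in this hunk; as a hedged sketch, consistent with the D3D_NS, D3D1X_() and D3DSELECT_10_11() uses in the code above, they presumably follow a pattern along these lines:

    // Sketch only - the real definitions are in CAPI_D3D1X_Util.h.
    #if (OVR_D3D_VERSION == 10)
        #define D3D_NS                    D3D10   // namespace OVR::CAPI::D3D10
        #define D3D1X_(x)                 D3D10_##x
        #define D3DSELECT_10_11(a10, a11) a10
    #else // OVR_D3D_VERSION == 11
        #define D3D_NS                    D3D11   // namespace OVR::CAPI::D3D11
        #define D3D1X_(x)                 D3D11_##x
        #define D3DSELECT_10_11(a10, a11) a11
    #endif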
+//#ifndef OVR_CAPI_D3D1X_DistortionRenderer_h + +#include "CAPI_D3D1X_Util.h" +#include "../CAPI_DistortionRenderer.h" + +#include "../../Kernel/OVR_Log.h" + +namespace OVR { namespace CAPI { namespace D3D_NS { + + +// ***** D3D1X::DistortionRenderer + +// Implementation of DistortionRenderer for D3D10/11. + +class DistortionRenderer : public CAPI::DistortionRenderer +{ +public: + DistortionRenderer(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState); + ~DistortionRenderer(); + + + // Creation function for the device. + static CAPI::DistortionRenderer* Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState); + + + // ***** Public DistortionRenderer interface + + virtual bool Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps); + + virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture); + + virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor); + + // TBD: Make public? + void WaitUntilGpuIdle(); + + // Similar to ovr_WaitTillTime but it also flushes GPU. + // Note, it exits when time expires, even if GPU is not in idle state yet. + double FlushGpuAndWaitTillTime(double absTime); + +private: + // Helpers + void initBuffersAndShaders(); + void initShaders(); + void initFullscreenQuad(); + void destroy(); + + void setViewport(const Recti& vp); + + void renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture); + + void renderPrimitives(const ShaderFill* fill, Buffer* vertices, Buffer* indices, + Matrix4f* viewMatrix, int offset, int count, + PrimitiveType rprim); + + void createDrawQuad(); + void renderLatencyQuad(unsigned char* latencyTesterDrawColor); + void renderLatencyPixel(unsigned char* latencyTesterPixelColor); + + // Create or get cached D3D sampler based on flags. + ID3D1xSamplerState* getSamplerState(int sm); + + + // TBD: Should we be using oe from RState instead? + unsigned DistortionCaps; + + // D3DX device and utility variables. + RenderParams RParams; + Ptr<Texture> pEyeTextures[2]; + + // U,V scale and offset needed for timewarp. + ovrVector2f UVScaleOffset[2][2]; + + //Ptr<Buffer> mpFullScreenVertexBuffer; + + Ptr<Buffer> DistortionMeshVBs[2]; // one per-eye + Ptr<Buffer> DistortionMeshIBs[2]; // one per-eye + + Ptr<ShaderSet> DistortionShader; + Ptr<ID3D1xInputLayout> DistortionVertexIL; + + struct StandardUniformData + { + Matrix4f Proj; + Matrix4f View; + } StdUniforms; + Ptr<Buffer> UniformBuffers[Shader_Count]; + + Ptr<ID3D1xSamplerState> SamplerStates[Sample_Count]; + Ptr<ID3D1xRasterizerState> Rasterizer; + + Ptr<Buffer> LatencyTesterQuadVB; + Ptr<ShaderSet> SimpleQuadShader; + Ptr<ID3D1xInputLayout> SimpleQuadVertexIL; + + GpuTimer GpuProfiler; +}; + +}}} // OVR::CAPI::D3D1X diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.cpp new file mode 100644 index 0000000..501a8e3 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.cpp @@ -0,0 +1,416 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_Util.cpp +Content : D3DX10 utility classes for rendering +Created : September 10, 2012 +Authors : Andrew Reisse + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
+ +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_D3D1X_Util.h" + +#include <d3dcompiler.h> + +namespace OVR { namespace CAPI { namespace D3D_NS { + + +//------------------------------------------------------------------------------------- +// ***** ShaderFill + +void ShaderFill::Set(PrimitiveType prim) const +{ + Shaders->Set(prim); + for(int i = 0; i < 8; i++) + { + if(Textures[i]) + { + Textures[i]->Set(i); + } + } +} + + +//------------------------------------------------------------------------------------- +// ***** Buffer + +Buffer::~Buffer() +{ +} + +bool Buffer::Data(int use, const void *buffer, size_t size) +{ + if (D3DBuffer && Size >= size) + { + if (Dynamic) + { + if (!buffer) + return true; + + void* v = Map(0, size, Map_Discard); + if (v) + { + memcpy(v, buffer, size); + Unmap(v); + return true; + } + } + else + { + pParams->pContext->UpdateSubresource(D3DBuffer, 0, NULL, buffer, 0, 0); + return true; + } + } + if (D3DBuffer) + { + D3DBuffer = NULL; + Size = 0; + Use = 0; + Dynamic = 0; + } + + D3D1X_(BUFFER_DESC) desc; + memset(&desc, 0, sizeof(desc)); + if (use & Buffer_ReadOnly) + { + desc.Usage = D3D1X_(USAGE_IMMUTABLE); + desc.CPUAccessFlags = 0; + } + else + { + desc.Usage = D3D1X_(USAGE_DYNAMIC); + desc.CPUAccessFlags = D3D1X_(CPU_ACCESS_WRITE); + Dynamic = 1; + } + + switch(use & Buffer_TypeMask) + { + case Buffer_Vertex: desc.BindFlags = D3D1X_(BIND_VERTEX_BUFFER); break; + case Buffer_Index: desc.BindFlags = D3D1X_(BIND_INDEX_BUFFER); break; + case Buffer_Uniform: + desc.BindFlags = D3D1X_(BIND_CONSTANT_BUFFER); + size += ((size + 15) & ~15) - size; + break; + } + + desc.ByteWidth = (unsigned)size; + + D3D1X_(SUBRESOURCE_DATA) sr; + sr.pSysMem = buffer; + sr.SysMemPitch = 0; + sr.SysMemSlicePitch = 0; + + HRESULT hr = pParams->pDevice->CreateBuffer(&desc, buffer ? 
&sr : NULL, &D3DBuffer.GetRawRef()); + if (SUCCEEDED(hr)) + { + Use = use; + Size = desc.ByteWidth; + return 1; + } + return 0; +} + +void* Buffer::Map(size_t start, size_t size, int flags) +{ + OVR_UNUSED(size); + + D3D1X_(MAP) mapFlags = D3D1X_(MAP_WRITE); + if (flags & Map_Discard) + mapFlags = D3D1X_(MAP_WRITE_DISCARD); + if (flags & Map_Unsynchronized) + mapFlags = D3D1X_(MAP_WRITE_NO_OVERWRITE); + +#if (OVR_D3D_VERSION == 10) + void* map; + if (SUCCEEDED(D3DBuffer->Map(mapFlags, 0, &map))) + return ((char*)map) + start; +#else + D3D11_MAPPED_SUBRESOURCE map; + if (SUCCEEDED(pParams->pContext->Map(D3DBuffer, 0, mapFlags, 0, &map))) + return ((char*)map.pData) + start; +#endif + + return NULL; +} + +bool Buffer::Unmap(void *m) +{ + OVR_UNUSED(m); + + D3DSELECT_10_11( D3DBuffer->Unmap(), + pParams->pContext->Unmap(D3DBuffer, 0) ); + return true; +} + + +//------------------------------------------------------------------------------------- +// Shaders + +template<> bool ShaderImpl<Shader_Vertex, ID3D1xVertexShader>::Load(void* shader, size_t size) +{ + return SUCCEEDED(pParams->pDevice->CreateVertexShader(shader, size D3D11_COMMA_0, &D3DShader)); +} +template<> bool ShaderImpl<Shader_Pixel, ID3D1xPixelShader>::Load(void* shader, size_t size) +{ + return SUCCEEDED(pParams->pDevice->CreatePixelShader(shader, size D3D11_COMMA_0, &D3DShader)); +} + +template<> void ShaderImpl<Shader_Vertex, ID3D1xVertexShader>::Set(PrimitiveType) const +{ + pParams->pContext->VSSetShader(D3DShader D3D11_COMMA_0 D3D11_COMMA_0 ); +} +template<> void ShaderImpl<Shader_Pixel, ID3D1xPixelShader>::Set(PrimitiveType) const +{ + pParams->pContext->PSSetShader(D3DShader D3D11_COMMA_0 D3D11_COMMA_0 ) ; +} + +template<> void ShaderImpl<Shader_Vertex, ID3D1xVertexShader>::SetUniformBuffer(Buffer* buffer, int i) +{ + pParams->pContext->VSSetConstantBuffers(i, 1, &((Buffer*)buffer)->D3DBuffer.GetRawRef()); +} +template<> void ShaderImpl<Shader_Pixel, ID3D1xPixelShader>::SetUniformBuffer(Buffer* buffer, int i) +{ + pParams->pContext->PSSetConstantBuffers(i, 1, &((Buffer*)buffer)->D3DBuffer.GetRawRef()); +} + + +//------------------------------------------------------------------------------------- +// ***** Shader Base + +ShaderBase::ShaderBase(RenderParams* rp, ShaderStage stage) + : Shader(stage), pParams(rp), UniformData(0) +{ +} +ShaderBase::~ShaderBase() +{ + if (UniformData) + OVR_FREE(UniformData); +} + +bool ShaderBase::SetUniform(const char* name, int n, const float* v) +{ + for(unsigned i = 0; i < UniformReflSize; i++) + { + if (!strcmp(UniformRefl[i].Name, name)) + { + memcpy(UniformData + UniformRefl[i].Offset, v, n * sizeof(float)); + return 1; + } + } + return 0; +} + +bool ShaderBase::SetUniformBool(const char* name, int n, const bool* v) +{ + OVR_UNUSED(n); + for(unsigned i = 0; i < UniformReflSize; i++) + { + if (!strcmp(UniformRefl[i].Name, name)) + { + memcpy(UniformData + UniformRefl[i].Offset, v, UniformRefl[i].Size); + return 1; + } + } + return 0; +} + +void ShaderBase::InitUniforms(const Uniform* refl, size_t reflSize) +{ + if(!refl) + { + UniformRefl = NULL; + UniformReflSize = 0; + + UniformsSize = 0; + if (UniformData) + { + OVR_FREE(UniformData); + UniformData = 0; + } + return; // no reflection data + } + + UniformRefl = refl; + UniformReflSize = reflSize; + + UniformsSize = UniformRefl[UniformReflSize-1].Offset + UniformRefl[UniformReflSize-1].Size; + UniformData = (unsigned char*)OVR_ALLOC(UniformsSize); +} + +void ShaderBase::UpdateBuffer(Buffer* buf) +{ + if (UniformsSize) + { + 
buf->Data(Buffer_Uniform, UniformData, UniformsSize); + } +} + + +//------------------------------------------------------------------------------------- +// ***** Texture +// +Texture::Texture(RenderParams* rp, int fmt, const Sizei texSize, + ID3D1xSamplerState* sampler, int samples) + : pParams(rp), Tex(NULL), TexSv(NULL), TexRtv(NULL), TexDsv(NULL), + TextureSize(texSize), + Sampler(sampler), + Samples(samples) +{ + OVR_UNUSED(fmt); +} + +Texture::~Texture() +{ +} + +void Texture::Set(int slot, ShaderStage stage) const +{ + ID3D1xShaderResourceView* texSv = TexSv.GetPtr(); + + switch(stage) + { + case Shader_Fragment: + pParams->pContext->PSSetShaderResources(slot, 1, &texSv); + pParams->pContext->PSSetSamplers(slot, 1, &Sampler.GetRawRef()); + break; + + case Shader_Vertex: + pParams->pContext->VSSetShaderResources(slot, 1, &texSv); + break; + } +} + + +//------------------------------------------------------------------------------------- +// ***** GpuTimer +// +#if (OVR_D3D_VERSION == 11) +#define D3DQUERY_EXEC(_context_, _query_, _command_, ...) _context_->_command_(_query_, __VA_ARGS__) +#else +#define D3DQUERY_EXEC(_context_, _query_, _command_, ...) _query_->_command_(__VA_ARGS__) +#endif + + +void GpuTimer::Init(ID3D1xDevice* device, ID3D1xDeviceContext* content) +{ + D3dDevice = device; + Context = content; +} + +void GpuTimer::BeginQuery() +{ + if(GotoNextFrame(LastQueuedFrame) == LastTimedFrame) + { + OVR_ASSERT(false); // too many queries queued + return; + } + + LastQueuedFrame = GotoNextFrame(LastQueuedFrame); + + GpuQuerySets& newQuerySet = QuerySets[LastQueuedFrame]; + if(newQuerySet.DisjointQuery == NULL) + { + // Create the queries + D3D1x_QUERY_DESC desc; + desc.Query = D3D1X_(QUERY_TIMESTAMP_DISJOINT); + desc.MiscFlags = 0; + VERIFY_HRESULT(D3dDevice->CreateQuery(&desc, &newQuerySet.DisjointQuery)); + + desc.Query = D3D1X_(QUERY_TIMESTAMP); + VERIFY_HRESULT(D3dDevice->CreateQuery(&desc, &newQuerySet.TimeStartQuery)); + VERIFY_HRESULT(D3dDevice->CreateQuery(&desc, &newQuerySet.TimeEndQuery)); + } + + OVR_ASSERT(!newQuerySet.QueryStarted); + OVR_ASSERT(!newQuerySet.QueryAwaitingTiming); + + + D3DQUERY_EXEC(Context, QuerySets[LastQueuedFrame].DisjointQuery, Begin, ); // First start a disjoint query + D3DQUERY_EXEC(Context, QuerySets[LastQueuedFrame].TimeStartQuery, End, ); // Insert start timestamp + + newQuerySet.QueryStarted = true; + newQuerySet.QueryAwaitingTiming = false; + //newQuerySet.QueryTimed = false; +} + +void GpuTimer::EndQuery() +{ + if(LastQueuedFrame > 0 && !QuerySets[LastQueuedFrame].QueryStarted) + return; + + GpuQuerySets& doneQuerySet = QuerySets[LastQueuedFrame]; + OVR_ASSERT(doneQuerySet.QueryStarted); + OVR_ASSERT(!doneQuerySet.QueryAwaitingTiming); + + // Insert the end timestamp + D3DQUERY_EXEC(Context, doneQuerySet.TimeEndQuery, End, ); + + // End the disjoint query + D3DQUERY_EXEC(Context, doneQuerySet.DisjointQuery, End, ); + + doneQuerySet.QueryStarted = false; + doneQuerySet.QueryAwaitingTiming = true; +} + +float GpuTimer::GetTiming(bool blockUntilValid) +{ + float time = -1.0f; + + // loop until we hit a query that is not ready yet, or we have read all queued queries + while(LastTimedFrame != LastQueuedFrame) + { + int timeTestFrame = GotoNextFrame(LastTimedFrame); + + GpuQuerySets& querySet = QuerySets[timeTestFrame]; + + OVR_ASSERT(!querySet.QueryStarted && querySet.QueryAwaitingTiming); + + UINT64 startTime = 0; + UINT64 endTime = 0; + D3D1X_(QUERY_DATA_TIMESTAMP_DISJOINT) disjointData; + + if(blockUntilValid) + { + 
while(D3DQUERY_EXEC(Context, querySet.TimeStartQuery, GetData, &startTime, sizeof(startTime), 0) != S_OK); + while(D3DQUERY_EXEC(Context, querySet.TimeEndQuery, GetData, &endTime, sizeof(endTime), 0) != S_OK); + while(D3DQUERY_EXEC(Context, querySet.DisjointQuery, GetData, &disjointData, sizeof(disjointData), 0) != S_OK); + } + else + { +// Early return if we fail to get data for any of these + if(D3DQUERY_EXEC(Context, querySet.TimeStartQuery, GetData, &startTime, sizeof(startTime), 0) != S_OK) return time; + if(D3DQUERY_EXEC(Context, querySet.TimeEndQuery, GetData, &endTime, sizeof(endTime), 0) != S_OK) return time; + if(D3DQUERY_EXEC(Context, querySet.DisjointQuery, GetData, &disjointData, sizeof(disjointData), 0) != S_OK) return time; + } + + querySet.QueryAwaitingTiming = false; + LastTimedFrame = timeTestFrame; // successfully retrieved the timing data + + if(disjointData.Disjoint == false) + { + UINT64 delta = endTime - startTime; + float frequency = (float)(disjointData.Frequency); + time = (delta / frequency); + } + } + + return time; +} + +}}} // OVR::CAPI::D3DX diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.h b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.h new file mode 100644 index 0000000..f8d7bd3 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D1X_Util.h @@ -0,0 +1,505 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_Util.h +Content : D3DX 10/11 utility classes for rendering +Created : September 10, 2012 +Authors : Andrew Reisse + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +// ***** IMPORTANT: +// This file can be included twice, once with OVR_D3D_VERSION=10 and +// once with OVR_D3D_VERSION=11. + + +#ifndef OVR_D3D_VERSION +#error define OVR_D3D_VERSION to 10 or 11 +#endif + +// Custom include guard, allowing one of each D3D10/11. +#if (OVR_D3D_VERSION == 10 && !defined(INC_OVR_CAPI_D3D10_Util_h)) || \ + (OVR_D3D_VERSION == 11 && !defined(INC_OVR_CAPI_D3D11_Util_h)) + +#include "../../Kernel/OVR_String.h" +#include "../../Kernel/OVR_Array.h" +#include "../../Kernel/OVR_Math.h" + +#include <Windows.h> +#include <comdef.h> // for _COM_SMARTPTR_TYPEDEF() + +#undef D3D_NS // namespace +#undef D3D1X_ +#undef ID3D1X // interface prefix +#undef ovrD3D1X // ovrD3D10Config, etc. 
+#undef D3D11_COMMA_0 // Injects on ", 0" for D3D11 only +#undef D3DSELECT_10_11 +#undef IID_ID3D1xShaderReflection + +#if (OVR_D3D_VERSION == 10) + + #define INC_OVR_CAPI_D3D10_Util_h + #define D3D_NS D3D10 + #define D3D1X_(x) D3D10_##x + #define ID3D1X(x) ID3D10##x + #define ovrD3D1X(x) ovrD3D10##x + #define D3DSELECT_10_11(a10, a11) a10 + #define D3D11_COMMA_0 + #define IID_ID3D1xShaderReflection IID_ID3D10ShaderReflection + #include <d3d10_1.h> // avoids warning? + #include <d3d10.h> + +#else // (OVR_D3D_VERSION == 11) + + #define INC_OVR_CAPI_D3D11_Util_h + #define D3D_NS D3D11 + #define D3D1X_(x) D3D11_##x + #define ID3D1X(x) ID3D11##x + #define ovrD3D1X(x) ovrD3D11##x + #define D3DSELECT_10_11(a10, a11) a11 + #define D3D11_COMMA_0 , 0 + #define IID_ID3D1xShaderReflection IID_ID3D11ShaderReflection + #include <d3d11.h> + #include <D3D11Shader.h> +#endif + + +namespace OVR { namespace CAPI { namespace D3D_NS { + +// D3D Namespace-local types. +typedef ID3D1X(Device) ID3D1xDevice; +typedef ID3D1X(RenderTargetView) ID3D1xRenderTargetView; +typedef ID3D1X(Texture2D) ID3D1xTexture2D; +typedef ID3D1X(ShaderResourceView) ID3D1xShaderResourceView; +typedef ID3D1X(DepthStencilView) ID3D1xDepthStencilView; +typedef ID3D1X(DepthStencilState) ID3D1xDepthStencilState; +typedef ID3D1X(InputLayout) ID3D1xInputLayout; +typedef ID3D1X(Buffer) ID3D1xBuffer; +typedef ID3D1X(VertexShader) ID3D1xVertexShader; +typedef ID3D1X(PixelShader) ID3D1xPixelShader; +typedef ID3D1X(GeometryShader) ID3D1xGeometryShader; +typedef ID3D1X(BlendState) ID3D1xBlendState; +typedef ID3D1X(RasterizerState) ID3D1xRasterizerState; +typedef ID3D1X(SamplerState) ID3D1xSamplerState; +typedef ID3D1X(Query) ID3D1xQuery; +typedef ID3D1X(ShaderReflection) ID3D1xShaderReflection; +typedef ID3D1X(ShaderReflectionVariable) ID3D1xShaderReflectionVariable; +typedef ID3D1X(ShaderReflectionConstantBuffer) ID3D1xShaderReflectionConstantBuffer; +typedef D3D1X_(VIEWPORT) D3D1x_VIEWPORT; +typedef D3D1X_(QUERY_DESC) D3D1x_QUERY_DESC; +typedef D3D1X_(SHADER_BUFFER_DESC) D3D1x_SHADER_BUFFER_DESC; +typedef D3D1X_(SHADER_VARIABLE_DESC) D3D1x_SHADER_VARIABLE_DESC; +// Blob is the same +typedef ID3D10Blob ID3D1xBlob; + +#if (OVR_D3D_VERSION == 10) + typedef ID3D10Device ID3D1xDeviceContext; +#else + typedef ID3D11DeviceContext ID3D1xDeviceContext; +#endif + + +// Assert on HRESULT failure +inline void VERIFY_HRESULT(HRESULT hr) +{ + if (FAILED(hr)) + OVR_ASSERT(false); +} + +class Buffer; + +// Rendering parameters/pointers describing D3DX rendering setup. +struct RenderParams +{ + ID3D1xDevice* pDevice; + ID3D1xDeviceContext* pContext; + ID3D1xRenderTargetView* pBackBufferRT; + IDXGISwapChain* pSwapChain; + Sizei RTSize; + int Multisample; +}; + + +// Rendering primitive type used to render Model. +enum PrimitiveType +{ + Prim_Triangles, + Prim_Lines, + Prim_TriangleStrip, + Prim_Unknown, + Prim_Count +}; + +// Types of shaders that can be stored together in a ShaderSet. +enum ShaderStage +{ + Shader_Vertex = 0, + Shader_Fragment = 2, + Shader_Pixel = 2, + Shader_Count = 3, +}; + +enum MapFlags +{ + Map_Discard = 1, + Map_Read = 2, // do not use + Map_Unsynchronized = 4, // like D3D11_MAP_NO_OVERWRITE +}; + + +// Buffer types used for uploading geometry & constants. +enum BufferUsage +{ + Buffer_Unknown = 0, + Buffer_Vertex = 1, + Buffer_Index = 2, + Buffer_Uniform = 4, + Buffer_TypeMask = 0xff, + Buffer_ReadOnly = 0x100, // Buffer must be created with Data(). 
+}; + +enum TextureFormat +{ + Texture_RGBA = 0x0100, + Texture_Depth = 0x8000, + Texture_TypeMask = 0xff00, + Texture_SamplesMask = 0x00ff, + Texture_RenderTarget = 0x10000, + Texture_GenMipmaps = 0x20000, +}; + +// Texture sampling modes. +enum SampleMode +{ + Sample_Linear = 0, + Sample_Nearest = 1, + Sample_Anisotropic = 2, + Sample_FilterMask = 3, + + Sample_Repeat = 0, + Sample_Clamp = 4, + Sample_ClampBorder = 8, // If unsupported Clamp is used instead. + Sample_AddressMask =12, + + Sample_Count =13, +}; + +// Base class for vertex and pixel shaders. Stored in ShaderSet. +class Shader : public RefCountBase<Shader> +{ + friend class ShaderSet; + +protected: + ShaderStage Stage; + +public: + Shader(ShaderStage s) : Stage(s) {} + virtual ~Shader() {} + + ShaderStage GetStage() const { return Stage; } + + virtual void Set(PrimitiveType) const { } + virtual void SetUniformBuffer(class Buffer* buffers, int i = 0) { OVR_UNUSED2(buffers, i); } + +protected: + virtual bool SetUniform(const char* name, int n, const float* v) { OVR_UNUSED3(name, n, v); return false; } + virtual bool SetUniformBool(const char* name, int n, const bool* v) { OVR_UNUSED3(name, n, v); return false; } +}; + + + +// A group of shaders, one per stage. +// A ShaderSet is applied to a RenderDevice for rendering with a given fill. +class ShaderSet : public RefCountBase<ShaderSet> +{ +protected: + Ptr<Shader> Shaders[Shader_Count]; + +public: + ShaderSet() { } + ~ShaderSet() { } + + virtual void SetShader(Shader *s) + { + Shaders[s->GetStage()] = s; + } + virtual void UnsetShader(int stage) + { + Shaders[stage] = NULL; + } + Shader* GetShader(int stage) { return Shaders[stage]; } + + virtual void Set(PrimitiveType prim) const + { + for (int i = 0; i < Shader_Count; i++) + if (Shaders[i]) + Shaders[i]->Set(prim); + } + + // Set a uniform (other than the standard matrices). It is undefined whether the + // uniforms from one shader occupy the same space as those in other shaders + // (unless a buffer is used, then each buffer is independent). + virtual bool SetUniform(const char* name, int n, const float* v) + { + bool result = 0; + for (int i = 0; i < Shader_Count; i++) + if (Shaders[i]) + result |= Shaders[i]->SetUniform(name, n, v); + + return result; + } + bool SetUniform1f(const char* name, float x) + { + const float v[] = {x}; + return SetUniform(name, 1, v); + } + bool SetUniform2f(const char* name, float x, float y) + { + const float v[] = {x,y}; + return SetUniform(name, 2, v); + } + bool SetUniform3f(const char* name, float x, float y, float z) + { + const float v[] = {x,y,z}; + return SetUniform(name, 3, v); + } + bool SetUniform4f(const char* name, float x, float y, float z, float w = 1) + { + const float v[] = {x,y,z,w}; + return SetUniform(name, 4, v); + } + + bool SetUniformv(const char* name, const Vector3f& v) + { + const float a[] = {v.x,v.y,v.z,1}; + return SetUniform(name, 4, a); + } + + virtual bool SetUniform4x4f(const char* name, const Matrix4f& m) + { + Matrix4f mt = m.Transposed(); + return SetUniform(name, 16, &mt.M[0][0]); + } +}; + + +// Fill combines a ShaderSet (vertex, pixel) with textures, if any. +// Every model has a fill. 
+class ShaderFill : public RefCountBase<ShaderFill> +{ + Ptr<ShaderSet> Shaders; + Ptr<class Texture> Textures[8]; + void* InputLayout; // HACK this should be abstracted + +public: + ShaderFill(ShaderSet* sh) : Shaders(sh) { InputLayout = NULL; } + ShaderFill(ShaderSet& sh) : Shaders(sh) { InputLayout = NULL; } + + ShaderSet* GetShaders() const { return Shaders; } + void* GetInputLayout() const { return InputLayout; } + + virtual void Set(PrimitiveType prim = Prim_Unknown) const; + virtual void SetTexture(int i, class Texture* tex) { if (i < 8) Textures[i] = tex; } + void SetInputLayout(void* newIL) { InputLayout = (void*)newIL; } +}; + + +class ShaderBase : public Shader +{ +public: + RenderParams* pParams; + unsigned char* UniformData; + int UniformsSize; + + enum VarType + { + VARTYPE_FLOAT, + VARTYPE_INT, + VARTYPE_BOOL, + }; + + struct Uniform + { + const char* Name; + VarType Type; + int Offset, Size; + }; + const Uniform* UniformRefl; + size_t UniformReflSize; + + ShaderBase(RenderParams* rp, ShaderStage stage); + ~ShaderBase(); + + ShaderStage GetStage() const { return Stage; } + + void InitUniforms(const Uniform* refl, size_t reflSize); + bool SetUniform(const char* name, int n, const float* v); + bool SetUniformBool(const char* name, int n, const bool* v); + + void UpdateBuffer(Buffer* b); +}; + + +template<ShaderStage SStage, class D3DShaderType> +class ShaderImpl : public ShaderBase +{ +public: + D3DShaderType* D3DShader; + + ShaderImpl(RenderParams* rp, void* s, size_t size, const Uniform* refl, size_t reflSize) : ShaderBase(rp, SStage) + { + Load(s, size); + InitUniforms(refl, reflSize); + } + ~ShaderImpl() + { + if (D3DShader) + D3DShader->Release(); + } + + // These functions have specializations. + bool Load(void* shader, size_t size); + void Set(PrimitiveType prim) const; + void SetUniformBuffer(Buffer* buffers, int i = 0); +}; + +typedef ShaderImpl<Shader_Vertex, ID3D1xVertexShader> VertexShader; +typedef ShaderImpl<Shader_Fragment, ID3D1xPixelShader> PixelShader; + + +class Buffer : public RefCountBase<Buffer> +{ +public: + RenderParams* pParams; + Ptr<ID3D1xBuffer> D3DBuffer; + size_t Size; + int Use; + bool Dynamic; + +public: + Buffer(RenderParams* rp) : pParams(rp), Size(0), Use(0) {} + ~Buffer(); + + ID3D1xBuffer* GetBuffer() const { return D3DBuffer; } + + virtual size_t GetSize() { return Size; } + virtual void* Map(size_t start, size_t size, int flags = 0); + virtual bool Unmap(void *m); + virtual bool Data(int use, const void* buffer, size_t size); +}; + + +class Texture : public RefCountBase<Texture> +{ +public: + RenderParams* pParams; + Ptr<ID3D1xTexture2D> Tex; + Ptr<ID3D1xShaderResourceView> TexSv; + Ptr<ID3D1xRenderTargetView> TexRtv; + Ptr<ID3D1xDepthStencilView> TexDsv; + mutable Ptr<ID3D1xSamplerState> Sampler; + Sizei TextureSize; + int Samples; + + Texture(RenderParams* rp, int fmt, const Sizei texSize, + ID3D1xSamplerState* sampler, int samples = 1); + ~Texture(); + + virtual Sizei GetSize() const { return TextureSize; } + virtual int GetSamples() const { return Samples; } + + // virtual void SetSampleMode(int sm); + + // Updates texture to point to specified resources + // - used for slave rendering. 
+ void UpdatePlaceholderTexture(ID3D1xTexture2D* texture, + ID3D1xShaderResourceView* psrv, + const Sizei& textureSize) + { + Tex = texture; + TexSv = psrv; + TexRtv.Clear(); + TexDsv.Clear(); + + TextureSize = textureSize; + +#ifdef OVR_BUILD_DEBUG + D3D1X_(TEXTURE2D_DESC) desc; + texture->GetDesc(&desc); + OVR_ASSERT(TextureSize == Sizei(desc.Width, desc.Height)); +#endif + } + + + virtual void Set(int slot, ShaderStage stage = Shader_Fragment) const; + +}; + + +class GpuTimer : public RefCountBase<GpuTimer> +{ +public: + GpuTimer() + : QuerySets(MaxNumQueryFrames) + , D3dDevice(NULL) + , Context(NULL) + , LastQueuedFrame(-1) + , LastTimedFrame(-1) + { } + + void Init(ID3D1xDevice* device, ID3D1xDeviceContext* content); + + void BeginQuery(); + void EndQuery(); + + // Returns -1 if timing is invalid + float GetTiming(bool blockUntilValid); + +protected: + static const unsigned MaxNumQueryFrames = 10; + + int GotoNextFrame(int frame) + { + return (frame + 1) % MaxNumQueryFrames; + } + + _COM_SMARTPTR_TYPEDEF(ID3D1xQuery, __uuidof(ID3D1xQuery)); + + struct GpuQuerySets + { + ID3D1xQueryPtr DisjointQuery; + ID3D1xQueryPtr TimeStartQuery; + ID3D1xQueryPtr TimeEndQuery; + bool QueryStarted; + bool QueryAwaitingTiming; + + GpuQuerySets() : QueryStarted(false), QueryAwaitingTiming(false) {} + }; + Array<GpuQuerySets> QuerySets; + + int LastQueuedFrame; + int LastTimedFrame; + + Ptr<ID3D1xDevice> D3dDevice; + Ptr<ID3D1xDeviceContext> Context; +}; + +}}} // OVR::CAPI::D3D1X + +#endif // INC_OVR_CAPI_D3D10/11_Util_h diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.cpp new file mode 100644 index 0000000..21b885e --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.cpp @@ -0,0 +1,251 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_DistortionRenderer.cpp +Content : Experimental distortion renderer +Created : March 7th, 2014 +Authors : Tom Heath + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_D3D9_DistortionRenderer.h" +#define OVR_D3D_VERSION 9 +#include "../../OVR_CAPI_D3D.h" + +namespace OVR { namespace CAPI { namespace D3D9 { + + +///QUESTION : Why not just a normal constructor? 
+CAPI::DistortionRenderer* DistortionRenderer::Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState) +{ + return new DistortionRenderer(hmd, timeManager, renderState); +} + +DistortionRenderer::DistortionRenderer(ovrHmd hmd, FrameTimeManager& timeManager, + const HMDRenderState& renderState) + : CAPI::DistortionRenderer(ovrRenderAPI_D3D9, hmd, timeManager, renderState) +{ +} +/**********************************************/ +DistortionRenderer::~DistortionRenderer() +{ + //Release any memory + eachEye[0].dxIndices->Release(); + eachEye[0].dxVerts->Release(); + eachEye[1].dxIndices->Release(); + eachEye[1].dxVerts->Release(); +} + + +/******************************************************************************/ +bool DistortionRenderer::Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned arg_distortionCaps) +{ + // TBD: Decide if hmdCaps are needed here or are a part of RenderState + OVR_UNUSED(hmdCaps); + + ///QUESTION - what is returned bool for??? Are we happy with this true, if not config. + const ovrD3D9Config * config = (const ovrD3D9Config*)apiConfig; + if (!config) return true; + if (!config->D3D9.pDevice) return false; + + //Glean all the required variables from the input structures + device = config->D3D9.pDevice; + screenSize = config->D3D9.Header.RTSize; + distortionCaps = arg_distortionCaps; + + CreateVertexDeclaration(); + CreateDistortionShaders(); + Create_Distortion_Models(); + + return (true); +} + + +/**************************************************************/ +void DistortionRenderer::SubmitEye(int eyeId, ovrTexture* eyeTexture) +{ + //Doesn't do a lot in here?? + const ovrD3D9Texture* tex = (const ovrD3D9Texture*)eyeTexture; + + //Write in values + eachEye[eyeId].texture = tex->D3D9.pTexture; + + //Its only at this point we discover what the viewport of the texture is. + //because presumably we allow users to realtime adjust the resolution. + //Which begs the question - why did we ask them what viewport they were + //using before, which gave them a set of UV offsets. In fact, our + //asking for eye mesh must be entirely independed of these viewports, + //presumably only to get the parameters. + + ovrEyeDesc ed = RState.EyeRenderDesc[eyeId].Desc; + ed.TextureSize = tex->D3D9.Header.TextureSize; + ed.RenderViewport = tex->D3D9.Header.RenderViewport; + + ovrHmd_GetRenderScaleAndOffset(HMD, ed, distortionCaps, eachEye[eyeId].UVScaleOffset); +} + + +/******************************************************************/ +void DistortionRenderer::EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor) +{ + OVR_UNUSED(swapBuffers); + OVR_UNUSED(latencyTesterDrawColor); + + ///QUESTION : Should I be clearing the screen? + ///QUESTION : Should I be ensuring the screen is the render target + + if (!TimeManager.NeedDistortionTimeMeasurement()) + { + if (RState.DistortionCaps & ovrDistortion_TimeWarp) + { + // Wait for timewarp distortion if it is time and Gpu idle + WaitTillTimeAndFlushGpu(TimeManager.GetFrameTiming().TimewarpPointTime); + } + + RenderBothDistortionMeshes(); + } + else + { + // If needed, measure distortion time so that TimeManager can better estimate + // latency-reducing time-warp wait timing. 
+ WaitUntilGpuIdle(); + double distortionStartTime = ovr_GetTimeInSeconds(); + + RenderBothDistortionMeshes(); + WaitUntilGpuIdle(); + + TimeManager.AddDistortionTimeMeasurement(ovr_GetTimeInSeconds() - distortionStartTime); + } + + if(latencyTesterDrawColor) + { + ///QUESTION : Is this still to be supported? + ///renderLatencyQuad(latencyTesterDrawColor); + } + + if(latencyTester2DrawColor) + { + // TODO: + } + + if (swapBuffers) + { + device->Present( NULL, NULL, NULL, NULL ); + + /// if (RParams.pSwapChain) + { + /// UINT swapInterval = (RState.HMDCaps & ovrHmdCap_NoVSync) ? 0 : 1; + /// RParams.pSwapChain->Present(swapInterval, 0); + + // Force GPU to flush the scene, resulting in the lowest possible latency. + // It's critical that this flush is *after* present. + /// WaitUntilGpuIdle(); + } + /// else + { + // TBD: Generate error - swapbuffer option used with null swapchain. + } + } +} + + +void DistortionRenderer::WaitUntilGpuIdle() +{ +#if 0 + // Flush and Stall CPU while waiting for GPU to complete rendering all of the queued draw calls + D3D1x_QUERY_DESC queryDesc = { D3D1X_(QUERY_EVENT), 0 }; + Ptr<ID3D1xQuery> query; + BOOL done = FALSE; + + if (RParams.pDevice->CreateQuery(&queryDesc, &query.GetRawRef()) == S_OK) + { + D3DSELECT_10_11(query->End(), + RParams.pContext->End(query)); + + // GetData will returns S_OK for both done == TRUE or FALSE. + // Exit on failure to avoid infinite loop. + do { } + while(!done && + !FAILED(D3DSELECT_10_11(query->GetData(&done, sizeof(BOOL), 0), + RParams.pContext->GetData(query, &done, sizeof(BOOL), 0))) + ); + } +#endif +} + +double DistortionRenderer::WaitTillTimeAndFlushGpu(double absTime) +{ + +OVR_UNUSED(absTime); +#if 0 + double initialTime = ovr_GetTimeInSeconds(); + if (initialTime >= absTime) + return 0.0; + + // Flush and Stall CPU while waiting for GPU to complete rendering all of the queued draw calls + D3D1x_QUERY_DESC queryDesc = { D3D1X_(QUERY_EVENT), 0 }; + Ptr<ID3D1xQuery> query; + BOOL done = FALSE; + bool callGetData = false; + + if (RParams.pDevice->CreateQuery(&queryDesc, &query.GetRawRef()) == S_OK) + { + D3DSELECT_10_11(query->End(), + RParams.pContext->End(query)); + callGetData = true; + } + + double newTime = initialTime; + volatile int i; + + while (newTime < absTime) + { + if (callGetData) + { + // GetData will returns S_OK for both done == TRUE or FALSE. + // Stop calling GetData on failure. + callGetData = !FAILED(D3DSELECT_10_11(query->GetData(&done, sizeof(BOOL), 0), + RParams.pContext->GetData(query, &done, sizeof(BOOL), 0))) && !done; + } + else + { + for (int j = 0; j < 50; j++) + i = 0; + } + newTime = ovr_GetTimeInSeconds(); + } + + // How long we waited + return newTime - initialTime; +#endif + return 0; //dummy +} + + + + + + + +}}} // OVR::CAPI::D3D1X + + diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.h b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.h new file mode 100644 index 0000000..9332b83 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_DistortionRenderer.h @@ -0,0 +1,120 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_DistortionRenderer.h +Content : Experimental distortion renderer +Created : March 7, 2014 +Authors : Tom Heath + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
+ +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#undef new + +#if _MSC_VER < 1700 +#include <d3dx9.h> +#else +#include <d3d9.h> +#endif + +#if defined(OVR_DEFINE_NEW) +#define new OVR_DEFINE_NEW +#endif + +#include "../CAPI_DistortionRenderer.h" + + +namespace OVR { namespace CAPI { namespace D3D9 { + + +//Implementation of DistortionRenderer for D3D9. +/***************************************************/ +class DistortionRenderer : public CAPI::DistortionRenderer +{ +public: + DistortionRenderer(ovrHmd hmd, FrameTimeManager& timeManager, const HMDRenderState& renderState); + ~DistortionRenderer(); + + // Creation function for the device. + static CAPI::DistortionRenderer* Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState); + + // ***** Public DistortionRenderer interface + virtual bool Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps); + + virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture); + + virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor); + + // TBD: Make public? + void WaitUntilGpuIdle(); + + // Similar to ovr_WaitTillTime but it also flushes GPU. + // Note, it exits when time expires, even if GPU is not in idle state yet. 
+ double WaitTillTimeAndFlushGpu(double absTime); + +private: + + //Functions + void CreateDistortionShaders(void); + void Create_Distortion_Models(void); + void CreateVertexDeclaration(void); + void RenderBothDistortionMeshes(); + void RecordAndSetState(int which, int type, DWORD newValue); + void RevertAllStates(void); + + + //Data, structures and pointers + IDirect3DDevice9 * device; + IDirect3DVertexDeclaration9 * vertexDecl; + IDirect3DPixelShader9 * pixelShader; + IDirect3DVertexShader9 * vertexShader; + IDirect3DVertexShader9 * vertexShaderTimewarp; + ovrSizei screenSize; + unsigned distortionCaps; + + struct FOR_EACH_EYE + { + IDirect3DVertexBuffer9 * dxVerts; + IDirect3DIndexBuffer9 * dxIndices; + int numVerts; + int numIndices; + IDirect3DTexture9 * texture; + ovrVector2f UVScaleOffset[2]; + } eachEye[2]; + + + //Structure to store our state changes + #define MAX_SAVED_STATES 100 + struct SavedStateType + { + int which; //0 for samplerstate, 1 for renderstate + int type; + DWORD valueToRevertTo; + } savedState[MAX_SAVED_STATES]; + + //Keep track of how many we've done, for reverting + int numSavedStates; + + + +}; + +}}} // OVR::CAPI::D3D9 diff --git a/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_Util.cpp b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_Util.cpp new file mode 100644 index 0000000..ea36100 --- /dev/null +++ b/LibOVR/Src/CAPI/D3D1X/CAPI_D3D9_Util.cpp @@ -0,0 +1,317 @@ +/************************************************************************************ + +Filename : CAPI_D3D1X_Util.cpp +Content : D3D9 utility functions for rendering +Created : March 7 , 2014 +Authors : Tom Heath + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_D3D9_DistortionRenderer.h" +#define OVR_D3D_VERSION 9 +#include "../../OVR_CAPI_D3D.h" + + +namespace OVR { namespace CAPI { namespace D3D9 { + + + +#define PRECOMPILE_FLAG 0 +#if !PRECOMPILE_FLAG +//To make these, you need to run it with PRECOMPILE_FLAG, which also uses them, so good for debugging. +//Then cut and paste these from the output window. +//Then turn off the flag. 
+DWORD precompiledVertexShaderSrc[95] = {4294836736,3014654,1111577667,28,127,4294836736,2,28,33024,120,68,131074,655361,88,0,104,2,131073,88,0,1415936325,1668436847,1716475477,1952805734,2880154368,196609,131073,1,0,1415936325,1668436847,1666405973,6646881,845116278,1291858015,1869767529,1952870259,693250080,1397508128,1750278220,1919247457,1836008224,1701603696,775495794,959330610,858665525,3223857,83886161,2685337601,1065353216,0,1056964608,0,33554463,2147483648,2416902144,33554463,2147614720,2416902145,33554463,2147483653,2416902146,33554463,2147549189,2416902147,33554463,2147614725,2416902148,33554433,2147680256,2699296768,67108868,3758292992,2162425856,2430861314,2699296770,67108868,3758292993,2162425856,2430861315,2699296770,67108868,3758292994,2162425856,2430861316,2699296770,67108868,3222208512,2416181248,2689597441,2686779393,33554433,3758161923,2415919105,65535,}; +DWORD precompiledVertexShaderTimewarpSrc[293] = {4294836736,4456446,1111577667,28,215,4294836736,4,28,33024,208,108,1310722,5373956,124,0,140,262146,1179652,124,0,157,131074,655361,176,0,192,2,131073,176,0,1382381893,1952543855,1164865385,2868929646,196611,262148,1,0,1382381893,1952543855,1399746409,1953653108,1702446336,1918070612,1331058019,1702061670,2880110708,196609,131073,1,0,1415936325,1668436847,1666405973,6646881,845116278,1291858015,1869767529,1952870259,693250080,1397508128,1750278220,1919247457,1836008224,1701603696,775495794,959330610,858665525,3223857,83886161,2685337601,1065353216,0,1056964608,0,33554463,2147483648,2416902144,33554463,2147549184,2416902145,33554463,2147614720,2416902146,33554463,2147483653,2416902147,33554463,2147549189,2416902148,33554463,2147614725,2416902149,33554433,2147549184,2695495684,50331650,2147549185,2164260864,2695495700,33554433,2147614720,2695495685,50331650,2147614721,2169831424,2695495701,33554433,2147745792,2695495686,50331650,2147745793,2175401984,2695495702,33554433,2148007936,2695495687,50331650,2148007937,2180972544,2695495703,67108868,2148466688,2415919105,2162425857,2162425856,67108868,2148466689,2416181251,2689597441,2684682241,50331657,2147549186,2162425856,2162425857,33554438,2147549186,2147483650,33554433,2147680259,2699296772,50331650,2147876866,2177892355,2697986068,67108868,2147549187,2415919105,2158624770,2689925124,67108868,2147549188,2415919105,2153054210,2684354564,33554433,2147680261,2699296773,50331650,2147876866,2177105925,2697199637,67108868,2147614723,2415919105,2153054210,2689925125,67108868,2147614724,2415919105,2158624770,2684354565,33554433,2147680261,2699296774,50331650,2147811333,2177171461,2697265174,67108868,2147745795,2415919105,2147483653,2689925126,67108868,2147745796,2415919105,2158624773,2684354566,33554433,2147680261,2699296775,50331650,2148073477,2166685701,2686779415,67108868,2148007939,2415919105,2147483653,2689925127,67108868,2148007940,2415919105,2164195333,2684354567,50331657,2147549189,2162425860,2162425857,50331657,2147614725,2162425859,2162425857,50331653,2147680257,2147483650,2162425861,33554433,2147680258,2699296768,67108868,3758292992,2162425858,2162425857,2699296770,67108868,2148466689,2416181252,2689597441,2684682241,50331657,2147549189,2162425860,2162425857,50331657,2147614725,2162425859,2162425857,50331657,2147549185,2162425856,2162425857,33554438,2147549185,2147483649,50331653,2147680257,2147483649,2162425861,67108868,3758292993,2162425858,2162425857,2699296770,67108868,2148466689,2416181253,2689597441,2684682241,50331657,2147549188,2162425860,2162425857,50331657,2147614724,2162425859,2162425857,50331657,2147549184,216
2425856,2162425857,33554438,2147549184,2147483648,50331653,2147680256,2147483648,2162425860,67108868,3758292994,2162425858,2162425856,2699296770,67108868,3222208512,2416181248,2689597441,2686779393,33554433,3758161923,2415919106,65535,}; +DWORD precompiledPixelShaderSrc[84] = {4294902528,2228222,1111577667,28,79,4294902528,1,28,33024,72,48,3,131073,56,0,1954047316,6648437,786436,65537,1,0,861893488,1291858015,1869767529,1952870259,693250080,1397508128,1750278220,1919247457,1836008224,1701603696,775495794,959330610,858665525,3223857,83886161,2685337600,1065353216,0,0,0,33554463,2147483653,2416115712,33554463,2147549189,2416115713,33554463,2147614725,2416115714,33554463,2147680261,2415984643,33554463,2415919104,2685339648,50331714,2148466688,2430861312,2699298816,67108868,2148073472,2147483648,2690908160,2686779392,50331714,2148466689,2430861313,2699298816,33554433,2147614720,2153054209,50331714,2148466689,2430861314,2699298816,33554433,2147745792,2158624769,50331653,2148468736,2162425856,2415919107,65535,}; + +#else +#include "d3dcompiler.h" +/***************************************************************************/ +const char* VertexShaderSrc = + + "float2 EyeToSourceUVScale : register(c0); \n" + "float2 EyeToSourceUVOffset : register(c2); \n" + + "void main(in float2 Position : POSITION, in float TimeWarp : POSITION1, \n" + " in float Vignette : POSITION2, in float2 TexCoord0 : TEXCOORD0, \n" + " in float2 TexCoord1 : TEXCOORD1, in float2 TexCoord2 : TEXCOORD2, \n" + " out float4 oPosition : SV_Position, out float2 oTexCoord0 : TEXCOORD0, \n" + " out float2 oTexCoord1 : TEXCOORD1, out float2 oTexCoord2 : TEXCOORD2, \n" + " out float oVignette : TEXCOORD3) \n" + "{ \n" + " oTexCoord0 = EyeToSourceUVScale * TexCoord0 + EyeToSourceUVOffset; \n" + " oTexCoord1 = EyeToSourceUVScale * TexCoord1 + EyeToSourceUVOffset; \n" + " oTexCoord2 = EyeToSourceUVScale * TexCoord2 + EyeToSourceUVOffset; \n" + " oVignette = Vignette; \n" + " oPosition = float4(Position.xy, 0.5, 1.0); \n" + "}"; + +/***************************************************************************/ +const char* VertexShaderTimewarpSrc = + + "float2 EyeToSourceUVScale : register(c0); \n" + "float2 EyeToSourceUVOffset : register(c2); \n" + "float4x4 EyeRotationStart : register(c4); \n" + "float4x4 EyeRotationEnd : register(c20); \n" + + "float2 TimewarpTexCoord(float2 TexCoord, float4x4 rotMat) \n" + "{ \n" + " float3 transformed = float3( mul ( rotMat, float4(TexCoord.xy, 1, 1) ).xyz); \n" + " float2 flattened = (transformed.xy / transformed.z); \n" + " return(EyeToSourceUVScale * flattened + EyeToSourceUVOffset); \n" + "} \n" + "void main(in float2 Position : POSITION, in float TimeWarp : POSITION1, \n" + " in float Vignette : POSITION2, in float2 TexCoord0 : TEXCOORD0, \n" + " in float2 TexCoord1 : TEXCOORD1, in float2 TexCoord2 : TEXCOORD2, \n" + " out float4 oPosition : SV_Position, out float2 oTexCoord0 : TEXCOORD0, \n" + " out float2 oTexCoord1 : TEXCOORD1, out float2 oTexCoord2 : TEXCOORD2, \n" + " out float oVignette : TEXCOORD3) \n" + "{ \n" + " float4x4 lerpedEyeRot = lerp(EyeRotationStart, EyeRotationEnd, TimeWarp); \n" + " oTexCoord0 = TimewarpTexCoord(TexCoord0,lerpedEyeRot); \n" + " oTexCoord1 = TimewarpTexCoord(TexCoord1,lerpedEyeRot); \n" + " oTexCoord2 = TimewarpTexCoord(TexCoord2,lerpedEyeRot); \n" + " oVignette = Vignette; \n" + " oPosition = float4(Position.xy, 0.5, 1.0); \n" + "}"; + +/***************************************************************************/ +const char* PixelShaderSrc = + + " sampler2D 
Texture : register(s0); \n" + + "float4 main(in float4 oPosition : SV_Position, in float2 oTexCoord0 : TEXCOORD0, \n" + " in float2 oTexCoord1 : TEXCOORD1, in float2 oTexCoord2 : TEXCOORD2, \n" + " in float oVignette : TEXCOORD3) \n" + " : SV_Target \n" + "{ \n" + " float R = tex2D(Texture,oTexCoord0).r; \n" + " float G = tex2D(Texture,oTexCoord1).g; \n" + " float B = tex2D(Texture,oTexCoord2).b; \n" + " return (oVignette*float4(R,G,B,1)); \n" + "}"; + +/*************************************************************/ +ID3DBlob* ShaderCompile(char * shaderName, const char * shaderSrcString, const char * profile) +{ + ID3DBlob* pShaderCode = NULL; + ID3DBlob* pErrorMsg = NULL; + + if (FAILED(D3DCompile(shaderSrcString, strlen(shaderSrcString),NULL,NULL,NULL, + "main",profile,D3DCOMPILE_OPTIMIZATION_LEVEL3,0, + &pShaderCode,&pErrorMsg))) + MessageBoxA(NULL,(char *) pErrorMsg->GetBufferPointer(),"", MB_OK); + if (pErrorMsg) pErrorMsg->Release(); + + //Now write out blob + char tempString[1000]; + int numDWORDs = ((int)pShaderCode->GetBufferSize())/4; + DWORD * ptr = (DWORD *)pShaderCode->GetBufferPointer(); + sprintf_s(tempString,"DWORD %s[%d] = {",shaderName,numDWORDs); + OutputDebugStringA(tempString); + for (int i = 0;i < numDWORDs; i++) + { + sprintf_s(tempString,"%lu,",ptr[i]); + OutputDebugStringA(tempString); + } + OutputDebugStringA("};\n"); + + return(pShaderCode); +} +#endif + +/***********************************************************/ +void DistortionRenderer::CreateDistortionShaders(void) +{ +#if PRECOMPILE_FLAG + ID3DBlob * pShaderCode; + pShaderCode = ShaderCompile("precompiledVertexShaderSrc",VertexShaderSrc,"vs_2_0"); + device->CreateVertexShader( ( DWORD* )pShaderCode->GetBufferPointer(), &vertexShader ); + pShaderCode->Release(); + + pShaderCode = ShaderCompile("precompiledVertexShaderTimewarpSrc",VertexShaderTimewarpSrc,"vs_2_0"); + device->CreateVertexShader( ( DWORD* )pShaderCode->GetBufferPointer(), &vertexShaderTimewarp ); + pShaderCode->Release(); + + pShaderCode = ShaderCompile("precompiledPixelShaderSrc",PixelShaderSrc,"ps_3_0"); + device->CreatePixelShader( ( DWORD* )pShaderCode->GetBufferPointer(), &pixelShader ); + pShaderCode->Release(); +#else + device->CreateVertexShader( precompiledVertexShaderSrc, &vertexShader ); + device->CreateVertexShader( precompiledVertexShaderTimewarpSrc, &vertexShaderTimewarp ); + device->CreatePixelShader( precompiledPixelShaderSrc, &pixelShader ); +#endif +} + + +/***************************************************/ +void DistortionRenderer::CreateVertexDeclaration(void) +{ + static const D3DVERTEXELEMENT9 VertexElements[7] = { + { 0, 0, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0 }, + { 0, 8, D3DDECLTYPE_FLOAT1, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 1 }, + { 0, 12, D3DDECLTYPE_FLOAT1, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 2 }, + { 0, 16, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0 }, + { 0, 24, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 1 }, + { 0, 32, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 2 }, + D3DDECL_END() }; + device->CreateVertexDeclaration( VertexElements, &vertexDecl ); +} + + +/******************************************************/ +void DistortionRenderer::Create_Distortion_Models(void) +{ + //Make the distortion models + for (int eye=0;eye<2;eye++) + { + ovrVector2f dummy_UVScaleOffset[2]; //because it needs to be updated by a later call + FOR_EACH_EYE * e = &eachEye[eye]; + ovrDistortionMesh meshData; + 
ovrHmd_CreateDistortionMesh(HMD, RState.EyeRenderDesc[eye].Desc, distortionCaps, + dummy_UVScaleOffset, &meshData); + + e->numVerts = meshData.VertexCount; + e->numIndices = meshData.IndexCount; + + device->CreateVertexBuffer( (e->numVerts)*sizeof(ovrDistortionVertex),0, 0,D3DPOOL_MANAGED, &e->dxVerts, NULL ); + ovrDistortionVertex * dxv; e->dxVerts->Lock( 0, 0, (void**)&dxv, 0 ); + for (int v=0;v<e->numVerts;v++) dxv[v] = meshData.pVertexData[v]; + + device->CreateIndexBuffer( (e->numIndices)*sizeof(u_short),0, D3DFMT_INDEX16,D3DPOOL_MANAGED, &e->dxIndices, NULL ); + unsigned short* dxi; e->dxIndices->Lock( 0, 0, (void**)&dxi, 0 ); + for (int i=0;i<e->numIndices;i++) dxi[i] = meshData.pIndexData[i]; + + ovrHmd_DestroyDistortionMesh( &meshData ); + } +} + +/**********************************************************/ +void DistortionRenderer::RenderBothDistortionMeshes(void) +{ + //Record and set render state + numSavedStates=0; + RecordAndSetState(0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR ); + RecordAndSetState(0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR ); + RecordAndSetState(0, D3DSAMP_MIPFILTER, D3DTEXF_LINEAR ); + RecordAndSetState(0, D3DSAMP_BORDERCOLOR, 0x000000 ); + RecordAndSetState(0, D3DSAMP_ADDRESSU, D3DTADDRESS_BORDER ); + RecordAndSetState(0, D3DSAMP_ADDRESSV, D3DTADDRESS_BORDER ); + RecordAndSetState(1, D3DRS_MULTISAMPLEANTIALIAS, FALSE ); + RecordAndSetState(1, D3DRS_DITHERENABLE, FALSE ); + RecordAndSetState(1, D3DRS_ZENABLE, FALSE ); + RecordAndSetState(1, D3DRS_ZWRITEENABLE, TRUE ); + RecordAndSetState(1, D3DRS_ZFUNC, D3DCMP_LESSEQUAL ); + RecordAndSetState(1, D3DRS_CULLMODE , D3DCULL_CCW ); + RecordAndSetState(1, D3DRS_ALPHABLENDENABLE , FALSE ); + RecordAndSetState(1, D3DRS_DEPTHBIAS , 0 ); + RecordAndSetState(1, D3DRS_SRCBLEND , D3DBLEND_SRCALPHA ); + RecordAndSetState(1, D3DRS_DESTBLEND , D3DBLEND_INVSRCALPHA ); + RecordAndSetState(1, D3DRS_FILLMODE, D3DFILL_SOLID ); + RecordAndSetState(1, D3DRS_ALPHATESTENABLE, FALSE); + RecordAndSetState(1, D3DRS_DEPTHBIAS , 0 ); + RecordAndSetState(1, D3DRS_LIGHTING, FALSE ); + RecordAndSetState(1, D3DRS_FOGENABLE, FALSE ); + + for (int eye=0; eye<2; eye++) + { + FOR_EACH_EYE * e = &eachEye[eye]; + D3DVIEWPORT9 vp; vp.X=0; vp.Y=0; vp.Width=screenSize.w; vp.Height=screenSize.h; vp.MinZ=0; vp.MaxZ = 1; + device->SetViewport(&vp); + device->SetStreamSource( 0, e->dxVerts,0, sizeof(ovrDistortionVertex) ); + device->SetVertexDeclaration( vertexDecl ); + device->SetIndices( e->dxIndices ); + device->SetPixelShader( pixelShader ); + device->SetTexture( 0, e->texture); + + //Choose which vertex shader, with associated additional inputs + if (distortionCaps & ovrDistortion_TimeWarp) + { + device->SetVertexShader( vertexShaderTimewarp ); + + ovrMatrix4f timeWarpMatrices[2]; + ovrHmd_GetEyeTimewarpMatrices(HMD, (ovrEyeType)eye, + RState.EyeRenderPoses[eye], timeWarpMatrices); + + // Feed identity like matrices in until we get proper timewarp calculation going on + device->SetVertexShaderConstantF(4, (float *) &timeWarpMatrices[0],4); + device->SetVertexShaderConstantF(20,(float *) &timeWarpMatrices[1],4); + } + else + { + device->SetVertexShader( vertexShader ); + } + + + //Set up vertex shader constants + device->SetVertexShaderConstantF( 0, ( FLOAT* )&(e->UVScaleOffset[0]), 1 ); + device->SetVertexShaderConstantF( 2, ( FLOAT* )&(e->UVScaleOffset[1]), 1 ); + + device->DrawIndexedPrimitive( D3DPT_TRIANGLELIST,0,0,e->numVerts,0,e->numIndices/3); + } + + //Revert render state + RevertAllStates(); + +} + 
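// --------------------------------------------------------------------------------
// [Editorial note, not part of the SDK change] The D3DVERTEXELEMENT9 table built in
// CreateVertexDeclaration() and the sizeof(ovrDistortionVertex) stride passed to
// SetStreamSource() above assume the 0.3.1 distortion-vertex layout, 40 bytes per
// vertex:
//
//     offset  0 : float2  vertex position (NDC)       -> POSITION0
//     offset  8 : float   time-warp lerp factor       -> POSITION1
//     offset 12 : float   vignette fade factor        -> POSITION2
//     offset 16 : float2  red   tan-eye-angle coord   -> TEXCOORD0
//     offset 24 : float2  green tan-eye-angle coord   -> TEXCOORD1
//     offset 32 : float2  blue  tan-eye-angle coord   -> TEXCOORD2
//
// The three per-channel coordinates feed the chromatic-aberration lookups in
// PixelShaderSrc (one tex2D fetch per colour channel).
// --------------------------------------------------------------------------------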
+/*********************************************************************************/ +void DistortionRenderer::RecordAndSetState(int which, int type, DWORD newValue) +{ + SavedStateType * sst = &savedState[numSavedStates++]; + sst->which = which; + sst->type = type; + if (which==0) + { + device->GetSamplerState( 0, (D3DSAMPLERSTATETYPE)type, &sst->valueToRevertTo); + device->SetSamplerState( 0, (D3DSAMPLERSTATETYPE)type, newValue); + } + else + { + device->GetRenderState( (D3DRENDERSTATETYPE)type, &sst->valueToRevertTo); + device->SetRenderState( (D3DRENDERSTATETYPE)type, newValue); + } +} +/*********************************************************************************/ +void DistortionRenderer::RevertAllStates(void) +{ + for (int i=0;i<numSavedStates;i++) + { + SavedStateType * sst = &savedState[i]; + if (sst->which==0) + { + device->SetSamplerState( 0, (D3DSAMPLERSTATETYPE)sst->type, sst->valueToRevertTo); + } + else + { + device->SetRenderState( (D3DRENDERSTATETYPE)sst->type, sst->valueToRevertTo); + } + } +} + + + + + + + + +}}}
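// --------------------------------------------------------------------------------
// [Editorial sketch, not part of the SDK change] Minimal host-side wiring of this
// experimental D3D9 back end, using only the interfaces that appear in this diff.
// pD3D9Device, backBufferSize, leftTex/rightTex, hmdCaps and distortionCaps are
// placeholders, and the Config/Texture union members are assumed to follow the
// same pattern as the other ovrD3D*Config / ovrD3D*Texture types; in normal use
// the renderer is reached through ovrHmd_ConfigureRendering rather than directly.
//
//     ovrD3D9Config cfg;
//     memset(&cfg, 0, sizeof(cfg));
//     cfg.D3D9.Header.API    = ovrRenderAPI_D3D9;
//     cfg.D3D9.Header.RTSize = backBufferSize;    // back-buffer size in pixels
//     cfg.D3D9.pDevice       = pD3D9Device;       // IDirect3DDevice9*
//
//     CAPI::DistortionRenderer* dr =
//         D3D9::DistortionRenderer::Create(hmd, timeManager, renderState);
//     if (dr && dr->Initialize(&cfg.Config, hmdCaps, distortionCaps))
//     {
//         // Per frame: hand over both eye textures, then let EndFrame() run the
//         // two distortion meshes and Present().
//         dr->SubmitEye(0, &leftTex.Texture);      // leftTex is an ovrD3D9Texture
//         dr->SubmitEye(1, &rightTex.Texture);
//         dr->EndFrame(true, NULL, NULL);
//     }
// --------------------------------------------------------------------------------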
\ No newline at end of file diff --git a/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.cpp b/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.cpp new file mode 100644 index 0000000..a953d73 --- /dev/null +++ b/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.cpp @@ -0,0 +1,1006 @@ +/************************************************************************************ + +Filename : CAPI_GL_DistortionRenderer.h +Content : Distortion renderer header for GL +Created : November 11, 2013 +Authors : David Borel, Lee Cooper + +Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. + +Use of this software is subject to the terms of the Oculus Inc license +agreement provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +************************************************************************************/ + +#include "CAPI_GL_DistortionRenderer.h" + +#include "../../OVR_CAPI_GL.h" + +namespace OVR { namespace CAPI { namespace GL { + + +static const char SimpleQuad_vs[] = + "uniform vec2 PositionOffset;\n" + "uniform vec2 Scale;\n" + + "attribute vec3 Position;\n" + + "void main()\n" + "{\n" + " gl_Position = vec4(Position.xy * Scale + PositionOffset, 0.5, 1.0);\n" + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform SimpleQuad_vs_refl[] = +{ + { "PositionOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "Scale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + +static const char SimpleQuad_fs[] = + "uniform vec4 Color;\n" + + "void main()\n" + "{\n" + " gl_FragColor = Color;\n" + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform SimpleQuad_fs_refl[] = +{ + { "Color", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 16 }, +}; + + +static const char Distortion_vs[] = + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + + "attribute vec2 Position;\n" + "attribute vec4 Color;\n" + "attribute vec2 TexCoord0;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.5;\n" + " gl_Position.w = 1.0;\n" + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye) + " oTexCoord0 = TexCoord0 * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + " oColor = Color;\n" // Used for vignette fade. 
+ "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform Distortion_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + +static const char Distortion_fs[] = + "uniform sampler2D Texture0;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + + "void main()\n" + "{\n" + " gl_FragColor = texture2D(Texture0, oTexCoord0);\n" + " gl_FragColor.a = 1.0;\n" + "}\n"; + + +static const char DistortionTimewarp_vs[] = + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + "uniform mat4 EyeRotationStart;\n" + "uniform mat4 EyeRotationEnd;\n" + + "attribute vec2 Position;\n" + "attribute vec4 Color;\n" + "attribute vec2 TexCoord0;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.0;\n" + " gl_Position.w = 1.0;\n" + + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + " vec3 TanEyeAngle = vec3 ( TexCoord0.x, TexCoord0.y, 1.0 );\n" + + // Accurate time warp lerp vs. faster +#if 1 + // Apply the two 3x3 timewarp rotations to these vectors. + " vec3 TransformedStart = (EyeRotationStart * vec4(TanEyeAngle, 0)).xyz;\n" + " vec3 TransformedEnd = (EyeRotationEnd * vec4(TanEyeAngle, 0)).xyz;\n" + // And blend between them. + " vec3 Transformed = mix ( TransformedStart, TransformedEnd, Color.a );\n" +#else + " mat3 EyeRotation = mix ( EyeRotationStart, EyeRotationEnd, Color.a );\n" + " vec3 Transformed = EyeRotation * TanEyeAngle;\n" +#endif + + // Project them back onto the Z=1 plane of the rendered images. + " float RecipZ = 1.0 / Transformed.z;\n" + " vec2 Flattened = vec2 ( Transformed.x * RecipZ, Transformed.y * RecipZ );\n" + + // These are now still in TanEyeAngle space. + // Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye) + " vec2 SrcCoord = Flattened * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " oTexCoord0 = SrcCoord;\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + " oColor = Color.r;\n" // Used for vignette fade. 
+ "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform DistortionTimewarp_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + + +static const char DistortionPositionalTimewarp_vs[] = + "#version 150\n" + + "uniform sampler2D Texture0;\n" + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + "uniform vec2 DepthProjector;\n" + "uniform vec2 DepthDimSize;\n" + "uniform mat4 EyeRotationStart;\n" + "uniform mat4 EyeRotationEnd;\n" + + "in vec2 Position;\n" + "in vec4 Color;\n" + "in vec2 TexCoord0;\n" + "in vec2 TexCoord1;\n" + "in vec2 TexCoord2;\n" + + "out vec4 oColor;\n" + "out vec2 oTexCoord0;\n" + + "vec4 PositionFromDepth(vec2 inTexCoord)\n" + "{\n" + " vec2 eyeToSourceTexCoord = inTexCoord * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " eyeToSourceTexCoord.y = 1 - eyeToSourceTexCoord.y;\n" + " float depth = texelFetch(Texture0, ivec2(eyeToSourceTexCoord * DepthDimSize), 0).x;\n" + " float linearDepth = DepthProjector.y / (depth - DepthProjector.x);\n" + " vec4 retVal = vec4(inTexCoord, 1, 1);\n" + " retVal.xyz *= linearDepth;\n" + " return retVal;\n" + "}\n" + + "vec2 TimewarpTexCoordToWarpedPos(vec2 inTexCoord, float a)\n" + "{\n" + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + // Apply the 4x4 timewarp rotation to these vectors. + " vec4 inputPos = PositionFromDepth(inTexCoord);\n" + " vec3 transformed = mix ( EyeRotationStart * inputPos, EyeRotationEnd * inputPos, a ).xyz;\n" + // Project them back onto the Z=1 plane of the rendered images. + " vec2 flattened = transformed.xy / transformed.z;\n" + // Scale them into ([0,0.5],[0,1]) or ([0.5,0],[0,1]) UV lookup space (depending on eye) + " vec2 noDepthUV = flattened * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + //" float depth = texture2DLod(Texture0, noDepthUV, 0).r;\n" + " return noDepthUV.xy;\n" + "}\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.0;\n" + " gl_Position.w = 1.0;\n" + + // warped positions are a bit more involved, hence a separate function + " oTexCoord0 = TimewarpTexCoordToWarpedPos(TexCoord0, Color.a);\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + + " oColor = vec4(Color.r); // Used for vignette fade.\n" + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform DistortionPositionalTimewarp_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + + +static const char DistortionChroma_vs[] = + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + + "attribute vec2 Position;\n" + "attribute vec4 Color;\n" + "attribute vec2 TexCoord0;\n" + "attribute vec2 TexCoord1;\n" + "attribute vec2 TexCoord2;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + "varying vec2 oTexCoord1;\n" + "varying vec2 oTexCoord2;\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.5;\n" + " gl_Position.w = 1.0;\n" + + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). 
+ // Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye) + " oTexCoord0 = TexCoord0 * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + " oTexCoord1 = TexCoord1 * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " oTexCoord1.y = 1-oTexCoord1.y;\n" + " oTexCoord2 = TexCoord2 * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " oTexCoord2.y = 1-oTexCoord2.y;\n" + + " oColor = Color;\n" // Used for vignette fade. + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform DistortionChroma_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + +static const char DistortionChroma_fs[] = + "uniform sampler2D Texture0;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + "varying vec2 oTexCoord1;\n" + "varying vec2 oTexCoord2;\n" + + "void main()\n" + "{\n" + " float ResultR = texture2D(Texture0, oTexCoord0).r;\n" + " float ResultG = texture2D(Texture0, oTexCoord1).g;\n" + " float ResultB = texture2D(Texture0, oTexCoord2).b;\n" + + " gl_FragColor = vec4(ResultR * oColor.r, ResultG * oColor.g, ResultB * oColor.b, 1.0);\n" + "}\n"; + + +static const char DistortionTimewarpChroma_vs[] = + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + "uniform mat4 EyeRotationStart;\n" + "uniform mat4 EyeRotationEnd;\n" + + "attribute vec2 Position;\n" + "attribute vec4 Color;\n" + "attribute vec2 TexCoord0;\n" + "attribute vec2 TexCoord1;\n" + "attribute vec2 TexCoord2;\n" + + "varying vec4 oColor;\n" + "varying vec2 oTexCoord0;\n" + "varying vec2 oTexCoord1;\n" + "varying vec2 oTexCoord2;\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.0;\n" + " gl_Position.w = 1.0;\n" + + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + " vec3 TanEyeAngleR = vec3 ( TexCoord0.x, TexCoord0.y, 1.0 );\n" + " vec3 TanEyeAngleG = vec3 ( TexCoord1.x, TexCoord1.y, 1.0 );\n" + " vec3 TanEyeAngleB = vec3 ( TexCoord2.x, TexCoord2.y, 1.0 );\n" + + // Accurate time warp lerp vs. faster +#if 1 + // Apply the two 3x3 timewarp rotations to these vectors. + " vec3 TransformedRStart = (EyeRotationStart * vec4(TanEyeAngleR, 0)).xyz;\n" + " vec3 TransformedGStart = (EyeRotationStart * vec4(TanEyeAngleG, 0)).xyz;\n" + " vec3 TransformedBStart = (EyeRotationStart * vec4(TanEyeAngleB, 0)).xyz;\n" + " vec3 TransformedREnd = (EyeRotationEnd * vec4(TanEyeAngleR, 0)).xyz;\n" + " vec3 TransformedGEnd = (EyeRotationEnd * vec4(TanEyeAngleG, 0)).xyz;\n" + " vec3 TransformedBEnd = (EyeRotationEnd * vec4(TanEyeAngleB, 0)).xyz;\n" + + // And blend between them. + " vec3 TransformedR = mix ( TransformedRStart, TransformedREnd, Color.a );\n" + " vec3 TransformedG = mix ( TransformedGStart, TransformedGEnd, Color.a );\n" + " vec3 TransformedB = mix ( TransformedBStart, TransformedBEnd, Color.a );\n" +#else + " mat3 EyeRotation = mix ( EyeRotationStart, EyeRotationEnd, Color.a );\n" + " vec3 TransformedR = EyeRotation * TanEyeAngleR;\n" + " vec3 TransformedG = EyeRotation * TanEyeAngleG;\n" + " vec3 TransformedB = EyeRotation * TanEyeAngleB;\n" +#endif + + // Project them back onto the Z=1 plane of the rendered images. 
+ " float RecipZR = 1.0 / TransformedR.z;\n" + " float RecipZG = 1.0 / TransformedG.z;\n" + " float RecipZB = 1.0 / TransformedB.z;\n" + " vec2 FlattenedR = vec2 ( TransformedR.x * RecipZR, TransformedR.y * RecipZR );\n" + " vec2 FlattenedG = vec2 ( TransformedG.x * RecipZG, TransformedG.y * RecipZG );\n" + " vec2 FlattenedB = vec2 ( TransformedB.x * RecipZB, TransformedB.y * RecipZB );\n" + + // These are now still in TanEyeAngle space. + // Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye) + " vec2 SrcCoordR = FlattenedR * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " vec2 SrcCoordG = FlattenedG * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " vec2 SrcCoordB = FlattenedB * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + + " oTexCoord0 = SrcCoordR;\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + " oTexCoord1 = SrcCoordG;\n" + " oTexCoord1.y = 1-oTexCoord1.y;\n" + " oTexCoord2 = SrcCoordB;\n" + " oTexCoord2.y = 1-oTexCoord2.y;\n" + + " oColor = Color.r;\n" // Used for vignette fade. + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform DistortionTimewarpChroma_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, + { "EyeRotationStart", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 16, 64 }, + { "EyeRotationEnd", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 80, 64 }, +}; + + +static const char DistortionPositionalTimewarpChroma_vs[] = + "#version 150\n" + "uniform sampler2D Texture0;\n" + "uniform sampler2D Texture1;\n" + "uniform vec2 EyeToSourceUVScale;\n" + "uniform vec2 EyeToSourceUVOffset;\n" + "uniform vec2 DepthProjector;\n" + "uniform vec2 DepthDimSize;\n" + "uniform mat4 EyeRotationStart;\n" + "uniform mat4 EyeRotationEnd;\n" + + "in vec2 Position;\n" + "in vec4 Color;\n" + "in vec2 TexCoord0;\n" + "in vec2 TexCoord1;\n" + "in vec2 TexCoord2;\n" + + "out vec4 oColor;\n" + "out vec2 oTexCoord0;\n" + "out vec2 oTexCoord1;\n" + "out vec2 oTexCoord2;\n" + + "vec4 PositionFromDepth(vec2 inTexCoord)\n" + "{\n" + " vec2 eyeToSourceTexCoord = inTexCoord * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + " eyeToSourceTexCoord.y = 1 - eyeToSourceTexCoord.y;\n" + " float depth = texelFetch(Texture1, ivec2(eyeToSourceTexCoord * DepthDimSize), 0).x;\n" + " float linearDepth = DepthProjector.y / (depth - DepthProjector.x);\n" + " vec4 retVal = vec4(inTexCoord, 1, 1);\n" + " retVal.xyz *= linearDepth;\n" + " return retVal;\n" + "}\n" + + "vec2 TimewarpTexCoordToWarpedPos(vec2 inTexCoord, float a)\n" + "{\n" + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + // Apply the 4x4 timewarp rotation to these vectors. + " vec4 inputPos = PositionFromDepth(inTexCoord);\n" + " vec3 transformed = mix ( EyeRotationStart * inputPos, EyeRotationEnd * inputPos, a ).xyz;\n" + // Project them back onto the Z=1 plane of the rendered images. 
+ " vec2 flattened = transformed.xy / transformed.z;\n" + // Scale them into ([0,0.5],[0,1]) or ([0.5,0],[0,1]) UV lookup space (depending on eye) + " vec2 noDepthUV = flattened * EyeToSourceUVScale + EyeToSourceUVOffset;\n" + //" float depth = texture2DLod(Texture1, noDepthUV, 0).r;\n" + " return noDepthUV.xy;\n" + "}\n" + + "void main()\n" + "{\n" + " gl_Position.x = Position.x;\n" + " gl_Position.y = Position.y;\n" + " gl_Position.z = 0.0;\n" + " gl_Position.w = 1.0;\n" + + // warped positions are a bit more involved, hence a separate function + " oTexCoord0 = TimewarpTexCoordToWarpedPos(TexCoord0, Color.a);\n" + " oTexCoord0.y = 1-oTexCoord0.y;\n" + " oTexCoord1 = TimewarpTexCoordToWarpedPos(TexCoord1, Color.a);\n" + " oTexCoord1.y = 1-oTexCoord1.y;\n" + " oTexCoord2 = TimewarpTexCoordToWarpedPos(TexCoord2, Color.a);\n" + " oTexCoord2.y = 1-oTexCoord2.y;\n" + + " oColor = vec4(Color.r); // Used for vignette fade.\n" + "}\n"; + +const OVR::CAPI::GL::ShaderBase::Uniform DistortionPositionalTimewarpChroma_vs_refl[] = +{ + { "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 }, + { "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 }, +}; + + +// Distortion pixel shader lookup. +// Bit 0: Chroma Correction +// Bit 1: Timewarp + +enum { + DistortionVertexShaderBitMask = 3, + DistortionVertexShaderCount = DistortionVertexShaderBitMask + 1, + DistortionPixelShaderBitMask = 1, + DistortionPixelShaderCount = DistortionPixelShaderBitMask + 1 +}; + +struct ShaderInfo +{ + const char* ShaderData; + size_t ShaderSize; + const ShaderBase::Uniform* ReflectionData; + size_t ReflectionSize; +}; + +// Do add a new distortion shader use these macros (with or w/o reflection) +#define SI_NOREFL(shader) { shader, sizeof(shader), NULL, 0 } +#define SI_REFL__(shader) { shader, sizeof(shader), shader ## _refl, sizeof( shader ## _refl )/sizeof(*(shader ## _refl)) } + + +static ShaderInfo DistortionVertexShaderLookup[DistortionVertexShaderCount] = +{ + SI_REFL__(Distortion_vs), + SI_REFL__(DistortionChroma_vs), + SI_REFL__(DistortionTimewarp_vs), + SI_REFL__(DistortionTimewarpChroma_vs) + //SI_REFL__(DistortionPositionalTimewarp_vs), + //SI_REFL__(DistortionPositionalTimewarpChroma_vs) +}; + +static ShaderInfo DistortionPixelShaderLookup[DistortionPixelShaderCount] = +{ + SI_NOREFL(Distortion_fs), + SI_NOREFL(DistortionChroma_fs) +}; + +void DistortionShaderBitIndexCheck() +{ + OVR_COMPILER_ASSERT(ovrDistortion_Chromatic == 1); + OVR_COMPILER_ASSERT(ovrDistortion_TimeWarp == 2); +} + + + +struct DistortionVertex +{ + Vector2f Pos; + Vector2f TexR; + Vector2f TexG; + Vector2f TexB; + Color Col; +}; + + +// Vertex type; same format is used for all shapes for simplicity. +// Shapes are built by adding vertices to Model. 
+struct LatencyVertex +{ + Vector3f Pos; + LatencyVertex (const Vector3f& p) : Pos(p) {} +}; + + +//---------------------------------------------------------------------------- +// ***** GL::DistortionRenderer + +DistortionRenderer::DistortionRenderer(ovrHmd hmd, FrameTimeManager& timeManager, + const HMDRenderState& renderState) + : CAPI::DistortionRenderer(ovrRenderAPI_OpenGL, hmd, timeManager, renderState) +{ +} + +DistortionRenderer::~DistortionRenderer() +{ + destroy(); +} + +// static +CAPI::DistortionRenderer* DistortionRenderer::Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState) +{ + InitGLExtensions(); + + return new DistortionRenderer(hmd, timeManager, renderState); +} + + +bool DistortionRenderer::Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps) +{ + // TBD: Decide if hmdCaps are needed here or are a part of RenderState + OVR_UNUSED(hmdCaps); + + const ovrGLConfig* config = (const ovrGLConfig*)apiConfig; + + if (!config) + { + // Cleanup + pEyeTextures[0].Clear(); + pEyeTextures[1].Clear(); + memset(&RParams, 0, sizeof(RParams)); + return true; + } + + if (!config->OGL.WglContext || !config->OGL.GdiDc) + return false; + + RParams.GdiDc = config->OGL.GdiDc; + RParams.Multisample = config->OGL.Header.Multisample; + RParams.RTSize = config->OGL.Header.RTSize; + RParams.WglContext = config->OGL.WglContext; + RParams.Window = config->OGL.Window; + + DistortionCaps = distortionCaps; + + //DistortionWarper.SetVsync((hmdCaps & ovrHmdCap_NoVSync) ? false : true); + + pEyeTextures[0] = *new Texture(&RParams, 0, 0); + pEyeTextures[1] = *new Texture(&RParams, 0, 0); + + initBuffersAndShaders(); + + return true; +} + + +void DistortionRenderer::SubmitEye(int eyeId, ovrTexture* eyeTexture) +{ + //Doesn't do a lot in here?? + const ovrGLTexture* tex = (const ovrGLTexture*)eyeTexture; + + //Write in values + eachEye[eyeId].texture = tex->OGL.TexId; + + if (tex) + { + //Its only at this point we discover what the viewport of the texture is. + //because presumably we allow users to realtime adjust the resolution. + //Which begs the question - why did we ask them what viewport they were + //using before, which gave them a set of UV offsets. In fact, our + //asking for eye mesh must be entirely independed of these viewports, + //presumably only to get the parameters. + + ovrEyeDesc ed = RState.EyeRenderDesc[eyeId].Desc; + ed.TextureSize = tex->OGL.Header.TextureSize; + ed.RenderViewport = tex->OGL.Header.RenderViewport; + + ovrHmd_GetRenderScaleAndOffset(HMD, ed, DistortionCaps, eachEye[eyeId].UVScaleOffset); + + pEyeTextures[eyeId]->UpdatePlaceholderTexture(tex->OGL.TexId, + tex->OGL.Header.TextureSize); + } +} + +void DistortionRenderer::EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor) +{ + if (!TimeManager.NeedDistortionTimeMeasurement()) + { + if (RState.DistortionCaps & ovrDistortion_TimeWarp) + { + // Wait for timewarp distortion if it is time and Gpu idle + FlushGpuAndWaitTillTime(TimeManager.GetFrameTiming().TimewarpPointTime); + } + + renderDistortion(pEyeTextures[0], pEyeTextures[1]); + } + else + { + // If needed, measure distortion time so that TimeManager can better estimate + // latency-reducing time-warp wait timing. 
+ WaitUntilGpuIdle(); + double distortionStartTime = ovr_GetTimeInSeconds(); + + renderDistortion(pEyeTextures[0], pEyeTextures[1]); + + WaitUntilGpuIdle(); + TimeManager.AddDistortionTimeMeasurement(ovr_GetTimeInSeconds() - distortionStartTime); + } + + if(latencyTesterDrawColor) + { + renderLatencyQuad(latencyTesterDrawColor); + } + else if(latencyTester2DrawColor) + { + renderLatencyPixel(latencyTester2DrawColor); + } + + if (swapBuffers) + { + bool useVsync = ((RState.HMDCaps & ovrHmdCap_NoVSync) == 0); + BOOL success; + int swapInterval = (useVsync) ? 1 : 0; + if (wglGetSwapIntervalEXT() != swapInterval) + wglSwapIntervalEXT(swapInterval); + + success = SwapBuffers(RParams.GdiDc); + OVR_ASSERT(success); + + // Force GPU to flush the scene, resulting in the lowest possible latency. + // It's critical that this flush is *after* present. + WaitUntilGpuIdle(); + } +} + +void DistortionRenderer::WaitUntilGpuIdle() +{ + glFlush(); + glFinish(); +} + +double DistortionRenderer::FlushGpuAndWaitTillTime(double absTime) +{ + double initialTime = ovr_GetTimeInSeconds(); + if (initialTime >= absTime) + return 0.0; + + glFlush(); + glFinish(); + + double newTime = initialTime; + volatile int i; + + while (newTime < absTime) + { + for (int j = 0; j < 50; j++) + i = 0; + + newTime = ovr_GetTimeInSeconds(); + } + + // How long we waited + return newTime - initialTime; +} + + +void DistortionRenderer::initBuffersAndShaders() +{ + for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) + { + // Allocate & generate distortion mesh vertices. + ovrDistortionMesh meshData; + +// double startT = ovr_GetTimeInSeconds(); + + if (!ovrHmd_CreateDistortionMesh( HMD, RState.EyeRenderDesc[eyeNum].Desc, + RState.DistortionCaps, + UVScaleOffset[eyeNum], &meshData) ) + { + OVR_ASSERT(false); + continue; + } + + // Now parse the vertex data and create a render ready vertex buffer from it + DistortionVertex * pVBVerts = (DistortionVertex*)OVR_ALLOC ( sizeof(DistortionVertex) * meshData.VertexCount ); + DistortionVertex * pCurVBVert = pVBVerts; + ovrDistortionVertex* pCurOvrVert = meshData.pVertexData; + + for ( unsigned vertNum = 0; vertNum < meshData.VertexCount; vertNum++ ) + { + pCurVBVert->Pos.x = pCurOvrVert->Pos.x; + pCurVBVert->Pos.y = pCurOvrVert->Pos.y; + pCurVBVert->TexR = (*(Vector2f*)&pCurOvrVert->TexR); + pCurVBVert->TexG = (*(Vector2f*)&pCurOvrVert->TexG); + pCurVBVert->TexB = (*(Vector2f*)&pCurOvrVert->TexB); + // Convert [0.0f,1.0f] to [0,255] + pCurVBVert->Col.R = (OVR::UByte)( pCurOvrVert->VignetteFactor * 255.99f ); + pCurVBVert->Col.G = pCurVBVert->Col.R; + pCurVBVert->Col.B = pCurVBVert->Col.R; + pCurVBVert->Col.A = (OVR::UByte)( pCurOvrVert->TimeWarpFactor * 255.99f );; + pCurOvrVert++; + pCurVBVert++; + } + + DistortionMeshVBs[eyeNum] = *new Buffer(&RParams); + DistortionMeshVBs[eyeNum]->Data ( Buffer_Vertex, pVBVerts, sizeof(DistortionVertex) * meshData.VertexCount ); + DistortionMeshIBs[eyeNum] = *new Buffer(&RParams); + DistortionMeshIBs[eyeNum]->Data ( Buffer_Index, meshData.pIndexData, ( sizeof(INT16) * meshData.IndexCount ) ); + + OVR_FREE ( pVBVerts ); + ovrHmd_DestroyDistortionMesh( &meshData ); + } + + initShaders(); +} + +void DistortionRenderer::renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture) +{ + setViewport( Recti(0,0, RParams.RTSize.w, RParams.RTSize.h) ); + + glClearColor( + RState.ClearColor[0], + RState.ClearColor[1], + RState.ClearColor[2], + RState.ClearColor[3] ); + + glClearDepth(0); + + glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); + + for (int eyeNum = 0; 
eyeNum < 2; eyeNum++) + { + ShaderFill distortionShaderFill(DistortionShader); + distortionShaderFill.SetTexture(0, eyeNum == 0 ? leftEyeTexture : rightEyeTexture); + + DistortionShader->SetUniform2f("EyeToSourceUVScale", UVScaleOffset[eyeNum][0].x, UVScaleOffset[eyeNum][0].y); + DistortionShader->SetUniform2f("EyeToSourceUVOffset", UVScaleOffset[eyeNum][1].x, UVScaleOffset[eyeNum][1].y); + + if (DistortionCaps & ovrDistortion_TimeWarp) + { + ovrMatrix4f timeWarpMatrices[2]; + ovrHmd_GetEyeTimewarpMatrices(HMD, (ovrEyeType)eyeNum, + RState.EyeRenderPoses[eyeNum], timeWarpMatrices); + + // Feed identity like matrices in until we get proper timewarp calculation going on + DistortionShader->SetUniform4x4f("EyeRotationStart", Matrix4f(timeWarpMatrices[0]).Transposed()); + DistortionShader->SetUniform4x4f("EyeRotationEnd", Matrix4f(timeWarpMatrices[1]).Transposed()); + + renderPrimitives(&distortionShaderFill, DistortionMeshVBs[eyeNum], DistortionMeshIBs[eyeNum], + NULL, 0, (int)DistortionMeshVBs[eyeNum]->GetSize(), Prim_Triangles, true); + } + else + { + renderPrimitives(&distortionShaderFill, DistortionMeshVBs[eyeNum], DistortionMeshIBs[eyeNum], + NULL, 0, (int)DistortionMeshVBs[eyeNum]->GetSize(), Prim_Triangles, true); + } + } +} + +void DistortionRenderer::createDrawQuad() +{ + const int numQuadVerts = 4; + LatencyTesterQuadVB = *new Buffer(&RParams); + if(!LatencyTesterQuadVB) + { + return; + } + + LatencyTesterQuadVB->Data(Buffer_Vertex, NULL, numQuadVerts * sizeof(LatencyVertex)); + LatencyVertex* vertices = (LatencyVertex*)LatencyTesterQuadVB->Map(0, numQuadVerts * sizeof(LatencyVertex), Map_Discard); + if(!vertices) + { + OVR_ASSERT(false); // failed to lock vertex buffer + return; + } + + const float left = -1.0f; + const float top = -1.0f; + const float right = 1.0f; + const float bottom = 1.0f; + + vertices[0] = LatencyVertex(Vector3f(left, top, 0.0f)); + vertices[1] = LatencyVertex(Vector3f(left, bottom, 0.0f)); + vertices[2] = LatencyVertex(Vector3f(right, top, 0.0f)); + vertices[3] = LatencyVertex(Vector3f(right, bottom, 0.0f)); + + LatencyTesterQuadVB->Unmap(vertices); +} + +void DistortionRenderer::renderLatencyQuad(unsigned char* latencyTesterDrawColor) +{ + const int numQuadVerts = 4; + + if(!LatencyTesterQuadVB) + { + createDrawQuad(); + } + + ShaderFill quadFill(SimpleQuadShader); + //quadFill.SetInputLayout(SimpleQuadVertexIL); + + setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h)); + + SimpleQuadShader->SetUniform2f("Scale", 0.2f, 0.2f); + SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterDrawColor[0] / 255.99f, + (float)latencyTesterDrawColor[0] / 255.99f, + (float)latencyTesterDrawColor[0] / 255.99f, + 1.0f); + + for(int eyeNum = 0; eyeNum < 2; eyeNum++) + { + SimpleQuadShader->SetUniform2f("PositionOffset", eyeNum == 0 ? 
-0.4f : 0.4f, 0.0f); + renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, NULL, 0, numQuadVerts, Prim_TriangleStrip, false); + } +} + +void DistortionRenderer::renderLatencyPixel(unsigned char* latencyTesterPixelColor) +{ + const int numQuadVerts = 4; + + if(!LatencyTesterQuadVB) + { + createDrawQuad(); + } + + ShaderFill quadFill(SimpleQuadShader); + + setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h)); + + SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterPixelColor[0] / 255.99f, + (float)latencyTesterPixelColor[0] / 255.99f, + (float)latencyTesterPixelColor[0] / 255.99f, + 1.0f); + + Vector2f scale(2.0f / RParams.RTSize.w, 2.0f / RParams.RTSize.h); + SimpleQuadShader->SetUniform2f("Scale", scale.x, scale.y); + SimpleQuadShader->SetUniform2f("PositionOffset", 1.0f, 1.0f); + renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, NULL, 0, numQuadVerts, Prim_TriangleStrip, false); +} + +void DistortionRenderer::renderPrimitives( + const ShaderFill* fill, + Buffer* vertices, Buffer* indices, + Matrix4f* viewMatrix, int offset, int count, + PrimitiveType rprim, bool useDistortionVertex) +{ + ShaderSet* shaders = (ShaderSet*) ((ShaderFill*)fill)->GetShaders(); + + GLenum prim; + switch (rprim) + { + case Prim_Triangles: + prim = GL_TRIANGLES; + break; + case Prim_Lines: + prim = GL_LINES; + break; + case Prim_TriangleStrip: + prim = GL_TRIANGLE_STRIP; + break; + default: + assert(0); + return; + } + + fill->Set(); + if (shaders->ProjLoc >= 0) + glUniformMatrix4fv(shaders->ProjLoc, 1, 0, &StdUniforms.Proj.M[0][0]); + if (shaders->ViewLoc >= 0 && viewMatrix != NULL) + glUniformMatrix4fv(shaders->ViewLoc, 1, 0, &viewMatrix->Transposed().M[0][0]); + + //if (shaders->UsesLighting && Lighting->Version != shaders->LightingVer) + //{ + // shaders->LightingVer = Lighting->Version; + // Lighting->Set(shaders); + //} + + glBindBuffer(GL_ARRAY_BUFFER, ((Buffer*)vertices)->GLBuffer); + for (int i = 0; i < 5; i++) + glEnableVertexAttribArray(i); + + GLuint prog = fill->GetShaders()->Prog; + + if (useDistortionVertex) + { + GLint posLoc = glGetAttribLocation(prog, "Position"); + GLint colLoc = glGetAttribLocation(prog, "Color"); + GLint tc0Loc = glGetAttribLocation(prog, "TexCoord0"); + GLint tc1Loc = glGetAttribLocation(prog, "TexCoord1"); + GLint tc2Loc = glGetAttribLocation(prog, "TexCoord2"); + + glVertexAttribPointer(posLoc, 2, GL_FLOAT, false, sizeof(DistortionVertex), (char*)offset + offsetof(DistortionVertex, Pos)); + glVertexAttribPointer(colLoc, 4, GL_UNSIGNED_BYTE, true, sizeof(DistortionVertex), (char*)offset + offsetof(DistortionVertex, Col)); + glVertexAttribPointer(tc0Loc, 2, GL_FLOAT, false, sizeof(DistortionVertex), (char*)offset + offsetof(DistortionVertex, TexR)); + glVertexAttribPointer(tc1Loc, 2, GL_FLOAT, false, sizeof(DistortionVertex), (char*)offset + offsetof(DistortionVertex, TexG)); + glVertexAttribPointer(tc2Loc, 2, GL_FLOAT, false, sizeof(DistortionVertex), (char*)offset + offsetof(DistortionVertex, TexB)); + } + else + { + GLint posLoc = glGetAttribLocation(prog, "Position"); + + glVertexAttribPointer(posLoc, 3, GL_FLOAT, false, sizeof(LatencyVertex), (char*)offset + offsetof(LatencyVertex, Pos)); + } + + if (indices) + { + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ((Buffer*)indices)->GLBuffer); + glDrawElements(prim, count, GL_UNSIGNED_SHORT, NULL); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + } + else + { + glDrawArrays(prim, 0, count); + } + + for (int i = 0; i < 5; i++) + glDisableVertexAttribArray(i); +} + +void DistortionRenderer::setViewport(const 
Recti& vp) +{ + int wh; + if (CurRenderTarget) + wh = CurRenderTarget->Height; + else + { + RECT rect; + BOOL success = GetWindowRect(RParams.Window, &rect); + OVR_ASSERT(success); + OVR_UNUSED(success); + wh = rect.bottom - rect.top; + } + glViewport(vp.x, wh-vp.y-vp.h, vp.w, vp.h); + + //glEnable(GL_SCISSOR_TEST); + //glScissor(vp.x, wh-vp.y-vp.h, vp.w, vp.h); +} + + +void DistortionRenderer::initShaders() +{ + { + ShaderInfo vsShaderByteCode = DistortionVertexShaderLookup[DistortionVertexShaderBitMask & DistortionCaps]; + Ptr<GL::VertexShader> vtxShader = *new GL::VertexShader( + &RParams, + (void*)vsShaderByteCode.ShaderData, vsShaderByteCode.ShaderSize, + vsShaderByteCode.ReflectionData, vsShaderByteCode.ReflectionSize); + + DistortionShader = *new ShaderSet; + DistortionShader->SetShader(vtxShader); + + ShaderInfo psShaderByteCode = DistortionPixelShaderLookup[DistortionPixelShaderBitMask & DistortionCaps]; + + Ptr<GL::FragmentShader> ps = *new GL::FragmentShader( + &RParams, + (void*)psShaderByteCode.ShaderData, psShaderByteCode.ShaderSize, + psShaderByteCode.ReflectionData, psShaderByteCode.ReflectionSize); + + DistortionShader->SetShader(ps); + } + { + Ptr<GL::VertexShader> vtxShader = *new GL::VertexShader( + &RParams, + (void*)SimpleQuad_vs, sizeof(SimpleQuad_vs), + SimpleQuad_vs_refl, sizeof(SimpleQuad_vs_refl) / sizeof(SimpleQuad_vs_refl[0])); + + SimpleQuadShader = *new ShaderSet; + SimpleQuadShader->SetShader(vtxShader); + + Ptr<GL::FragmentShader> ps = *new GL::FragmentShader( + &RParams, + (void*)SimpleQuad_fs, sizeof(SimpleQuad_fs), + SimpleQuad_fs_refl, sizeof(SimpleQuad_fs_refl) / sizeof(SimpleQuad_fs_refl[0])); + + SimpleQuadShader->SetShader(ps); + } +} + + +void DistortionRenderer::destroy() +{ + for(int eyeNum = 0; eyeNum < 2; eyeNum++) + { + DistortionMeshVBs[eyeNum].Clear(); + DistortionMeshIBs[eyeNum].Clear(); + } + + if (DistortionShader) + { + DistortionShader->UnsetShader(Shader_Vertex); + DistortionShader->UnsetShader(Shader_Pixel); + DistortionShader.Clear(); + } + + LatencyTesterQuadVB.Clear(); +} + +}}} // OVR::CAPI::GL diff --git a/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.h b/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.h new file mode 100644 index 0000000..8e0b72e --- /dev/null +++ b/LibOVR/Src/CAPI/GL/CAPI_GL_DistortionRenderer.h @@ -0,0 +1,125 @@ +/************************************************************************************ + +Filename : CAPI_GL_DistortionRenderer.h +Content : Distortion renderer header for GL +Created : November 11, 2013 +Authors : David Borel, Lee Cooper + +Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. + +Use of this software is subject to the terms of the Oculus Inc license +agreement provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +************************************************************************************/ + +#ifndef OVR_CAPI_GL_DistortionRenderer_h +#define OVR_CAPI_GL_DistortionRenderer_h + +#include "../CAPI_DistortionRenderer.h" + +#include "../../Kernel/OVR_Log.h" +#include "CAPI_GL_Util.h" + +namespace OVR { namespace CAPI { namespace GL { + +// ***** GL::DistortionRenderer + +// Implementation of DistortionRenderer for GL. + +class DistortionRenderer : public CAPI::DistortionRenderer +{ +public: + DistortionRenderer(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState); + ~DistortionRenderer(); + + + // Creation function for the device. 
+ static CAPI::DistortionRenderer* Create(ovrHmd hmd, + FrameTimeManager& timeManager, + const HMDRenderState& renderState); + + + // ***** Public DistortionRenderer interface + + virtual bool Initialize(const ovrRenderAPIConfig* apiConfig, + unsigned hmdCaps, unsigned distortionCaps); + + virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture); + + virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor); + + void WaitUntilGpuIdle(); + + // Similar to ovr_WaitTillTime but it also flushes GPU. + // Note, it exits when time expires, even if GPU is not in idle state yet. + double FlushGpuAndWaitTillTime(double absTime); + +private: + // TBD: Should we be using oe from RState instead? + unsigned DistortionCaps; + + struct FOR_EACH_EYE + { +#if 0 + IDirect3DVertexBuffer9 * dxVerts; + IDirect3DIndexBuffer9 * dxIndices; +#endif + int numVerts; + int numIndices; + + GLuint texture; + + ovrVector2f UVScaleOffset[2]; + } eachEye[2]; + + // GL context and utility variables. + RenderParams RParams; + + // Helpers + void initBuffersAndShaders(); + void initShaders(); + void initFullscreenQuad(); + void destroy(); + + void setViewport(const Recti& vp); + + void renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture); + + void renderPrimitives(const ShaderFill* fill, Buffer* vertices, Buffer* indices, + Matrix4f* viewMatrix, int offset, int count, + PrimitiveType rprim, bool useDistortionVertex); + + void createDrawQuad(); + void renderLatencyQuad(unsigned char* latencyTesterDrawColor); + void renderLatencyPixel(unsigned char* latencyTesterPixelColor); + + Ptr<Texture> pEyeTextures[2]; + + // U,V scale and offset needed for timewarp. + ovrVector2f UVScaleOffset[2][2]; + + Ptr<Buffer> DistortionMeshVBs[2]; // one per-eye + Ptr<Buffer> DistortionMeshIBs[2]; // one per-eye + + Ptr<ShaderSet> DistortionShader; + + struct StandardUniformData + { + Matrix4f Proj; + Matrix4f View; + } StdUniforms; + + Ptr<Buffer> LatencyTesterQuadVB; + Ptr<ShaderSet> SimpleQuadShader; + + Ptr<Texture> CurRenderTarget; + Array<Ptr<Texture> > DepthBuffers; + GLuint CurrentFbo; +}; + +}}} // OVR::CAPI::GL + +#endif // OVR_CAPI_GL_DistortionRenderer_h
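Editorial note (not part of the commit): the shader selection in CAPI_GL_DistortionRenderer.cpp above indexes DistortionVertexShaderLookup and DistortionPixelShaderLookup directly with the distortion caps bits, relying on ovrDistortion_Chromatic == 1 and ovrDistortion_TimeWarp == 2 (which DistortionShaderBitIndexCheck asserts). A minimal standalone sketch of that mapping, using the table order from the source but hypothetical helper names:

// Sketch only: how distortionCaps bits pick shaders, mirroring initShaders().
// Table order copied from DistortionVertexShaderLookup / DistortionPixelShaderLookup.
#include <cstdio>

enum { ovrDistortion_Chromatic = 1, ovrDistortion_TimeWarp = 2 }; // asserted by DistortionShaderBitIndexCheck()

static const char* VertexShaderNames[4] = {
    "Distortion_vs",               // caps & 3 == 0: no chroma, no timewarp
    "DistortionChroma_vs",         // caps & 3 == 1: chromatic correction only
    "DistortionTimewarp_vs",       // caps & 3 == 2: timewarp only
    "DistortionTimewarpChroma_vs"  // caps & 3 == 3: both
};
static const char* PixelShaderNames[2] = { "Distortion_fs", "DistortionChroma_fs" };

int main()
{
    unsigned caps = ovrDistortion_Chromatic | ovrDistortion_TimeWarp;
    // Same masking initShaders() applies to DistortionCaps.
    printf("vertex: %s\n", VertexShaderNames[caps & 3 /* DistortionVertexShaderBitMask */]);
    printf("pixel : %s\n", PixelShaderNames[caps & 1 /* DistortionPixelShaderBitMask */]);
    return 0;
}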
\ No newline at end of file diff --git a/LibOVR/Src/CAPI/GL/CAPI_GL_Util.cpp b/LibOVR/Src/CAPI/GL/CAPI_GL_Util.cpp new file mode 100644 index 0000000..b82939a --- /dev/null +++ b/LibOVR/Src/CAPI/GL/CAPI_GL_Util.cpp @@ -0,0 +1,516 @@ +/************************************************************************************ + +Filename : Render_GL_Device.cpp +Content : RenderDevice implementation for OpenGL +Created : September 10, 2012 +Authors : David Borel, Andrew Reisse + +Copyright : Copyright 2012 Oculus VR, Inc. All Rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "CAPI_GL_Util.h" +#include "../../Kernel/OVR_Log.h" + +namespace OVR { namespace CAPI { namespace GL { + + + +// GL Hooks for PC. +#if defined(OVR_OS_WIN32) + +PFNWGLGETPROCADDRESS wglGetProcAddress; + +PFNGLCLEARPROC glClear; +PFNGLCLEARCOLORPROC glClearColor; +PFNGLCLEARDEPTHPROC glClearDepth; +PFNGLVIEWPORTPROC glViewport; +PFNGLDRAWELEMENTSPROC glDrawElements; +PFNGLTEXPARAMETERIPROC glTexParameteri; +PFNGLFLUSHPROC glFlush; +PFNGLFINISHPROC glFinish; +PFNGLDRAWARRAYSPROC glDrawArrays; +PFNGLGENTEXTURESPROC glGenTextures; +PFNGLDELETETEXTURESPROC glDeleteTextures; +PFNGLBINDTEXTUREPROC glBindTexture; + +PFNWGLGETSWAPINTERVALEXTPROC wglGetSwapIntervalEXT; +PFNWGLSWAPINTERVALEXTPROC wglSwapIntervalEXT; +PFNGLGENFRAMEBUFFERSEXTPROC glGenFramebuffersEXT; +PFNGLDELETESHADERPROC glDeleteShader; +PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC glCheckFramebufferStatusEXT; +PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC glFramebufferRenderbufferEXT; +PFNGLFRAMEBUFFERTEXTURE2DEXTPROC glFramebufferTexture2DEXT; +PFNGLBINDFRAMEBUFFEREXTPROC glBindFramebufferEXT; +PFNGLACTIVETEXTUREPROC glActiveTexture; +PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray; +PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer; +PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray; +PFNGLBINDBUFFERPROC glBindBuffer; +PFNGLUNIFORMMATRIX3FVPROC glUniformMatrix3fv; +PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv; +PFNGLDELETEBUFFERSPROC glDeleteBuffers; +PFNGLBUFFERDATAPROC glBufferData; +PFNGLGENBUFFERSPROC glGenBuffers; +PFNGLMAPBUFFERPROC glMapBuffer; +PFNGLUNMAPBUFFERPROC glUnmapBuffer; +PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog; +PFNGLGETSHADERIVPROC glGetShaderiv; +PFNGLCOMPILESHADERPROC glCompileShader; +PFNGLSHADERSOURCEPROC glShaderSource; +PFNGLCREATESHADERPROC glCreateShader; +PFNGLCREATEPROGRAMPROC glCreateProgram; +PFNGLATTACHSHADERPROC glAttachShader; +PFNGLDETACHSHADERPROC glDetachShader; +PFNGLDELETEPROGRAMPROC glDeleteProgram; +PFNGLUNIFORM1IPROC glUniform1i; +PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation; +PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform; +PFNGLUSEPROGRAMPROC glUseProgram; +PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog; +PFNGLGETPROGRAMIVPROC glGetProgramiv; +PFNGLLINKPROGRAMPROC glLinkProgram; +PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation; +PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation; +PFNGLUNIFORM4FVPROC glUniform4fv; 
+PFNGLUNIFORM3FVPROC glUniform3fv; +PFNGLUNIFORM2FVPROC glUniform2fv; +PFNGLUNIFORM1FVPROC glUniform1fv; +PFNGLCOMPRESSEDTEXIMAGE2DPROC glCompressedTexImage2D; +PFNGLRENDERBUFFERSTORAGEEXTPROC glRenderbufferStorageEXT; +PFNGLBINDRENDERBUFFEREXTPROC glBindRenderbufferEXT; +PFNGLGENRENDERBUFFERSEXTPROC glGenRenderbuffersEXT; +PFNGLDELETERENDERBUFFERSEXTPROC glDeleteRenderbuffersEXT; + +PFNGLGENVERTEXARRAYSPROC glGenVertexArrays; + +void InitGLExtensions() +{ + HINSTANCE hInst = LoadLibrary( L"Opengl32.dll" ); + if (!hInst) + return; + + glClear = (PFNGLCLEARPROC)GetProcAddress( hInst, "glClear" ); + glClearColor = (PFNGLCLEARCOLORPROC)GetProcAddress( hInst, "glClearColor" ); + glClearDepth = (PFNGLCLEARDEPTHPROC)GetProcAddress( hInst, "glClearDepth" ); + glViewport = (PFNGLVIEWPORTPROC)GetProcAddress( hInst, "glViewport" ); + glDrawElements = (PFNGLDRAWELEMENTSPROC)GetProcAddress( hInst, "glDrawElements" ); + glTexParameteri = (PFNGLTEXPARAMETERIPROC)GetProcAddress( hInst, "glTexParameteri" ); + glFlush = (PFNGLFLUSHPROC)GetProcAddress( hInst, "glFlush" ); + glFinish = (PFNGLFINISHPROC)GetProcAddress( hInst, "glFinish" ); + glDrawArrays = (PFNGLDRAWARRAYSPROC)GetProcAddress( hInst, "glDrawArrays" ); + glGenTextures = (PFNGLGENTEXTURESPROC)GetProcAddress( hInst,"glGenTextures" ); + glDeleteTextures = (PFNGLDELETETEXTURESPROC)GetProcAddress( hInst,"glDeleteTextures" ); + glBindTexture = (PFNGLBINDTEXTUREPROC)GetProcAddress( hInst,"glBindTexture" ); + + wglGetProcAddress = (PFNWGLGETPROCADDRESS)GetProcAddress( hInst, "wglGetProcAddress" ); + + if (glGenFramebuffersEXT) + return; + + wglGetSwapIntervalEXT = (PFNWGLGETSWAPINTERVALEXTPROC) wglGetProcAddress("wglGetSwapIntervalEXT"); + wglSwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC) wglGetProcAddress("wglSwapIntervalEXT"); + glGenFramebuffersEXT = (PFNGLGENFRAMEBUFFERSEXTPROC) wglGetProcAddress("glGenFramebuffersEXT"); + glDeleteShader = (PFNGLDELETESHADERPROC) wglGetProcAddress("glDeleteShader"); + glCheckFramebufferStatusEXT = (PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC) wglGetProcAddress("glCheckFramebufferStatusEXT"); + glFramebufferRenderbufferEXT = (PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC) wglGetProcAddress("glFramebufferRenderbufferEXT"); + glFramebufferTexture2DEXT = (PFNGLFRAMEBUFFERTEXTURE2DEXTPROC) wglGetProcAddress("glFramebufferTexture2DEXT"); + glBindFramebufferEXT = (PFNGLBINDFRAMEBUFFEREXTPROC) wglGetProcAddress("glBindFramebufferEXT"); + glActiveTexture = (PFNGLACTIVETEXTUREPROC) wglGetProcAddress("glActiveTexture"); + glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) wglGetProcAddress("glDisableVertexAttribArray"); + glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) wglGetProcAddress("glVertexAttribPointer"); + glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) wglGetProcAddress("glEnableVertexAttribArray"); + glBindBuffer = (PFNGLBINDBUFFERPROC) wglGetProcAddress("glBindBuffer"); + glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) wglGetProcAddress("glUniformMatrix3fv"); + glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) wglGetProcAddress("glUniformMatrix4fv"); + glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) wglGetProcAddress("glDeleteBuffers"); + glBufferData = (PFNGLBUFFERDATAPROC) wglGetProcAddress("glBufferData"); + glGenBuffers = (PFNGLGENBUFFERSPROC) wglGetProcAddress("glGenBuffers"); + glMapBuffer = (PFNGLMAPBUFFERPROC) wglGetProcAddress("glMapBuffer"); + glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) wglGetProcAddress("glUnmapBuffer"); + glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) 
wglGetProcAddress("glGetShaderInfoLog"); + glGetShaderiv = (PFNGLGETSHADERIVPROC) wglGetProcAddress("glGetShaderiv"); + glCompileShader = (PFNGLCOMPILESHADERPROC) wglGetProcAddress("glCompileShader"); + glShaderSource = (PFNGLSHADERSOURCEPROC) wglGetProcAddress("glShaderSource"); + glCreateShader = (PFNGLCREATESHADERPROC) wglGetProcAddress("glCreateShader"); + glCreateProgram = (PFNGLCREATEPROGRAMPROC) wglGetProcAddress("glCreateProgram"); + glAttachShader = (PFNGLATTACHSHADERPROC) wglGetProcAddress("glAttachShader"); + glDetachShader = (PFNGLDETACHSHADERPROC) wglGetProcAddress("glDetachShader"); + glDeleteProgram = (PFNGLDELETEPROGRAMPROC) wglGetProcAddress("glDeleteProgram"); + glUniform1i = (PFNGLUNIFORM1IPROC) wglGetProcAddress("glUniform1i"); + glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) wglGetProcAddress("glGetUniformLocation"); + glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) wglGetProcAddress("glGetActiveUniform"); + glUseProgram = (PFNGLUSEPROGRAMPROC) wglGetProcAddress("glUseProgram"); + glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) wglGetProcAddress("glGetProgramInfoLog"); + glGetProgramiv = (PFNGLGETPROGRAMIVPROC) wglGetProcAddress("glGetProgramiv"); + glLinkProgram = (PFNGLLINKPROGRAMPROC) wglGetProcAddress("glLinkProgram"); + glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) wglGetProcAddress("glBindAttribLocation"); + glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) wglGetProcAddress("glGetAttribLocation"); + glUniform4fv = (PFNGLUNIFORM4FVPROC) wglGetProcAddress("glUniform4fv"); + glUniform3fv = (PFNGLUNIFORM3FVPROC) wglGetProcAddress("glUniform3fv"); + glUniform2fv = (PFNGLUNIFORM2FVPROC) wglGetProcAddress("glUniform2fv"); + glUniform1fv = (PFNGLUNIFORM1FVPROC) wglGetProcAddress("glUniform1fv"); + glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) wglGetProcAddress("glCompressedTexImage2D"); + glRenderbufferStorageEXT = (PFNGLRENDERBUFFERSTORAGEEXTPROC) wglGetProcAddress("glRenderbufferStorageEXT"); + glBindRenderbufferEXT = (PFNGLBINDRENDERBUFFEREXTPROC) wglGetProcAddress("glBindRenderbufferEXT"); + glGenRenderbuffersEXT = (PFNGLGENRENDERBUFFERSEXTPROC) wglGetProcAddress("glGenRenderbuffersEXT"); + glDeleteRenderbuffersEXT = (PFNGLDELETERENDERBUFFERSEXTPROC) wglGetProcAddress("glDeleteRenderbuffersEXT"); + + + glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) wglGetProcAddress("glGenVertexArrays"); +} + +#endif + +Buffer::Buffer(RenderParams* rp) : pParams(rp), Size(0), Use(0), GLBuffer(0) +{ +} + +Buffer::~Buffer() +{ + if (GLBuffer) + glDeleteBuffers(1, &GLBuffer); +} + +bool Buffer::Data(int use, const void* buffer, size_t size) +{ + Size = size; + + switch (use & Buffer_TypeMask) + { + case Buffer_Index: Use = GL_ELEMENT_ARRAY_BUFFER; break; + default: Use = GL_ARRAY_BUFFER; break; + } + + if (!GLBuffer) + glGenBuffers(1, &GLBuffer); + + int mode = GL_DYNAMIC_DRAW; + if (use & Buffer_ReadOnly) + mode = GL_STATIC_DRAW; + + glBindBuffer(Use, GLBuffer); + glBufferData(Use, size, buffer, mode); + glBindBuffer(Use, 0); + return 1; +} + +void* Buffer::Map(size_t, size_t, int) +{ + int mode = GL_WRITE_ONLY; + //if (flags & Map_Unsynchronized) + // mode |= GL_MAP_UNSYNCHRONIZED; + + glBindBuffer(Use, GLBuffer); + void* v = glMapBuffer(Use, mode); + glBindBuffer(Use, 0); + return v; +} + +bool Buffer::Unmap(void*) +{ + glBindBuffer(Use, GLBuffer); + int r = glUnmapBuffer(Use); + glBindBuffer(Use, 0); + return r != 0; +} + +ShaderSet::ShaderSet() +{ + Prog = glCreateProgram(); +} +ShaderSet::~ShaderSet() +{ + glDeleteProgram(Prog); +} + +GLint 
ShaderSet::GetGLShader(Shader* s) +{ + switch (s->Stage) + { + case Shader_Vertex: { + ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER>* gls = (ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER>*)s; + return gls->GLShader; + } break; + case Shader_Fragment: { + ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER>* gls = (ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER>*)s; + return gls->GLShader; + } break; + } + + return -1; +} + +void ShaderSet::SetShader(Shader *s) +{ + Shaders[s->Stage] = s; + GLint GLShader = GetGLShader(s); + glAttachShader(Prog, GLShader); + if (Shaders[Shader_Vertex] && Shaders[Shader_Fragment]) + Link(); +} + +void ShaderSet::UnsetShader(int stage) +{ + if (Shaders[stage] == NULL) + return; + + GLint GLShader = GetGLShader(Shaders[stage]); + glDetachShader(Prog, GLShader); + + Shaders[stage] = NULL; + + //Link(); +} + +bool ShaderSet::SetUniform(const char* name, int n, const float* v) +{ + for (unsigned int i = 0; i < UniformInfo.GetSize(); i++) + if (!strcmp(UniformInfo[i].Name.ToCStr(), name)) + { + OVR_ASSERT(UniformInfo[i].Location >= 0); + glUseProgram(Prog); + switch (UniformInfo[i].Type) + { + case 1: glUniform1fv(UniformInfo[i].Location, n, v); break; + case 2: glUniform2fv(UniformInfo[i].Location, n/2, v); break; + case 3: glUniform3fv(UniformInfo[i].Location, n/3, v); break; + case 4: glUniform4fv(UniformInfo[i].Location, n/4, v); break; + case 12: glUniformMatrix3fv(UniformInfo[i].Location, 1, 1, v); break; + case 16: glUniformMatrix4fv(UniformInfo[i].Location, 1, 1, v); break; + default: OVR_ASSERT(0); + } + return 1; + } + + OVR_DEBUG_LOG(("Warning: uniform %s not present in selected shader", name)); + return 0; +} + +bool ShaderSet::Link() +{ + glLinkProgram(Prog); + GLint r; + glGetProgramiv(Prog, GL_LINK_STATUS, &r); + if (!r) + { + GLchar msg[1024]; + glGetProgramInfoLog(Prog, sizeof(msg), 0, msg); + OVR_DEBUG_LOG(("Linking shaders failed: %s\n", msg)); + if (!r) + return 0; + } + glUseProgram(Prog); + + UniformInfo.Clear(); + LightingVer = 0; + UsesLighting = 0; + + GLint uniformCount = 0; + glGetProgramiv(Prog, GL_ACTIVE_UNIFORMS, &uniformCount); + OVR_ASSERT(uniformCount >= 0); + + for(GLuint i = 0; i < (GLuint)uniformCount; i++) + { + GLsizei namelen; + GLint size = 0; + GLenum type; + GLchar name[32]; + glGetActiveUniform(Prog, i, sizeof(name), &namelen, &size, &type, name); + + if (size) + { + int l = glGetUniformLocation(Prog, name); + char *np = name; + while (*np) + { + if (*np == '[') + *np = 0; + np++; + } + Uniform u; + u.Name = name; + u.Location = l; + u.Size = size; + switch (type) + { + case GL_FLOAT: u.Type = 1; break; + case GL_FLOAT_VEC2: u.Type = 2; break; + case GL_FLOAT_VEC3: u.Type = 3; break; + case GL_FLOAT_VEC4: u.Type = 4; break; + case GL_FLOAT_MAT3: u.Type = 12; break; + case GL_FLOAT_MAT4: u.Type = 16; break; + default: + continue; + } + UniformInfo.PushBack(u); + if (!strcmp(name, "LightCount")) + UsesLighting = 1; + } + else + break; + } + + ProjLoc = glGetUniformLocation(Prog, "Proj"); + ViewLoc = glGetUniformLocation(Prog, "View"); + for (int i = 0; i < 8; i++) + { + char texv[32]; + OVR_sprintf(texv, 10, "Texture%d", i); + TexLoc[i] = glGetUniformLocation(Prog, texv); + if (TexLoc[i] < 0) + break; + + glUniform1i(TexLoc[i], i); + } + if (UsesLighting) + OVR_ASSERT(ProjLoc >= 0 && ViewLoc >= 0); + return 1; +} + +bool ShaderBase::SetUniform(const char* name, int n, const float* v) +{ + for(unsigned i = 0; i < UniformReflSize; i++) + { + if (!strcmp(UniformRefl[i].Name, name)) + { + memcpy(UniformData + UniformRefl[i].Offset, v, n 
* sizeof(float)); + return 1; + } + } + return 0; +} + +bool ShaderBase::SetUniformBool(const char* name, int n, const bool* v) +{ + OVR_UNUSED(n); + for(unsigned i = 0; i < UniformReflSize; i++) + { + if (!strcmp(UniformRefl[i].Name, name)) + { + memcpy(UniformData + UniformRefl[i].Offset, v, UniformRefl[i].Size); + return 1; + } + } + return 0; +} + +void ShaderBase::InitUniforms(const Uniform* refl, size_t reflSize) +{ + if(!refl) + { + UniformRefl = NULL; + UniformReflSize = 0; + + UniformsSize = 0; + if (UniformData) + { + OVR_FREE(UniformData); + UniformData = 0; + } + return; // no reflection data + } + + UniformRefl = refl; + UniformReflSize = reflSize; + + UniformsSize = UniformRefl[UniformReflSize-1].Offset + UniformRefl[UniformReflSize-1].Size; + UniformData = (unsigned char*)OVR_ALLOC(UniformsSize); +} + +void ShaderBase::UpdateBuffer(Buffer* buf) +{ + if (UniformsSize) + { + buf->Data(Buffer_Uniform, UniformData, UniformsSize); + } +} + +Texture::Texture(RenderParams* rp, int w, int h) : IsUserAllocated(true), pParams(rp), TexId(0), Width(w), Height(h) +{ + if (w && h) + glGenTextures(1, &TexId); +} + +Texture::~Texture() +{ + if (TexId && !IsUserAllocated) + glDeleteTextures(1, &TexId); +} + +void Texture::Set(int slot, ShaderStage) const +{ + glActiveTexture(GL_TEXTURE0 + slot); + glBindTexture(GL_TEXTURE_2D, TexId); + glActiveTexture(GL_TEXTURE0); +} + +void Texture::SetSampleMode(int sm) +{ + glBindTexture(GL_TEXTURE_2D, TexId); + switch (sm & Sample_FilterMask) + { + case Sample_Linear: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1); + break; + + case Sample_Anisotropic: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 8); + break; + + case Sample_Nearest: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1); + break; + } + + switch (sm & Sample_AddressMask) + { + case Sample_Repeat: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + break; + + case Sample_Clamp: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + break; + + case Sample_ClampBorder: + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); + break; + } + glBindTexture(GL_TEXTURE_2D, 0); +} + +void Texture::UpdatePlaceholderTexture(GLuint texId, const Sizei& textureSize) +{ + if (!IsUserAllocated && TexId && texId != TexId) + glDeleteTextures(1, &TexId); + + TexId = texId; + Width = textureSize.w; + Height = textureSize.h; + + IsUserAllocated = true; +} + +}}} diff --git a/LibOVR/Src/CAPI/GL/CAPI_GL_Util.h b/LibOVR/Src/CAPI/GL/CAPI_GL_Util.h new file mode 100644 index 0000000..5e694cc --- /dev/null +++ b/LibOVR/Src/CAPI/GL/CAPI_GL_Util.h @@ -0,0 +1,522 @@ +/************************************************************************************ + +Filename : CAPI_GL_Util.h +Content : Utility header for OpenGL +Created : March 27, 2014 +Authors : Andrew Reisse, David Borel + 
+Copyright : Copyright 2012 Oculus VR, Inc. All Rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef INC_OVR_CAPI_GL_Util_h +#define INC_OVR_CAPI_GL_Util_h + +#include "../../OVR_CAPI.h" +#include "../../Kernel/OVR_Array.h" +#include "../../Kernel/OVR_Math.h" +#include "../../Kernel/OVR_RefCount.h" +#include "../../Kernel/OVR_String.h" +#include "../../Kernel/OVR_Types.h" + +#if defined(OVR_OS_WIN32) +#include <Windows.h> +#endif + +#if defined(OVR_OS_MAC) +#include <OpenGL/gl.h> +#include <OpenGL/glext.h> +#else +#ifndef GL_GLEXT_PROTOTYPES +#define GL_GLEXT_PROTOTYPES +#endif +#include <GL/gl.h> +#include <GL/glext.h> +#if defined(OVR_OS_WIN32) +#include <GL/wglext.h> +#endif +#endif + +namespace OVR { namespace CAPI { namespace GL { + +// GL extension Hooks for PC. +#if defined(OVR_OS_WIN32) + +typedef PROC (__stdcall *PFNWGLGETPROCADDRESS) (LPCSTR); +typedef void (__stdcall *PFNGLFLUSHPROC) (); +typedef void (__stdcall *PFNGLFINISHPROC) (); +typedef void (__stdcall *PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (__stdcall *PFNGLCLEARPROC) (GLbitfield); +typedef void (__stdcall *PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices); +typedef void (__stdcall *PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (__stdcall *PFNGLDELETETEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (__stdcall *PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (__stdcall *PFNGLCLEARCOLORPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat a); +typedef void (__stdcall *PFNGLCLEARDEPTHPROC) (GLclampd depth); +typedef void (__stdcall *PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (__stdcall *PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); + +extern PFNWGLGETPROCADDRESS wglGetProcAddress; +extern PFNGLCLEARPROC glClear; +extern PFNGLCLEARCOLORPROC glClearColor; +extern PFNGLCLEARDEPTHPROC glClearDepth; +extern PFNGLVIEWPORTPROC glViewport; +extern PFNGLDRAWARRAYSPROC glDrawArrays; +extern PFNGLDRAWELEMENTSPROC glDrawElements; +extern PFNGLGENTEXTURESPROC glGenTextures; +extern PFNGLDELETETEXTURESPROC glDeleteTextures; +extern PFNGLBINDTEXTUREPROC glBindTexture; +extern PFNGLTEXPARAMETERIPROC glTexParameteri; +extern PFNGLFLUSHPROC glFlush; +extern PFNGLFINISHPROC glFinish; + +extern PFNWGLGETSWAPINTERVALEXTPROC wglGetSwapIntervalEXT; +extern PFNWGLSWAPINTERVALEXTPROC wglSwapIntervalEXT; +extern PFNGLGENFRAMEBUFFERSEXTPROC glGenFramebuffersEXT; +extern PFNGLDELETESHADERPROC glDeleteShader; +extern PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC glCheckFramebufferStatusEXT; +extern PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC glFramebufferRenderbufferEXT; +extern PFNGLFRAMEBUFFERTEXTURE2DEXTPROC glFramebufferTexture2DEXT; +extern PFNGLBINDFRAMEBUFFEREXTPROC glBindFramebufferEXT; +extern PFNGLACTIVETEXTUREPROC glActiveTexture; +extern PFNGLDISABLEVERTEXATTRIBARRAYPROC 
glDisableVertexAttribArray; +extern PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer; +extern PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray; +extern PFNGLBINDBUFFERPROC glBindBuffer; +extern PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv; +extern PFNGLDELETEBUFFERSPROC glDeleteBuffers; +extern PFNGLBUFFERDATAPROC glBufferData; +extern PFNGLGENBUFFERSPROC glGenBuffers; +extern PFNGLMAPBUFFERPROC glMapBuffer; +extern PFNGLUNMAPBUFFERPROC glUnmapBuffer; +extern PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog; +extern PFNGLGETSHADERIVPROC glGetShaderiv; +extern PFNGLCOMPILESHADERPROC glCompileShader; +extern PFNGLSHADERSOURCEPROC glShaderSource; +extern PFNGLCREATESHADERPROC glCreateShader; +extern PFNGLCREATEPROGRAMPROC glCreateProgram; +extern PFNGLATTACHSHADERPROC glAttachShader; +extern PFNGLDETACHSHADERPROC glDetachShader; +extern PFNGLDELETEPROGRAMPROC glDeleteProgram; +extern PFNGLUNIFORM1IPROC glUniform1i; +extern PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation; +extern PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform; +extern PFNGLUSEPROGRAMPROC glUseProgram; +extern PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog; +extern PFNGLGETPROGRAMIVPROC glGetProgramiv; +extern PFNGLLINKPROGRAMPROC glLinkProgram; +extern PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation; +extern PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation; +extern PFNGLUNIFORM4FVPROC glUniform4fv; +extern PFNGLUNIFORM3FVPROC glUniform3fv; +extern PFNGLUNIFORM2FVPROC glUniform2fv; +extern PFNGLUNIFORM1FVPROC glUniform1fv; +extern PFNGLCOMPRESSEDTEXIMAGE2DPROC glCompressedTexImage2D; +extern PFNGLRENDERBUFFERSTORAGEEXTPROC glRenderbufferStorageEXT; +extern PFNGLBINDRENDERBUFFEREXTPROC glBindRenderbufferEXT; +extern PFNGLGENRENDERBUFFERSEXTPROC glGenRenderbuffersEXT; +extern PFNGLDELETERENDERBUFFERSEXTPROC glDeleteRenderbuffersEXT; + +// For testing +extern PFNGLGENVERTEXARRAYSPROC glGenVertexArrays; + +extern void InitGLExtensions(); + +#endif + + +// Rendering primitive type used to render Model. +enum PrimitiveType +{ + Prim_Triangles, + Prim_Lines, + Prim_TriangleStrip, + Prim_Unknown, + Prim_Count +}; + +// Types of shaders that can be stored together in a ShaderSet. +enum ShaderStage +{ + Shader_Vertex = 0, + Shader_Fragment = 2, + Shader_Pixel = 2, + Shader_Count = 3, +}; + +enum MapFlags +{ + Map_Discard = 1, + Map_Read = 2, // do not use + Map_Unsynchronized = 4, // like D3D11_MAP_NO_OVERWRITE +}; + + +// Buffer types used for uploading geometry & constants. +enum BufferUsage +{ + Buffer_Unknown = 0, + Buffer_Vertex = 1, + Buffer_Index = 2, + Buffer_Uniform = 4, + Buffer_TypeMask = 0xff, + Buffer_ReadOnly = 0x100, // Buffer must be created with Data(). +}; + +enum TextureFormat +{ + Texture_RGBA = 0x0100, + Texture_Depth = 0x8000, + Texture_TypeMask = 0xff00, + Texture_SamplesMask = 0x00ff, + Texture_RenderTarget = 0x10000, + Texture_GenMipmaps = 0x20000, +}; + +// Texture sampling modes. +enum SampleMode +{ + Sample_Linear = 0, + Sample_Nearest = 1, + Sample_Anisotropic = 2, + Sample_FilterMask = 3, + + Sample_Repeat = 0, + Sample_Clamp = 4, + Sample_ClampBorder = 8, // If unsupported Clamp is used instead. + Sample_AddressMask =12, + + Sample_Count =13, +}; + + +// Rendering parameters/pointers describing GL rendering setup. 
+struct RenderParams +{ +#ifdef OVR_OS_WIN32 + HWND Window; + HGLRC WglContext; + HDC GdiDc; +#endif + + ovrSizei RTSize; + int Multisample; +}; + + +class Buffer : public RefCountBase<Buffer> +{ +public: + RenderParams* pParams; + size_t Size; + GLenum Use; + GLuint GLBuffer; + +public: + Buffer(RenderParams* r); + ~Buffer(); + + GLuint GetBuffer() { return GLBuffer; } + + virtual size_t GetSize() { return Size; } + virtual void* Map(size_t start, size_t size, int flags = 0); + virtual bool Unmap(void *m); + virtual bool Data(int use, const void* buffer, size_t size); +}; + +class Texture : public RefCountBase<Texture> +{ + bool IsUserAllocated; + +public: + RenderParams* pParams; + GLuint TexId; + int Width, Height; + + Texture(RenderParams* rp, int w, int h); + ~Texture(); + + virtual int GetWidth() const { return Width; } + virtual int GetHeight() const { return Height; } + + virtual void SetSampleMode(int sm); + + // Updates texture to point to specified resources + // - used for slave rendering. + void UpdatePlaceholderTexture(GLuint texId, + const Sizei& textureSize); + + virtual void Set(int slot, ShaderStage stage = Shader_Fragment) const; +}; + +// Base class for vertex and pixel shaders. Stored in ShaderSet. +class Shader : public RefCountBase<Shader> +{ + friend class ShaderSet; + +protected: + ShaderStage Stage; + +public: + Shader(ShaderStage s) : Stage(s) {} + virtual ~Shader() {} + + ShaderStage GetStage() const { return Stage; } + + virtual void Set(PrimitiveType) const { } + virtual void SetUniformBuffer(class Buffer* buffers, int i = 0) { OVR_UNUSED2(buffers, i); } + +protected: + virtual bool SetUniform(const char* name, int n, const float* v) { OVR_UNUSED3(name, n, v); return false; } + virtual bool SetUniformBool(const char* name, int n, const bool* v) { OVR_UNUSED3(name, n, v); return false; } +}; + + + +// A group of shaders, one per stage. +// A ShaderSet is applied for rendering with a given fill. +class ShaderSet : public RefCountBase<ShaderSet> +{ +protected: + Ptr<Shader> Shaders[Shader_Count]; + + struct Uniform + { + String Name; + int Location, Size; + int Type; // currently number of floats in vector + }; + Array<Uniform> UniformInfo; + +public: + GLuint Prog; + GLint ProjLoc, ViewLoc; + GLint TexLoc[8]; + bool UsesLighting; + int LightingVer; + + ShaderSet(); + ~ShaderSet(); + + virtual void SetShader(Shader *s); + virtual void UnsetShader(int stage); + Shader* GetShader(int stage) { return Shaders[stage]; } + + virtual void Set(PrimitiveType prim) const + { + glUseProgram(Prog); + + for (int i = 0; i < Shader_Count; i++) + if (Shaders[i]) + Shaders[i]->Set(prim); + } + + // Set a uniform (other than the standard matrices). It is undefined whether the + // uniforms from one shader occupy the same space as those in other shaders + // (unless a buffer is used, then each buffer is independent). 
+ virtual bool SetUniform(const char* name, int n, const float* v); + bool SetUniform1f(const char* name, float x) + { + const float v[] = {x}; + return SetUniform(name, 1, v); + } + bool SetUniform2f(const char* name, float x, float y) + { + const float v[] = {x,y}; + return SetUniform(name, 2, v); + } + bool SetUniform3f(const char* name, float x, float y, float z) + { + const float v[] = {x,y,z}; + return SetUniform(name, 3, v); + } + bool SetUniform4f(const char* name, float x, float y, float z, float w = 1) + { + const float v[] = {x,y,z,w}; + return SetUniform(name, 4, v); + } + + bool SetUniformv(const char* name, const Vector3f& v) + { + const float a[] = {v.x,v.y,v.z,1}; + return SetUniform(name, 4, a); + } + + virtual bool SetUniform4x4f(const char* name, const Matrix4f& m) + { + Matrix4f mt = m.Transposed(); + return SetUniform(name, 16, &mt.M[0][0]); + } + +protected: + GLint GetGLShader(Shader* s); + bool Link(); +}; + + +// Fill combines a ShaderSet (vertex, pixel) with textures, if any. +// Every model has a fill. +class ShaderFill : public RefCountBase<ShaderFill> +{ + Ptr<ShaderSet> Shaders; + Ptr<class Texture> Textures[8]; + void* InputLayout; // HACK this should be abstracted + +public: + ShaderFill(ShaderSet* sh) : Shaders(sh) { InputLayout = NULL; } + ShaderFill(ShaderSet& sh) : Shaders(sh) { InputLayout = NULL; } + + ShaderSet* GetShaders() const { return Shaders; } + void* GetInputLayout() const { return InputLayout; } + + virtual void Set(PrimitiveType prim = Prim_Unknown) const { + Shaders->Set(prim); + for(int i = 0; i < 8; i++) + { + if(Textures[i]) + { + Textures[i]->Set(i); + } + } + } + + virtual void SetTexture(int i, class Texture* tex) { if (i < 8) Textures[i] = tex; } +}; + + +struct DisplayId +{ + // Windows + String MonitorName; // Monitor name for fullscreen mode + + // MacOS + long CgDisplayId; // CGDirectDisplayID + + DisplayId() : CgDisplayId(0) {} + DisplayId(long id) : CgDisplayId(id) {} + DisplayId(String m, long id=0) : MonitorName(m), CgDisplayId(id) {} + + operator bool () const + { + return MonitorName.GetLength() || CgDisplayId; + } + + bool operator== (const DisplayId& b) const + { + return CgDisplayId == b.CgDisplayId && + (strstr(MonitorName.ToCStr(), b.MonitorName.ToCStr()) || + strstr(b.MonitorName.ToCStr(), MonitorName.ToCStr())); + } +}; + + +class ShaderBase : public Shader +{ +public: + RenderParams* pParams; + unsigned char* UniformData; + int UniformsSize; + + enum VarType + { + VARTYPE_FLOAT, + VARTYPE_INT, + VARTYPE_BOOL, + }; + + struct Uniform + { + const char* Name; + VarType Type; + int Offset, Size; + }; + const Uniform* UniformRefl; + size_t UniformReflSize; + + ShaderBase(RenderParams* rp, ShaderStage stage) : Shader(stage), pParams(rp), UniformData(0), UniformsSize(0) {} + ~ShaderBase() + { + if (UniformData) + OVR_FREE(UniformData); + } + + void InitUniforms(const Uniform* refl, size_t reflSize); + bool SetUniform(const char* name, int n, const float* v); + bool SetUniformBool(const char* name, int n, const bool* v); + + void UpdateBuffer(Buffer* b); +}; + + +template<ShaderStage SStage, GLenum SType> +class ShaderImpl : public ShaderBase +{ + friend class ShaderSet; + +public: + ShaderImpl(RenderParams* rp, void* s, size_t size, const Uniform* refl, size_t reflSize) + : ShaderBase(rp, SStage) + , GLShader(0) + { + BOOL success; + OVR_UNUSED(size); + success = Compile((const char*) s); + OVR_ASSERT(success); + InitUniforms(refl, reflSize); + } + ~ShaderImpl() + { + if (GLShader) + { + glDeleteShader(GLShader); + 
GLShader = 0; + } + } + bool Compile(const char* src) + { + if (!GLShader) + GLShader = glCreateShader(GLStage()); + + glShaderSource(GLShader, 1, &src, 0); + glCompileShader(GLShader); + GLint r; + glGetShaderiv(GLShader, GL_COMPILE_STATUS, &r); + if (!r) + { + GLchar msg[1024]; + glGetShaderInfoLog(GLShader, sizeof(msg), 0, msg); + if (msg[0]) + OVR_DEBUG_LOG(("Compiling shader\n%s\nfailed: %s\n", src, msg)); + + return 0; + } + return 1; + } + + GLenum GLStage() const + { + return SType; + } + +private: + GLuint GLShader; +}; + +typedef ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER> VertexShader; +typedef ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER> FragmentShader; + +}}} + +#endif // INC_OVR_CAPI_GL_Util_h diff --git a/LibOVR/Src/CAPI/Shaders/DistortionChroma_ps.psh b/LibOVR/Src/CAPI/Shaders/DistortionChroma_ps.psh new file mode 100644 index 0000000..5c95ade --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/DistortionChroma_ps.psh @@ -0,0 +1,12 @@ +Texture2D Texture : register(t0); +SamplerState Linear : register(s0); + +float4 main(in float4 oPosition : SV_Position, in float4 oColor : COLOR, + in float3 oTexCoord0 : TEXCOORD0, in float3 oTexCoord1 : TEXCOORD1, in float3 oTexCoord2 : TEXCOORD2) : SV_Target +{ + float ResultR = Texture.Sample(Linear, oTexCoord0.xy).r; + float ResultG = Texture.Sample(Linear, oTexCoord1.xy).g; + float ResultB = Texture.Sample(Linear, oTexCoord2.xy).b; + return float4(ResultR * oColor.r, ResultG * oColor.g, ResultB * oColor.b, 1.0); + //" return oColor.rrrr; +} diff --git a/LibOVR/Src/CAPI/Shaders/DistortionChroma_vs.vsh b/LibOVR/Src/CAPI/Shaders/DistortionChroma_vs.vsh new file mode 100644 index 0000000..6e11647 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/DistortionChroma_vs.vsh @@ -0,0 +1,24 @@ +float2 EyeToSourceUVScale; +float2 EyeToSourceUVOffset; + +void main(in float2 Position : POSITION, in float4 Color : COLOR0, + in float2 TexCoord0 : TEXCOORD0, in float2 TexCoord1 : TEXCOORD1, in float2 TexCoord2 : TEXCOORD2, + out float4 oPosition : SV_Position, out float4 oColor : COLOR, out float3 oTexCoord0 : TEXCOORD0, + out float3 oTexCoord1 : TEXCOORD1, out float3 oTexCoord2 : TEXCOORD2) +{ + oPosition.x = Position.x; + oPosition.y = Position.y; + oPosition.z = 0.5; + oPosition.w = 1.0; + + // Scale them into UV lookup space + float2 tc0scaled = EyeToSourceUVScale * TexCoord0 + EyeToSourceUVOffset; + float2 tc1scaled = EyeToSourceUVScale * TexCoord1 + EyeToSourceUVOffset; + float2 tc2scaled = EyeToSourceUVScale * TexCoord2 + EyeToSourceUVOffset; + + oTexCoord0 = float3(tc0scaled, 1); // R sample. + oTexCoord1 = float3(tc1scaled, 1); // G sample. + oTexCoord2 = float3(tc2scaled, 1); // B sample. + oColor = Color; // Used for vignette fade. +} + diff --git a/LibOVR/Src/CAPI/Shaders/DistortionTimewarpChroma_vs.vsh b/LibOVR/Src/CAPI/Shaders/DistortionTimewarpChroma_vs.vsh new file mode 100644 index 0000000..d629ddd --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/DistortionTimewarpChroma_vs.vsh @@ -0,0 +1,40 @@ +float2 EyeToSourceUVScale; +float2 EyeToSourceUVOffset; +float4x4 EyeRotationStart; +float4x4 EyeRotationEnd; + +float2 TimewarpTexCoordToWarpedPos(float2 inTexCoord, float4x4 rotMat) +{ + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + // Apply the 3x3 timewarp rotation to these vectors. 
+ float3 transformed = float3( mul ( rotMat, float4(inTexCoord.xy, 1, 1) ).xyz); + // Project them back onto the Z=1 plane of the rendered images. + float2 flattened = transformed.xy / transformed.z; + // Scale them into ([0,0.5],[0,1]) or ([0.5,0],[0,1]) UV lookup space (depending on eye) + return flattened * EyeToSourceUVScale + EyeToSourceUVOffset; + +} + +void main(in float2 Position : POSITION, in float4 Color : COLOR0, in float2 TexCoord0 : TEXCOORD0, + in float2 TexCoord1 : TEXCOORD1, in float2 TexCoord2 : TEXCOORD2, + out float4 oPosition : SV_Position, out float4 oColor : COLOR, out float3 oTexCoord0 : TEXCOORD0, + out float3 oTexCoord1 : TEXCOORD1, out float3 oTexCoord2 : TEXCOORD2) +{ + + oPosition.x = Position.x; + oPosition.y = Position.y; + oPosition.z = 0.5; + oPosition.w = 1.0; + + float timewarpLerpFactor = Color.a; + float4x4 lerpedEyeRot = lerp(EyeRotationStart, EyeRotationEnd, timewarpLerpFactor); + //" float4x4 lerpedEyeRot = EyeRotationStart; + + // warped positions are a bit more involved, hence a separate function + oTexCoord0 = float3(TimewarpTexCoordToWarpedPos(TexCoord0, lerpedEyeRot), 1); + oTexCoord1 = float3(TimewarpTexCoordToWarpedPos(TexCoord1, lerpedEyeRot), 1); + oTexCoord2 = float3(TimewarpTexCoordToWarpedPos(TexCoord2, lerpedEyeRot), 1); + + oColor = Color.r; // Used for vignette fade. +} diff --git a/LibOVR/Src/CAPI/Shaders/DistortionTimewarp_vs.vsh b/LibOVR/Src/CAPI/Shaders/DistortionTimewarp_vs.vsh new file mode 100644 index 0000000..627970f --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/DistortionTimewarp_vs.vsh @@ -0,0 +1,36 @@ +float2 EyeToSourceUVScale; +float2 EyeToSourceUVOffset; +float4x4 EyeRotationStart; +float4x4 EyeRotationEnd; + +float2 TimewarpTexCoordToWarpedPos(float2 inTexCoord, float4x4 rotMat) +{ + // Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion). + // These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD. + // Apply the 3x3 timewarp rotation to these vectors. + float3 transformed = float3( mul ( rotMat, float4(inTexCoord,1,1) ).xyz); + // Project them back onto the Z=1 plane of the rendered images. + float2 flattened = transformed.xy / transformed.z; + // Scale them into ([0,0.5],[0,1]) or ([0.5,0],[0,1]) UV lookup space (depending on eye) + return flattened * EyeToSourceUVScale + EyeToSourceUVOffset; + +} + +void main(in float2 Position : POSITION, in float4 Color : COLOR0, in float2 TexCoord0 : TEXCOORD0, + out float4 oPosition : SV_Position, out float4 oColor : COLOR, out float3 oTexCoord0 : TEXCOORD0) +{ + + oPosition.x = Position.x; + oPosition.y = Position.y; + oPosition.z = 0.5; + oPosition.w = 1.0; + + float timewarpLerpFactor = Color.a; + float4x4 lerpedEyeRot = lerp(EyeRotationStart, EyeRotationEnd, timewarpLerpFactor); + + // Warped positions are a bit more involved, hence a separate function + oTexCoord0 = float3(TimewarpTexCoordToWarpedPos(TexCoord0, lerpedEyeRot), 1); + oColor = Color.r; // Used for vignette fade. 
+} + + diff --git a/LibOVR/Src/CAPI/Shaders/Distortion_ps.psh b/LibOVR/Src/CAPI/Shaders/Distortion_ps.psh new file mode 100644 index 0000000..4a33de5 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/Distortion_ps.psh @@ -0,0 +1,9 @@ +Texture2D Texture : register(t0); +SamplerState Linear : register(s0); + +float4 main(in float4 oPosition : SV_Position, in float4 oColor : COLOR, + in float3 oTexCoord0 : TEXCOORD0) : SV_Target +{ + float3 Result = Texture.Sample(Linear, oTexCoord0.xy).rgb; + return float4(Result.r * oColor.r, Result.g * oColor.g, Result.b * oColor.b, 1.0); +} diff --git a/LibOVR/Src/CAPI/Shaders/Distortion_vs.vsh b/LibOVR/Src/CAPI/Shaders/Distortion_vs.vsh new file mode 100644 index 0000000..d22ea02 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/Distortion_vs.vsh @@ -0,0 +1,14 @@ +float2 EyeToSourceUVScale; +float2 EyeToSourceUVOffset; + +void main(in float2 Position : POSITION, in float4 Color : COLOR0, in float2 TexCoord0 : TEXCOORD0, + out float4 oPosition : SV_Position, out float4 oColor : COLOR, out float3 oTexCoord0 : TEXCOORD0) +{ + oPosition.x = Position.x; + oPosition.y = Position.y; + oPosition.z = 0.5; + oPosition.w = 1.0; + oTexCoord0 = float3(EyeToSourceUVScale * TexCoord0 + EyeToSourceUVOffset, 1); + oColor = Color; // Used for vignette fade. +} + diff --git a/LibOVR/Src/CAPI/Shaders/SimpleQuad_ps.psh b/LibOVR/Src/CAPI/Shaders/SimpleQuad_ps.psh new file mode 100644 index 0000000..9ea10cd --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/SimpleQuad_ps.psh @@ -0,0 +1,6 @@ +float4 Color; + +float4 main() : SV_Target +{ + return Color; +} diff --git a/LibOVR/Src/CAPI/Shaders/SimpleQuad_vs.vsh b/LibOVR/Src/CAPI/Shaders/SimpleQuad_vs.vsh new file mode 100644 index 0000000..4625371 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/SimpleQuad_vs.vsh @@ -0,0 +1,8 @@ +float2 PositionOffset = float2(0, 0); +float2 Scale = float2(1, 1); + +void main( in float3 Position : POSITION, +out float4 oPosition : SV_Position) +{ + oPosition = float4(Position.xy * Scale + PositionOffset, 0.5, 1.0); +}
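The distortion vertex shaders above all apply the same mapping: a vertex's tan-eye-angle coordinate is scaled and offset by EyeToSourceUVScale / EyeToSourceUVOffset to produce a render-target UV, and the pixel shaders then sample that UV (per colour channel in the chroma-aware variants) and multiply by the vertex colour carrying the vignette fade. The fragment below is an illustrative CPU-side equivalent of that per-vertex transform, useful for sanity-checking the constants handed to the shaders; the type and function names are invented for the example, and the commented SetUniform2f calls show how the same pair of constants would be uploaded through the ShaderSet helpers declared earlier in the GL utility header.

    // Illustrative only: CPU equivalent of the shader line
    //   oTexCoord0 = float3(EyeToSourceUVScale * TexCoord0 + EyeToSourceUVOffset, 1);
    struct Vec2 { float x, y; };

    Vec2 TanEyeAngleToSourceUV(Vec2 tanEyeAngle, Vec2 uvScale, Vec2 uvOffset)
    {
        Vec2 uv;
        uv.x = tanEyeAngle.x * uvScale.x + uvOffset.x;
        uv.y = tanEyeAngle.y * uvScale.y + uvOffset.y;
        return uv;
    }

    // The same constants reach the GL back-end through the ShaderSet helpers, e.g.:
    //   shaders->SetUniform2f("EyeToSourceUVScale",  uvScale.x,  uvScale.y);
    //   shaders->SetUniform2f("EyeToSourceUVOffset", uvOffset.x, uvOffset.y);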
\ No newline at end of file diff --git a/LibOVR/Src/CAPI/Shaders/genPixelShaderHeader.bat b/LibOVR/Src/CAPI/Shaders/genPixelShaderHeader.bat new file mode 100644 index 0000000..a1311b2 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/genPixelShaderHeader.bat @@ -0,0 +1,15 @@ +@echo off +pushd %~dp0 +echo Compiling shader and packing into header: %~2 +setlocal + +set PATH=%PATH%;"%DXSDK_DIR%Utilities\bin\x86\" +fxc.exe /nologo /E main /T ps_4_0 /Fo "%1" %2 +bin2header.exe "%1" + +echo Generating shader reflection data for %1 +ShaderReflector "%1" "%1_refl.h" + +del "%1" +endlocal +popd diff --git a/LibOVR/Src/CAPI/Shaders/genVertexShaderHeader.bat b/LibOVR/Src/CAPI/Shaders/genVertexShaderHeader.bat new file mode 100644 index 0000000..4591d20 --- /dev/null +++ b/LibOVR/Src/CAPI/Shaders/genVertexShaderHeader.bat @@ -0,0 +1,15 @@ +@echo off +pushd %~dp0 +echo Compiling shader and packing into header: %~2 +setlocal + +set PATH=%PATH%;"%DXSDK_DIR%Utilities\bin\x86\" +fxc.exe /nologo /E main /T vs_4_0 /Fo "%1" %2 +bin2header.exe "%1" + +echo Generating shader reflection data for %1 +ShaderReflector "%1" "%1_refl.h" + +del "%1" +endlocal +popd diff --git a/LibOVR/Src/Kernel/OVR_Alg.cpp b/LibOVR/Src/Kernel/OVR_Alg.cpp index 0c564ab..2e52bc3 100644 --- a/LibOVR/Src/Kernel/OVR_Alg.cpp +++ b/LibOVR/Src/Kernel/OVR_Alg.cpp @@ -5,16 +5,16 @@ Content : Static lookup tables for Alg functions Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Alg.h b/LibOVR/Src/Kernel/OVR_Alg.h index 51261d3..783af21 100644 --- a/LibOVR/Src/Kernel/OVR_Alg.h +++ b/LibOVR/Src/Kernel/OVR_Alg.h @@ -6,16 +6,16 @@ Content : Simple general purpose algorithms: Sort, Binary Search, etc. Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -433,7 +433,29 @@ void InsertionSort(Array& arr) InsertionSortSliced(arr, 0, arr.GetSize(), OperatorLess<ValueType>::Compare); } +//----------------------------------------------------------------------------------- +// ***** Median +// Returns a median value of the input array. +// Caveats: partially sorts the array, returns a reference to the array element +// TBD: This needs to be optimized and generalized +// +template<class Array> +typename Array::ValueType& Median(Array& arr) +{ + UPInt count = arr.GetSize(); + UPInt mid = (count - 1) / 2; + OVR_ASSERT(count > 0); + for (int j = 0; j <= mid; j++) + { + int min = j; + for (int k = j + 1; k < count; k++) + if (arr[k] < arr[min]) + min = k; + Swap(arr[j], arr[min]); + } + return arr[mid]; +} //----------------------------------------------------------------------------------- // ***** LowerBoundSliced @@ -651,13 +673,13 @@ inline UByte UpperBit(UPInt val) if (val & 0xFFFF0000) { - return static_cast<UByte>((val & 0xFF000000) ? + return (val & 0xFF000000) ? UpperBitTable[(val >> 24) ] + 24: - UpperBitTable[(val >> 16) & 0xFF] + 16); + UpperBitTable[(val >> 16) & 0xFF] + 16; } - return static_cast<UByte>((val & 0xFF00) ? + return (val & 0xFF00) ? UpperBitTable[(val >> 8) & 0xFF] + 8: - UpperBitTable[(val ) & 0xFF]); + UpperBitTable[(val ) & 0xFF]; #else @@ -696,13 +718,13 @@ inline UByte LowerBit(UPInt val) if (val & 0xFFFF) { - return static_cast<UByte>( (val & 0xFF) ? + return (val & 0xFF) ? LowerBitTable[ val & 0xFF]: - LowerBitTable[(val >> 8) & 0xFF] + 8 ); + LowerBitTable[(val >> 8) & 0xFF] + 8; } - return static_cast<UByte>( (val & 0xFF0000) ? + return (val & 0xFF0000) ? LowerBitTable[(val >> 16) & 0xFF] + 16: - LowerBitTable[(val >> 24) & 0xFF] + 24 ); + LowerBitTable[(val >> 24) & 0xFF] + 24; #else @@ -813,7 +835,7 @@ namespace ByteUtil { // Swap the byte order of primitive types inline UByte SwapOrder(UByte v) { return v; } inline SByte SwapOrder(SByte v) { return v; } - inline UInt16 SwapOrder(UInt16 v) { return static_cast<UInt16>( UInt16(v>>8)|UInt16(v<<8) ); } + inline UInt16 SwapOrder(UInt16 v) { return UInt16(v>>8)|UInt16(v<<8); } inline SInt16 SwapOrder(SInt16 v) { return SInt16((UInt16(v)>>8)|(v<<8)); } inline UInt32 SwapOrder(UInt32 v) { return (v>>24)|((v&0x00FF0000)>>8)|((v&0x0000FF00)<<8)|(v<<24); } inline SInt32 SwapOrder(SInt32 p) { return (SInt32)SwapOrder(UInt32(p)); } @@ -959,6 +981,80 @@ namespace ByteUtil { +// Used primarily for hardware interfacing such as sensor reports, firmware, etc. +// Reported data is all little-endian. 
+inline UInt16 DecodeUInt16(const UByte* buffer) +{ + return ByteUtil::LEToSystem ( *(const UInt16*)buffer ); +} + +inline SInt16 DecodeSInt16(const UByte* buffer) +{ + return ByteUtil::LEToSystem ( *(const SInt16*)buffer ); +} + +inline UInt32 DecodeUInt32(const UByte* buffer) +{ + return ByteUtil::LEToSystem ( *(const UInt32*)buffer ); +} + +inline SInt32 DecodeSInt32(const UByte* buffer) +{ + return ByteUtil::LEToSystem ( *(const SInt32*)buffer ); +} + +inline float DecodeFloat(const UByte* buffer) +{ + union { + UInt32 U; + float F; + }; + + U = DecodeUInt32(buffer); + return F; +} + +inline void EncodeUInt16(UByte* buffer, UInt16 val) +{ + *(UInt16*)buffer = ByteUtil::SystemToLE ( val ); +} + +inline void EncodeSInt16(UByte* buffer, SInt16 val) +{ + *(SInt16*)buffer = ByteUtil::SystemToLE ( val ); +} + +inline void EncodeUInt32(UByte* buffer, UInt32 val) +{ + *(UInt32*)buffer = ByteUtil::SystemToLE ( val ); +} + +inline void EncodeSInt32(UByte* buffer, SInt32 val) +{ + *(SInt32*)buffer = ByteUtil::SystemToLE ( val ); +} + +inline void EncodeFloat(UByte* buffer, float val) +{ + union { + UInt32 U; + float F; + }; + + F = val; + EncodeUInt32(buffer, U); +} + +// Converts an 8-bit binary-coded decimal +inline SByte DecodeBCD(UByte byte) +{ + UByte digit1 = (byte >> 4) & 0x0f; + UByte digit2 = byte & 0x0f; + int decimal = digit1 * 10 + digit2; // maximum value = 99 + return (SByte)decimal; +} + + }} // OVR::Alg #endif diff --git a/LibOVR/Src/Kernel/OVR_Allocator.cpp b/LibOVR/Src/Kernel/OVR_Allocator.cpp index 51eebba..0f82561 100644 --- a/LibOVR/Src/Kernel/OVR_Allocator.cpp +++ b/LibOVR/Src/Kernel/OVR_Allocator.cpp @@ -5,16 +5,16 @@ Content : Installable memory allocator implementation Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Allocator.h b/LibOVR/Src/Kernel/OVR_Allocator.h index b2e0472..b862557 100644 --- a/LibOVR/Src/Kernel/OVR_Allocator.h +++ b/LibOVR/Src/Kernel/OVR_Allocator.h @@ -6,16 +6,16 @@ Content : Installable memory allocator Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
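The Decode*/Encode* helpers added to OVR_Alg.h above read and write fixed-width little-endian fields independently of host byte order, and DecodeBCD turns one binary-coded-decimal byte (two 4-bit digits) into its numeric value. A minimal sketch of parsing a hypothetical sensor-report buffer with them; the field layout and offsets are invented purely for illustration:

    void ParseReportExample(const OVR::UByte* report)
    {
        OVR::UInt16 sampleCount = OVR::Alg::DecodeUInt16(report + 0);   // bytes 0..1
        float       temperature = OVR::Alg::DecodeFloat (report + 2);   // bytes 2..5
        OVR::SByte  versionBCD  = OVR::Alg::DecodeBCD   (report[6]);    // e.g. 0x31 -> 31
        OVR_UNUSED3(sampleCount, temperature, versionBCD);
    }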
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -104,7 +104,7 @@ namespace OVR { template <class T> OVR_FORCE_INLINE T* Construct(void *p) { - return ::new(p) T; + return ::new(p) T(); } template <class T> diff --git a/LibOVR/Src/Kernel/OVR_Array.h b/LibOVR/Src/Kernel/OVR_Array.h index 552ddcc..7a715ba 100644 --- a/LibOVR/Src/Kernel/OVR_Array.h +++ b/LibOVR/Src/Kernel/OVR_Array.h @@ -6,16 +6,16 @@ Content : Template implementation for Array Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -222,7 +222,7 @@ struct ArrayData : ArrayDataBase<T, Allocator, SizePolicy> ArrayData() : BaseType() { } - ArrayData(int size) + ArrayData(UPInt size) : BaseType() { Resize(size); } ArrayData(const SelfType& a) @@ -281,7 +281,7 @@ struct ArrayDataCC : ArrayDataBase<T, Allocator, SizePolicy> ArrayDataCC(const ValueType& defval) : BaseType(), DefaultValue(defval) { } - ArrayDataCC(const ValueType& defval, int size) + ArrayDataCC(const ValueType& defval, UPInt size) : BaseType(), DefaultValue(defval) { Resize(size); } ArrayDataCC(const SelfType& a) @@ -359,14 +359,14 @@ public: ArrayBase() : Data() {} - ArrayBase(int size) + ArrayBase(UPInt size) : Data(size) {} ArrayBase(const SelfType& a) : Data(a.Data) {} ArrayBase(const ValueType& defval) : Data(defval) {} - ArrayBase(const ValueType& defval, int size) + ArrayBase(const ValueType& defval, UPInt size) : Data(defval, size) {} SizePolicyType* GetSizePolicy() const { return Data.Policy; } @@ -499,6 +499,9 @@ public: // Removing an element from the array is an expensive operation! // It compacts only after removing the last element. + // If order of elements in the array is not important then use + // RemoveAtUnordered, that could be much faster than the regular + // RemoveAt. void RemoveAt(UPInt index) { OVR_ASSERT(index < Data.Size); @@ -517,6 +520,32 @@ public: } } + // Removes an element from the array without respecting of original order of + // elements for better performance. Do not use on array where order of elements + // is important, otherwise use it instead of regular RemoveAt(). + void RemoveAtUnordered(UPInt index) + { + OVR_ASSERT(index < Data.Size); + if (Data.Size == 1) + { + Clear(); + } + else + { + // copy the last element into the 'index' position + // and decrement the size (instead of moving all elements + // in [index + 1 .. size - 1] range). 
+ const UPInt lastElemIndex = Data.Size - 1; + if (index < lastElemIndex) + { + AllocatorType::Destruct(Data.Data + index); + AllocatorType::Construct(Data.Data + index, Data.Data[lastElemIndex]); + } + AllocatorType::Destruct(Data.Data + lastElemIndex); + --Data.Size; + } + } + // Insert the given object at the given index shifting all the elements up. void InsertAt(UPInt index, const ValueType& val = ValueType()) { @@ -725,7 +754,7 @@ public: typedef ArrayBase<ArrayData<T, ContainerAllocator<T>, SizePolicy> > BaseType; Array() : BaseType() {} - Array(int size) : BaseType(size) {} + Array(UPInt size) : BaseType(size) {} Array(const SizePolicyType& p) : BaseType() { SetSizePolicy(p); } Array(const SelfType& a) : BaseType(a) {} const SelfType& operator=(const SelfType& a) { BaseType::operator=(a); return *this; } @@ -747,7 +776,7 @@ public: typedef ArrayBase<ArrayData<T, ContainerAllocator_POD<T>, SizePolicy> > BaseType; ArrayPOD() : BaseType() {} - ArrayPOD(int size) : BaseType(size) {} + ArrayPOD(UPInt size) : BaseType(size) {} ArrayPOD(const SizePolicyType& p) : BaseType() { SetSizePolicy(p); } ArrayPOD(const SelfType& a) : BaseType(a) {} const SelfType& operator=(const SelfType& a) { BaseType::operator=(a); return *this; } @@ -769,7 +798,7 @@ public: typedef ArrayBase<ArrayData<T, ContainerAllocator_CPP<T>, SizePolicy> > BaseType; ArrayCPP() : BaseType() {} - ArrayCPP(int size) : BaseType(size) {} + ArrayCPP(UPInt size) : BaseType(size) {} ArrayCPP(const SizePolicyType& p) : BaseType() { SetSizePolicy(p); } ArrayCPP(const SelfType& a) : BaseType(a) {} const SelfType& operator=(const SelfType& a) { BaseType::operator=(a); return *this; } @@ -793,7 +822,7 @@ public: typedef ArrayBase<ArrayDataCC<T, ContainerAllocator<T>, SizePolicy> > BaseType; ArrayCC(const ValueType& defval) : BaseType(defval) {} - ArrayCC(const ValueType& defval, int size) : BaseType(defval, size) {} + ArrayCC(const ValueType& defval, UPInt size) : BaseType(defval, size) {} ArrayCC(const ValueType& defval, const SizePolicyType& p) : BaseType(defval) { SetSizePolicy(p); } ArrayCC(const SelfType& a) : BaseType(a) {} const SelfType& operator=(const SelfType& a) { BaseType::operator=(a); return *this; } diff --git a/LibOVR/Src/Kernel/OVR_Atomic.cpp b/LibOVR/Src/Kernel/OVR_Atomic.cpp index 1c0efc2..9ea6e76 100644 --- a/LibOVR/Src/Kernel/OVR_Atomic.cpp +++ b/LibOVR/Src/Kernel/OVR_Atomic.cpp @@ -7,16 +7,16 @@ Content : Contains atomic operations and inline fastest locking Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -27,6 +27,7 @@ limitations under the License. 
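RemoveAtUnordered, completed above, avoids the element-shifting cost of a plain RemoveAt by moving the last element into the vacated slot, at the price of changing the relative order of the remaining elements. A small comparison with arbitrary values (assumes the OVR allocator has been installed, since Array allocates through it):

    OVR::Array<int> a;
    for (int i = 0; i < 5; i++)
        a.PushBack(i);           // a = {0, 1, 2, 3, 4}

    a.RemoveAt(1);               // shifts the tail down:         a = {0, 2, 3, 4}
    a.RemoveAtUnordered(0);      // moves the last into slot 0:   a = {4, 2, 3}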
************************************************************************************/ #include "OVR_Atomic.h" +#include "OVR_Allocator.h" #ifdef OVR_ENABLE_THREADS @@ -88,6 +89,74 @@ Lock::~Lock() #endif + +//------------------------------------------------------------------------------------- +// ***** SharedLock + +// This is a general purpose globally shared Lock implementation that should probably be +// moved to Kernel. +// May in theory busy spin-wait if we hit contention on first lock creation, +// but this shouldn't matter in practice since Lock* should be cached. + + +enum { LockInitMarker = 0xFFFFFFFF }; + +Lock* SharedLock::GetLockAddRef() +{ + int oldUseCount; + + do { + oldUseCount = UseCount; + if (oldUseCount == LockInitMarker) + continue; + + if (oldUseCount == 0) + { + // Initialize marker + if (AtomicOps<int>::CompareAndSet_Sync(&UseCount, 0, LockInitMarker)) + { + Construct<Lock>(Buffer); + do { } + while (!AtomicOps<int>::CompareAndSet_Sync(&UseCount, LockInitMarker, 1)); + return toLock(); + } + continue; + } + + } while (!AtomicOps<int>::CompareAndSet_NoSync(&UseCount, oldUseCount, oldUseCount + 1)); + + return toLock(); +} + +void SharedLock::ReleaseLock(Lock* plock) +{ + OVR_UNUSED(plock); + OVR_ASSERT(plock == toLock()); + + int oldUseCount; + + do { + oldUseCount = UseCount; + OVR_ASSERT(oldUseCount != LockInitMarker); + + if (oldUseCount == 1) + { + // Initialize marker + if (AtomicOps<int>::CompareAndSet_Sync(&UseCount, 1, LockInitMarker)) + { + Destruct<Lock>(toLock()); + + do { } + while (!AtomicOps<int>::CompareAndSet_Sync(&UseCount, LockInitMarker, 0)); + + return; + } + continue; + } + + } while (!AtomicOps<int>::CompareAndSet_NoSync(&UseCount, oldUseCount, oldUseCount - 1)); +} + } // OVR #endif // OVR_ENABLE_THREADS diff --git a/LibOVR/Src/Kernel/OVR_Atomic.h b/LibOVR/Src/Kernel/OVR_Atomic.h index b539ccd..b826251 100644 --- a/LibOVR/Src/Kernel/OVR_Atomic.h +++ b/LibOVR/Src/Kernel/OVR_Atomic.h @@ -8,16 +8,16 @@ Content : Contains atomic operations and inline fastest locking Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -865,6 +865,25 @@ public: }; +//------------------------------------------------------------------------------------- +// Globally shared Lock implementation used for MessageHandlers, etc. + +class SharedLock +{ +public: + SharedLock() : UseCount(0) {} + + Lock* GetLockAddRef(); + void ReleaseLock(Lock* plock); + +private: + Lock* toLock() { return (Lock*)Buffer; } + + // UseCount and max alignment. 
+ volatile int UseCount; + UInt64 Buffer[(sizeof(Lock)+sizeof(UInt64)-1)/sizeof(UInt64)]; +}; + } // OVR diff --git a/LibOVR/Src/Kernel/OVR_Color.h b/LibOVR/Src/Kernel/OVR_Color.h index 0881997..cf536da 100644 --- a/LibOVR/Src/Kernel/OVR_Color.h +++ b/LibOVR/Src/Kernel/OVR_Color.h @@ -6,16 +6,16 @@ Content : Contains color struct. Created : February 7, 2013 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_ContainerAllocator.h b/LibOVR/Src/Kernel/OVR_ContainerAllocator.h index 36c22d1..afc0e6a 100644 --- a/LibOVR/Src/Kernel/OVR_ContainerAllocator.h +++ b/LibOVR/Src/Kernel/OVR_ContainerAllocator.h @@ -6,16 +6,16 @@ Content : Template allocators and constructors for containers. Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Deque.h b/LibOVR/Src/Kernel/OVR_Deque.h new file mode 100644 index 0000000..747810e --- /dev/null +++ b/LibOVR/Src/Kernel/OVR_Deque.h @@ -0,0 +1,310 @@ +/************************************************************************************ + +Filename : OVR_Deque.h +Content : Deque container +Created : Nov. 15, 2013 +Authors : Dov Katz + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
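SharedLock, declared above and implemented in OVR_Atomic.cpp, lazily constructs its Lock inside the preallocated Buffer on the first GetLockAddRef and destroys it when the final ReleaseLock drops the use count back to zero, so it can live in static storage without static-initialization-order problems. A hedged usage sketch; the function and variable names are illustrative:

    static OVR::SharedLock HandlerLock;

    void TouchSharedHandlerState()
    {
        OVR::Lock* lock = HandlerLock.GetLockAddRef();
        {
            OVR::Lock::Locker scope(lock);
            // ... read or modify state shared between threads ...
        }
        HandlerLock.ReleaseLock(lock);
    }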
+See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Deque_h +#define OVR_Deque_h + +namespace OVR{ + +template <class Elem> +class Deque +{ +public: + enum + { + DefaultCapacity = 500 + }; + + Deque(int capacity = DefaultCapacity); + Deque(const Deque<Elem> &OtherDeque); + virtual ~Deque(void); + + virtual void PushBack (const Elem &Item); // Adds Item to the end + virtual void PushFront (const Elem &Item); // Adds Item to the beginning + virtual Elem PopBack (void); // Removes Item from the end + virtual Elem PopFront (void); // Removes Item from the beginning + virtual const Elem& PeekBack (int count = 0) const; // Returns count-th Item from the end + virtual const Elem& PeekFront (int count = 0) const; // Returns count-th Item from the beginning + + virtual inline UPInt GetSize (void) const; // Returns Number of Elements + virtual inline UPInt GetCapacity(void) const; // Returns the maximum possible number of elements + virtual void Clear (void); // Remove all elements + virtual inline bool IsEmpty () const; + virtual inline bool IsFull () const; + +protected: + Elem *Data; // The actual Data array + const int Capacity; // Deque capacity + int Beginning; // Index of the first element + int End; // Index of the next after last element + + // Instead of calculating the number of elements, using this variable + // is much more convenient. + int ElemCount; + +private: + Deque& operator= (const Deque& q) { }; // forbidden +}; + +template <class Elem> +class InPlaceMutableDeque : public Deque<Elem> +{ +public: + InPlaceMutableDeque( int capacity = Deque<Elem>::DefaultCapacity ) : Deque<Elem>( capacity ) {} + virtual ~InPlaceMutableDeque() {}; + + virtual Elem& PeekBack (int count = 0); // Returns count-th Item from the end + virtual Elem& PeekFront (int count = 0); // Returns count-th Item from the beginning +private: + InPlaceMutableDeque& operator=(const InPlaceMutableDeque& q) {}; +}; + +// Same as Deque, but allows to write more elements than maximum capacity +// Old elements are lost as they are overwritten with the new ones +template <class Elem> +class CircularBuffer : public Deque<Elem> +{ +public: + CircularBuffer(int MaxSize = Deque<Elem>::DefaultCapacity) : Deque<Elem>(MaxSize) { }; + + // The following methods are inline as a workaround for a VS bug causing erroneous C4505 warnings + // See: http://stackoverflow.com/questions/3051992/compiler-warning-at-c-template-base-class + inline virtual void PushBack (const Elem &Item); // Adds Item to the end, overwriting the oldest element at the beginning if necessary + inline virtual void PushFront (const Elem &Item); // Adds Item to the beginning, overwriting the oldest element at the end if necessary +}; + +//---------------------------------------------------------------------------------- + +// Deque Constructor function +template <class Elem> +Deque<Elem>::Deque(int capacity) : +Capacity( capacity ), Beginning(0), End(0), ElemCount(0) +{ + Data = (Elem*) OVR_ALLOC(Capacity * sizeof(Elem)); + ConstructArray<Elem>(Data, Capacity); +} + +// Deque Copy Constructor function +template <class Elem> +Deque<Elem>::Deque(const Deque &OtherDeque) : +Capacity( OtherDeque.Capacity ) // Initialize the constant +{ + Beginning = OtherDeque.Beginning; + End = OtherDeque.End; + ElemCount = OtherDeque.ElemCount; + + Data = (Elem*) OVR_ALLOC(Capacity * sizeof(Elem)); + for (int i = 0; i < Capacity; 
i++) + Data[i] = OtherDeque.Data[i]; +} + +// Deque Destructor function +template <class Elem> +Deque<Elem>::~Deque(void) +{ + DestructArray<Elem>(Data, Capacity); + OVR_FREE(Data); +} + +template <class Elem> +void Deque<Elem>::Clear() +{ + Beginning = 0; + End = 0; + ElemCount = 0; + + DestructArray<Elem>(Data, Capacity); + ConstructArray<Elem>(Data, Capacity); +} + +// Push functions +template <class Elem> +void Deque<Elem>::PushBack(const Elem &Item) +{ + // Error Check: Make sure we aren't + // exceeding our maximum storage space + OVR_ASSERT( ElemCount < Capacity ); + + Data[ End++ ] = Item; + ++ElemCount; + + // Check for wrap-around + if (End >= Capacity) + End -= Capacity; +} + +template <class Elem> +void Deque<Elem>::PushFront(const Elem &Item) +{ + // Error Check: Make sure we aren't + // exceeding our maximum storage space + OVR_ASSERT( ElemCount < Capacity ); + + Beginning--; + // Check for wrap-around + if (Beginning < 0) + Beginning += Capacity; + + Data[ Beginning ] = Item; + ++ElemCount; +} + +// Pop functions +template <class Elem> +Elem Deque<Elem>::PopFront(void) +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( ElemCount > 0 ); + + Elem ReturnValue = Data[ Beginning ]; + Destruct<Elem>(&Data[ Beginning ]); + Construct<Elem>(&Data[ Beginning ]); + + ++Beginning; + --ElemCount; + + // Check for wrap-around + if (Beginning >= Capacity) + Beginning -= Capacity; + + return ReturnValue; +} + +template <class Elem> +Elem Deque<Elem>::PopBack(void) +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( ElemCount > 0 ); + + End--; + --ElemCount; + + // Check for wrap-around + if (End < 0) + End += Capacity; + + Elem ReturnValue = Data[ End ]; + Destruct<Elem>(&Data[ End ]); + Construct<Elem>(&Data[ End ]); + + return ReturnValue; +} + +// Peek functions +template <class Elem> +const Elem& Deque<Elem>::PeekFront(int count) const +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( ElemCount > count ); + + int idx = Beginning + count; + if (idx >= Capacity) + idx -= Capacity; + return Data[ idx ]; +} + +template <class Elem> +const Elem& Deque<Elem>::PeekBack(int count) const +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( ElemCount > count ); + + int idx = End - count - 1; + if (idx < 0) + idx += Capacity; + return Data[ idx ]; +} + +// Mutable Peek functions +template <class Elem> +Elem& InPlaceMutableDeque<Elem>::PeekFront(int count) +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( Deque<Elem>::ElemCount > count ); + + int idx = Deque<Elem>::Beginning + count; + if (idx >= Deque<Elem>::Capacity) + idx -= Deque<Elem>::Capacity; + return Deque<Elem>::Data[ idx ]; +} + +template <class Elem> +Elem& InPlaceMutableDeque<Elem>::PeekBack(int count) +{ + // Error Check: Make sure we aren't reading from an empty Deque + OVR_ASSERT( Deque<Elem>::ElemCount > count ); + + int idx = Deque<Elem>::End - count - 1; + if (idx < 0) + idx += Deque<Elem>::Capacity; + return Deque<Elem>::Data[ idx ]; +} + +template <class Elem> +inline UPInt Deque<Elem>::GetCapacity(void) const +{ + return Deque<Elem>::Capacity; +} + +template <class Elem> +inline UPInt Deque<Elem>::GetSize(void) const +{ + return Deque<Elem>::ElemCount; +} + +template <class Elem> +inline bool Deque<Elem>::IsEmpty(void) const +{ + return Deque<Elem>::ElemCount==0; +} + +template <class Elem> +inline bool Deque<Elem>::IsFull(void) const +{ + return 
Deque<Elem>::ElemCount==Deque<Elem>::Capacity; +} + +// ******* CircularBuffer<Elem> ******* +// Push functions +template <class Elem> +void CircularBuffer<Elem>::PushBack(const Elem &Item) +{ + if (this->IsFull()) + this->PopFront(); + Deque<Elem>::PushBack(Item); +} + +template <class Elem> +void CircularBuffer<Elem>::PushFront(const Elem &Item) +{ + if (this->IsFull()) + this->PopBack(); + Deque<Elem>::PushFront(Item); +} + +}; + +#endif diff --git a/LibOVR/Src/Kernel/OVR_File.cpp b/LibOVR/Src/Kernel/OVR_File.cpp index 3f52488..31ab516 100644 --- a/LibOVR/Src/Kernel/OVR_File.cpp +++ b/LibOVR/Src/Kernel/OVR_File.cpp @@ -6,16 +6,16 @@ Content : File wrapper class implementation (Win32) Created : April 5, 1999 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_File.h b/LibOVR/Src/Kernel/OVR_File.h index 92c53ab..a8dc615 100644 --- a/LibOVR/Src/Kernel/OVR_File.h +++ b/LibOVR/Src/Kernel/OVR_File.h @@ -11,16 +11,16 @@ Notes : errno may not be preserved across use of BaseFile member functio : Directories cannot be deleted while files opened from them are in use (For the GetFullName function) -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_FileFILE.cpp b/LibOVR/Src/Kernel/OVR_FileFILE.cpp index fd01118..4fe4cbe 100644 --- a/LibOVR/Src/Kernel/OVR_FileFILE.cpp +++ b/LibOVR/Src/Kernel/OVR_FileFILE.cpp @@ -6,16 +6,16 @@ Content : File wrapper class implementation (Win32) Created : April 5, 1999 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
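CircularBuffer, defined above, differs from the base Deque only in its push behaviour: when full it silently discards the oldest element instead of asserting. A short usage sketch (assumes the OVR allocator is initialized, e.g. via System::Init, since Deque allocates its storage with OVR_ALLOC):

    OVR::CircularBuffer<float> recent(4);   // keep only the four newest samples

    for (int i = 0; i < 10; i++)
        recent.PushBack(float(i));          // older samples are overwritten

    // Only {6, 7, 8, 9} survive:
    float newest = recent.PeekBack();       // 9
    float oldest = recent.PeekFront();      // 6
    OVR_UNUSED2(newest, oldest);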
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -561,9 +561,10 @@ bool FILEFile::CloseCancel() } */ -File *FileFILEOpen(const String& path, int flags, int mode) +Ptr<File> FileFILEOpen(const String& path, int flags, int mode) { - return new FILEFile(path, flags, mode); + Ptr<File> result = *new FILEFile(path, flags, mode); + return result; } // Helper function: obtain file information time. diff --git a/LibOVR/Src/Kernel/OVR_Hash.h b/LibOVR/Src/Kernel/OVR_Hash.h index 98c206b..04c4db8 100644 --- a/LibOVR/Src/Kernel/OVR_Hash.h +++ b/LibOVR/Src/Kernel/OVR_Hash.h @@ -6,16 +6,16 @@ Content : Template hash-table/set implementation Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_KeyCodes.h b/LibOVR/Src/Kernel/OVR_KeyCodes.h index 42d98ee..b5c5930 100644 --- a/LibOVR/Src/Kernel/OVR_KeyCodes.h +++ b/LibOVR/Src/Kernel/OVR_KeyCodes.h @@ -5,16 +5,16 @@ Filename : OVR_KeyCodes.h Content : Common keyboard constants Created : September 19, 2012 -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
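The FileFILEOpen change above switches the factory from returning a raw File* to returning a Ptr<File>, so the reference created by new is owned by the smart pointer and released automatically. A sketch of the resulting calling pattern; the path is invented and the flag constant is assumed to be the Open_Read value from OVR_File.h:

    OVR::Ptr<OVR::File> file = OVR::FileFILEOpen("Profile.json", OVR::File::Open_Read, 0);
    if (file && file->IsValid())
    {
        OVR::UByte header[16];
        file->Read(header, (int)sizeof(header));
    }
    // No manual Release(): the Ptr<File> drops its reference when it leaves scope.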
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -147,7 +147,7 @@ enum KeyCode Key_Semicolon = 59, Key_Equal = 61, - Key_Bar = 192, + Key_Backtick = 96, // ` and tilda~ when shifted (US keyboard) Key_BracketLeft = 91, Key_Backslash, Key_BracketRight, diff --git a/LibOVR/Src/Kernel/OVR_List.h b/LibOVR/Src/Kernel/OVR_List.h index d5e79a3..6b49f37 100644 --- a/LibOVR/Src/Kernel/OVR_List.h +++ b/LibOVR/Src/Kernel/OVR_List.h @@ -6,16 +6,16 @@ Content : Template implementation for doubly-connected linked List Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Lockless.cpp b/LibOVR/Src/Kernel/OVR_Lockless.cpp new file mode 100644 index 0000000..0f25805 --- /dev/null +++ b/LibOVR/Src/Kernel/OVR_Lockless.cpp @@ -0,0 +1,231 @@ +/************************************************************************************ + +PublicHeader: OVR.h +Filename : OVR_Lockless.cpp +Content : Test logic for lock-less classes +Created : December 27, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "OVR_Lockless.h" + +#ifdef OVR_LOCKLESS_TEST + +#include "OVR_Threads.h" +#include "OVR_Timer.h" +#include "OVR_Log.h" + + +namespace OVR { namespace LocklessTest { + + +const int TestIterations = 10000000; + +// Use volatile dummys to force compiler to do spinning. +volatile int Dummy1; +int Unused1[32]; +volatile int Dummy2; +int Unused2[32]; +volatile int Dummy3; +int Unused3[32]; + + +// Data block out of 20 consecutive integers, should be internally consistent. 
+struct TestData +{ + enum { ItemCount = 20 }; + + int Data[ItemCount]; + + + void Set(int val) + { + for (int i=0; i<ItemCount; i++) + { + Data[i] = val*100 + i; + } + } + + int ReadAndCheckConsistency(int prevValue) const + { + int val = Data[0]; + + for (int i=1; i<ItemCount; i++) + { + + if (Data[i] != (val + i)) + { + // Only complain once per same-value entry + if (prevValue != val / 100) + { + LogText("LocklessTest Fail - corruption at %d inside block %d\n", + i, val/100); + // OVR_ASSERT(Data[i] == val + i); + } + break; + } + } + + return val / 100; + } +}; + + + +volatile bool FirstItemWritten = false; +LocklessUpdater<TestData> TestDataUpdater; + +// Use this lock to verify that testing algorithm is otherwise correct... +Lock TestLock; + + +//------------------------------------------------------------------------------------- + +// Consumer thread reads values from TestDataUpdater and +// ensures that each one is internally consistent. + +class Consumer : public Thread +{ + virtual int Run() + { + LogText("LocklessTest::Consumer::Run started.\n"); + + + while (!FirstItemWritten) + { + // spin until producer wrote first value... + } + + TestData d; + int oldValue = 0; + int newValue; + + do + { + { + //Lock::Locker scope(&TestLock); + d = TestDataUpdater.GetState(); + } + + newValue = d.ReadAndCheckConsistency(oldValue); + + // Values should increase or stay the same! + if (newValue < oldValue) + { + LogText("LocklessTest Fail - %d after %d; delta = %d\n", + newValue, oldValue, newValue - oldValue); + // OVR_ASSERT(0); + } + + + if (oldValue != newValue) + { + oldValue = newValue; + + if (oldValue % (TestIterations/30) == 0) + { + LogText("LocklessTest::Consumer - %5.2f%% done\n", + 100.0f * (float)oldValue/(float)TestIterations); + } + } + + // Spin a while + for (int j = 0; j< 300; j++) + { + Dummy3 = j; + } + + + } while (oldValue < (TestIterations * 99 / 100)); + + LogText("LocklessTest::Consumer::Run exiting.\n"); + return 0; + } + +}; + + +//------------------------------------------------------------------------------------- + +class Producer : public Thread +{ + + virtual int Run() + { + LogText("LocklessTest::Producer::Run started.\n"); + + for (int testVal = 0; testVal < TestIterations; testVal++) + { + TestData d; + d.Set(testVal); + + { + //Lock::Locker scope(&TestLock); + TestDataUpdater.SetState(d); + } + + FirstItemWritten = true; + + // Spin a bit + for(int j = 0; j < 1000; j++) + { + Dummy2 = j; + } + + if (testVal % (TestIterations/30) == 0) + { + LogText("LocklessTest::Producer - %5.2f%% done\n", + 100.0f * (float)testVal/(float)TestIterations); + } + } + + LogText("LocklessTest::Producer::Run exiting.\n"); + return 0; + } +}; + + +} // namespace LocklessTest + + + +void StartLocklessTest() +{ + // These threads will release themselves once done + Ptr<LocklessTest::Producer> producerThread = *new LocklessTest::Producer; + Ptr<LocklessTest::Consumer> consumerThread = *new LocklessTest::Consumer; + + producerThread->Start(); + consumerThread->Start(); + + /* + while (!producerThread->IsFinished() && consumerThread->IsFinished()) + { + Thread::MSleep(500); + } */ + + // TBD: Cleanup +} + + +} // namespace OVR + +#endif // OVR_LOCKLESS_TEST diff --git a/LibOVR/Src/Kernel/OVR_Lockless.h b/LibOVR/Src/Kernel/OVR_Lockless.h new file mode 100644 index 0000000..a12f824 --- /dev/null +++ b/LibOVR/Src/Kernel/OVR_Lockless.h @@ -0,0 +1,107 @@ +/************************************************************************************ + +PublicHeader: OVR.h +Filename : 
OVR_Lockless.h +Content : Lock-less classes for producer/consumer communication +Created : November 9, 2013 +Authors : John Carmack + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Lockless_h +#define OVR_Lockless_h + +#include "OVR_Atomic.h" + +// Define this to compile-in Lockless test logic +//#define OVR_LOCKLESS_TEST + +namespace OVR { + + +// ***** LocklessUpdater + +// For single producer cases where you only care about the most recent update, not +// necessarily getting every one that happens (vsync timing, SensorFusion updates). +// +// This is multiple consumer safe, but is currently only used with a single consumer. + +template<class T> +class LocklessUpdater +{ +public: + LocklessUpdater() : UpdateBegin( 0 ), UpdateEnd( 0 ) {} + + T GetState() const + { + // Copy the state out, then retry with the alternate slot + // if we determine that our copy may have been partially + // stepped on by a new update. + T state; + int begin, end, final; + + for(;;) + { + // We are adding 0, only using these as atomic memory barriers, so it + // is ok to cast off the const, allowing GetState() to remain const. + end = UpdateEnd.ExchangeAdd_Sync(0); + state = Slots[ end & 1 ]; + begin = UpdateBegin.ExchangeAdd_Sync(0); + if ( begin == end ) { + break; + } + + // The producer is potentially blocked while only having partially + // written the update, so copy out the other slot. + state = Slots[ (begin & 1) ^ 1 ]; + final = UpdateBegin.ExchangeAdd_NoSync(0); + if ( final == begin ) { + break; + } + + // The producer completed the last update and started a new one before + // we got it copied out, so try fetching the current buffer again. + } + return state; + } + + void SetState( T state ) + { + const int slot = UpdateBegin.ExchangeAdd_Sync(1) & 1; + // Write to (slot ^ 1) because ExchangeAdd returns 'previous' value before add. + Slots[slot ^ 1] = state; + UpdateEnd.ExchangeAdd_Sync(1); + } + + mutable AtomicInt<int> UpdateBegin; + mutable AtomicInt<int> UpdateEnd; + T Slots[2]; +}; + + +#ifdef OVR_LOCKLESS_TEST +void StartLocklessTest(); +#endif + + +} // namespace OVR + +#endif // OVR_Lockless_h + diff --git a/LibOVR/Src/Kernel/OVR_Log.cpp b/LibOVR/Src/Kernel/OVR_Log.cpp index 76b09ad..d5f8a68 100644 --- a/LibOVR/Src/Kernel/OVR_Log.cpp +++ b/LibOVR/Src/Kernel/OVR_Log.cpp @@ -5,16 +5,16 @@ Content : Logging support Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
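LocklessUpdater, shown above, lets a single producer publish a value that consumers can read without taking a lock; readers always get an internally consistent snapshot but may skip intermediate updates, which is exactly what the producer/consumer threads in OVR_Lockless.cpp exercise. A minimal usage sketch with a stand-in payload type:

    struct PoseSample { float x, y, z; };            // stand-in for real published state

    OVR::LocklessUpdater<PoseSample> SharedPose;

    // Producer thread (e.g. a sensor thread): publish the newest value.
    void PublishPose(const PoseSample& p)  { SharedPose.SetState(p); }

    // Consumer thread (e.g. the render thread): read a consistent snapshot.
    PoseSample ReadPose()                  { return SharedPose.GetState(); }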
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Log.h b/LibOVR/Src/Kernel/OVR_Log.h index 9c6feb6..4d9acc1 100644 --- a/LibOVR/Src/Kernel/OVR_Log.h +++ b/LibOVR/Src/Kernel/OVR_Log.h @@ -6,16 +6,16 @@ Content : Logging support Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -108,7 +108,7 @@ public: virtual ~Log(); // Log formating buffer size used by default LogMessageVarg. Longer strings are truncated. - enum { MaxLogBufferMessageSize = 2048 }; + enum { MaxLogBufferMessageSize = 4096 }; unsigned GetLoggingMask() const { return LoggingMask; } void SetLoggingMask(unsigned logMask) { LoggingMask = logMask; } diff --git a/LibOVR/Src/Kernel/OVR_Math.cpp b/LibOVR/Src/Kernel/OVR_Math.cpp index e5a1f0e..396d3ff 100644 --- a/LibOVR/Src/Kernel/OVR_Math.cpp +++ b/LibOVR/Src/Kernel/OVR_Math.cpp @@ -5,16 +5,16 @@ Content : Implementation of 3D primitives such as vectors, matrices. Created : September 4, 2012 Authors : Andrew Reisse, Michael Antonov, Anna Yershova -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -72,83 +72,20 @@ const double Math<double>::SingularityRadius = 0.000000000001; // Use for Gimbal //------------------------------------------------------------------------------------- -// ***** Matrix4f - - -Matrix4f Matrix4f::LookAtRH(const Vector3f& eye, const Vector3f& at, const Vector3f& up) -{ - Vector3f z = (eye - at).Normalized(); // Forward - Vector3f x = up.Cross(z).Normalized(); // Right - Vector3f y = z.Cross(x); - - Matrix4f m(x.x, x.y, x.z, -(x.Dot(eye)), - y.x, y.y, y.z, -(y.Dot(eye)), - z.x, z.y, z.z, -(z.Dot(eye)), - 0, 0, 0, 1 ); - return m; -} - -Matrix4f Matrix4f::LookAtLH(const Vector3f& eye, const Vector3f& at, const Vector3f& up) -{ - Vector3f z = (at - eye).Normalized(); // Forward - Vector3f x = up.Cross(z).Normalized(); // Right - Vector3f y = z.Cross(x); - - Matrix4f m(x.x, x.y, x.z, -(x.Dot(eye)), - y.x, y.y, y.z, -(y.Dot(eye)), - z.x, z.y, z.z, -(z.Dot(eye)), - 0, 0, 0, 1 ); - return m; -} - - -Matrix4f Matrix4f::PerspectiveLH(float yfov, float aspect, float znear, float zfar) -{ - Matrix4f m; - float tanHalfFov = tan(yfov * 0.5f); - - m.M[0][0] = 1.0f / (aspect * tanHalfFov); - m.M[1][1] = 1.0f / tanHalfFov; - m.M[2][2] = zfar / (zfar - znear); - m.M[3][2] = 1.0f; - m.M[2][3] = (zfar * znear) / (znear - zfar); - m.M[3][3] = 0.0f; - - // Note: Post-projection matrix result assumes Left-Handed coordinate system, - // with Y up, X right and Z forward. This supports positive z-buffer values. - return m; -} - - -Matrix4f Matrix4f::PerspectiveRH(float yfov, float aspect, float znear, float zfar) -{ - Matrix4f m; - float tanHalfFov = tan(yfov * 0.5f); - - m.M[0][0] = 1.0f / (aspect * tanHalfFov); - m.M[1][1] = 1.0f / tanHalfFov; - m.M[2][2] = zfar / (znear - zfar); - // m.M[2][2] = zfar / (zfar - znear); - m.M[3][2] = -1.0f; - m.M[2][3] = (zfar * znear) / (znear - zfar); - m.M[3][3] = 0.0f; - - // Note: Post-projection matrix result assumes Left-Handed coordinate system, - // with Y up, X right and Z forward. This supports positive z-buffer values. - // This is the case even for RHS cooridnate input. - return m; -} - -Matrix4f Matrix4f::Ortho2D(float w, float h) -{ - Matrix4f m; - m.M[0][0] = 2.0f/w; - m.M[1][1] = -2.0f/h; - m.M[0][3] = -1.0; - m.M[1][3] = 1.0; - m.M[2][2] = 0; - return m; -} +// ***** Matrix4 + +template<> +const Matrix4<float> Matrix4<float>::IdentityValue = Matrix4<float>(1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f); + +template<> +const Matrix4<double> Matrix4<double>::IdentityValue = Matrix4<double>(1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0); + } // Namespace OVR diff --git a/LibOVR/Src/Kernel/OVR_Math.h b/LibOVR/Src/Kernel/OVR_Math.h index cdcce81..9bd5bad 100644 --- a/LibOVR/Src/Kernel/OVR_Math.h +++ b/LibOVR/Src/Kernel/OVR_Math.h @@ -4,18 +4,19 @@ PublicHeader: OVR.h Filename : OVR_Math.h Content : Implementation of 3D primitives such as vectors, matrices. Created : September 4, 2012 -Authors : Andrew Reisse, Michael Antonov, Steve LaValle, Anna Yershova, Max Katsev +Authors : Andrew Reisse, Michael Antonov, Steve LaValle, + Anna Yershova, Max Katsev, Dov Katz -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. 
All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -35,6 +36,8 @@ limitations under the License. #include "OVR_Types.h" #include "OVR_RefCount.h" #include "OVR_Std.h" +#include "OVR_Alg.h" + namespace OVR { @@ -86,15 +89,85 @@ struct WorldAxes { OVR_ASSERT(abs(x) != abs(y) && abs(y) != abs(z) && abs(z) != abs(x));} }; +} // namespace OVR + + +//------------------------------------------------------------------------------------// +// ***** C Compatibility Types + +// These declarations are used to support conversion between C types used in +// LibOVR C interfaces and their C++ versions. As an example, they allow passing +// Vector3f into a function that expects ovrVector3f. + +typedef struct ovrQuatf_ ovrQuatf; +typedef struct ovrQuatd_ ovrQuatd; +typedef struct ovrSizei_ ovrSizei; +typedef struct ovrSizef_ ovrSizef; +typedef struct ovrRecti_ ovrRecti; +typedef struct ovrVector2i_ ovrVector2i; +typedef struct ovrVector2f_ ovrVector2f; +typedef struct ovrVector3f_ ovrVector3f; +typedef struct ovrVector3d_ ovrVector3d; +typedef struct ovrMatrix3d_ ovrMatrix3d; +typedef struct ovrMatrix4f_ ovrMatrix4f; +typedef struct ovrPosef_ ovrPosef; +typedef struct ovrPosed_ ovrPosed; +typedef struct ovrPoseStatef_ ovrPoseStatef; +typedef struct ovrPoseStated_ ovrPoseStated; + +namespace OVR { + +// Forward-declare our templates. +template<class T> class Quat; +template<class T> class Size; +template<class T> class Rect; +template<class T> class Vector2; +template<class T> class Vector3; +template<class T> class Matrix3; +template<class T> class Matrix4; +template<class T> class Pose; +template<class T> class PoseState; + +// CompatibleTypes::Type is used to lookup a compatible C-version of a C++ class. +template<class C> +struct CompatibleTypes +{ + // Declaration here seems necessary for MSVC; specializations are + // used instead. + typedef float Type; +}; + +// Specializations providing CompatibleTypes::Type value. 
+template<> struct CompatibleTypes<Quat<float> > { typedef ovrQuatf Type; }; +template<> struct CompatibleTypes<Quat<double> > { typedef ovrQuatd Type; }; +template<> struct CompatibleTypes<Matrix3<double> > { typedef ovrMatrix3d Type; }; +template<> struct CompatibleTypes<Matrix4<float> > { typedef ovrMatrix4f Type; }; +template<> struct CompatibleTypes<Size<int> > { typedef ovrSizei Type; }; +template<> struct CompatibleTypes<Size<float> > { typedef ovrSizef Type; }; +template<> struct CompatibleTypes<Rect<int> > { typedef ovrRecti Type; }; +template<> struct CompatibleTypes<Vector2<int> > { typedef ovrVector2i Type; }; +template<> struct CompatibleTypes<Vector2<float> > { typedef ovrVector2f Type; }; +template<> struct CompatibleTypes<Vector3<float> > { typedef ovrVector3f Type; }; +template<> struct CompatibleTypes<Vector3<double> > { typedef ovrVector3d Type; }; + +template<> struct CompatibleTypes<Pose<float> > { typedef ovrPosef Type; }; +template<> struct CompatibleTypes<PoseState<float> >{ typedef ovrPoseStatef Type; }; + +template<> struct CompatibleTypes<Pose<double> > { typedef ovrPosed Type; }; +template<> struct CompatibleTypes<PoseState<double> >{ typedef ovrPoseStated Type; }; //------------------------------------------------------------------------------------// -// ************************************ Math *****************************************// +// ***** Math // // Math class contains constants and functions. This class is a template specialized // per type, with Math<float> and Math<double> being distinct. template<class Type> class Math { +public: + // By default, support explicit conversion to float. This allows Vector2<int> to + // compile, for example. + typedef float OtherFloatType; }; // Single-precision Math constants class. @@ -116,6 +189,9 @@ public: static const float Tolerance; // 0.00001f; static const float SingularityRadius; // 0.0000001f for Gimbal lock numerical problems + + // Used to support direct conversions in template classes. + typedef double OtherFloatType; }; // Double-precision Math constants class. @@ -136,10 +212,12 @@ public: static const double DegreeToRadFactor; static const double Tolerance; // 0.00001; - static const double SingularityRadius; // 0.000000000001 for Gimbal lock numerical problems + static const double SingularityRadius; // 0.000000000001 for Gimbal lock numerical problems + typedef float OtherFloatType; }; + typedef Math<float> Mathf; typedef Math<double> Mathd; @@ -172,6 +250,7 @@ inline int isnan(double x) { return _isnan(x); }; template<class T> class Quat; + //------------------------------------------------------------------------------------- // ***** Vector2<> @@ -187,7 +266,22 @@ public: Vector2() : x(0), y(0) { } Vector2(T x_, T y_) : x(x_), y(y_) { } explicit Vector2(T s) : x(s), y(s) { } + explicit Vector2(const Vector2<typename Math<T>::OtherFloatType> &src) + : x((T)src.x), y((T)src.y) { } + + // C-interop support. 
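A small sketch of what the OtherFloatType hook enables: explicit conversion between the float, double, and integer vector flavors via the new constructors (values arbitrary, include path assumed):

#include "Kernel/OVR_Math.h"

static void PrecisionConversions()
{
    OVR::Vector2d precise(0.125, 0.25);
    OVR::Vector2f approx(precise);                        // explicit double -> float narrowing
    OVR::Vector2d widened(approx);                        // and back up to double precision
    OVR::Vector2<int> cell(OVR::Vector2f(12.7f, 3.2f));   // int vectors convert from float, truncating
}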
+ typedef typename CompatibleTypes<Vector2<T> >::Type CompatibleType; + + Vector2(const CompatibleType& s) : x(s.x), y(s.y) { } + + operator const CompatibleType& () const + { + OVR_COMPILER_ASSERT(sizeof(Vector2<T>) == sizeof(CompatibleType)); + return reinterpret_cast<const CompatibleType&>(*this); + } + + bool operator== (const Vector2& b) const { return x == b.x && y == b.y; } bool operator!= (const Vector2& b) const { return x != b.x || y != b.y; } @@ -207,6 +301,11 @@ public: x *= rcp; y *= rcp; return *this; } + static Vector2 Min(const Vector2& a, const Vector2& b) { return Vector2((a.x < b.x) ? a.x : b.x, + (a.y < b.y) ? a.y : b.y); } + static Vector2 Max(const Vector2& a, const Vector2& b) { return Vector2((a.x > b.x) ? a.x : b.x, + (a.y > b.y) ? a.y : b.y); } + // Compare two vectors for equality with tolerance. Returns true if vectors match withing tolerance. bool Compare(const Vector2&b, T tolerance = Mathf::Tolerance) { @@ -216,10 +315,15 @@ public: // Entrywise product of two vectors Vector2 EntrywiseMultiply(const Vector2& b) const { return Vector2(x * b.x, y * b.y);} + + // Multiply and divide operators do entry-wise math. Used Dot() for dot product. + Vector2 operator* (const Vector2& b) const { return Vector2(x * b.x, y * b.y); } + Vector2 operator/ (const Vector2& b) const { return Vector2(x / b.x, y / b.y); } + // Dot product // Used to calculate angle q between two vectors among other things, // as (A dot B) = |a||b|cos(q). - T Dot(const Vector2& b) const { return x*b.x + y*b.y; } + T Dot(const Vector2& b) const { return x*b.x + y*b.y; } // Returns the angle from this vector to b, in radians. T Angle(const Vector2& b) const @@ -232,9 +336,13 @@ public: // Return Length of the vector squared. T LengthSq() const { return (x * x + y * y); } + // Return vector length. T Length() const { return sqrt(LengthSq()); } + // Returns squared distance between two points represented by vectors. + T DistanceSq(Vector2& b) const { return (*this - b).LengthSq(); } + // Returns distance between two points represented by vectors. T Distance(Vector2& b) const { return (*this - b).Length(); } @@ -273,7 +381,7 @@ public: typedef Vector2<float> Vector2f; typedef Vector2<double> Vector2d; - +typedef Vector2<int> Vector2i; //------------------------------------------------------------------------------------- // ***** Vector3<> - 3D vector of {x, y, z} @@ -291,6 +399,20 @@ public: Vector3() : x(0), y(0), z(0) { } Vector3(T x_, T y_, T z_ = 0) : x(x_), y(y_), z(z_) { } explicit Vector3(T s) : x(s), y(s), z(s) { } + explicit Vector3(const Vector3<typename Math<T>::OtherFloatType> &src) + : x((T)src.x), y((T)src.y), z((T)src.z) { } + + + // C-interop support. + typedef typename CompatibleTypes<Vector3<T> >::Type CompatibleType; + + Vector3(const CompatibleType& s) : x(s.x), y(s.y), z(s.z) { } + + operator const CompatibleType& () const + { + OVR_COMPILER_ASSERT(sizeof(Vector3<T>) == sizeof(CompatibleType)); + return reinterpret_cast<const CompatibleType&>(*this); + } bool operator== (const Vector3& b) const { return x == b.x && y == b.y && z == b.z; } bool operator!= (const Vector3& b) const { return x != b.x || y != b.y || z != b.z; } @@ -311,6 +433,19 @@ public: x *= rcp; y *= rcp; z *= rcp; return *this; } + static Vector3 Min(const Vector3& a, const Vector3& b) + { + return Vector3((a.x < b.x) ? a.x : b.x, + (a.y < b.y) ? a.y : b.y, + (a.z < b.z) ? a.z : b.z); + } + static Vector3 Max(const Vector3& a, const Vector3& b) + { + return Vector3((a.x > b.x) ? a.x : b.x, + (a.y > b.y) ? 
a.y : b.y, + (a.z > b.z) ? a.z : b.z); + } + // Compare two vectors for equality with tolerance. Returns true if vectors match withing tolerance. bool Compare(const Vector3&b, T tolerance = Mathf::Tolerance) { @@ -319,11 +454,33 @@ public: (fabs(b.z-z) < tolerance); } + T& operator[] (int idx) + { + OVR_ASSERT(0 <= idx && idx < 3); + return *(&x + idx); + } + + const T& operator[] (int idx) const + { + OVR_ASSERT(0 <= idx && idx < 3); + return *(&x + idx); + } + // Entrywise product of two vectors Vector3 EntrywiseMultiply(const Vector3& b) const { return Vector3(x * b.x, y * b.y, z * b.z);} + // Multiply and divide operators do entry-wise math + Vector3 operator* (const Vector3& b) const { return Vector3(x * b.x, + y * b.y, + z * b.z); } + + Vector3 operator/ (const Vector3& b) const { return Vector3(x / b.x, + y / b.y, + z / b.z); } + + // Dot product // Used to calculate angle q between two vectors among other things, // as (A dot B) = |a||b|cos(q). @@ -347,11 +504,15 @@ public: // Return Length of the vector squared. T LengthSq() const { return (x * x + y * y + z * z); } + // Return vector length. T Length() const { return sqrt(LengthSq()); } + // Returns squared distance between two points represented by vectors. + T DistanceSq(Vector3& b) const { return (*this - b).LengthSq(); } + // Returns distance between two points represented by vectors. - T Distance(Vector3& b) const { return (*this - b).Length(); } + T Distance(Vector3 const& b) const { return (*this - b).Length(); } // Determine if this a unit vector. bool IsNormalized() const { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance; } @@ -392,6 +553,24 @@ public: typedef Vector3<float> Vector3f; typedef Vector3<double> Vector3d; +typedef Vector3<SInt32> Vector3i; + + +// JDC: this was defined in Render_Device.h, I moved it here, but it +// needs to be fleshed out like the other Vector types. +// +// A vector with a dummy w component for alignment in uniform buffers (and for float colors). +// The w component is not used in any calculations. + +struct Vector4f : public Vector3f +{ + float w; + + Vector4f() : w(1) {} + Vector4f(const Vector3f& v) : Vector3f(v), w(1) {} + Vector4f(float r, float g, float b, float a) : Vector3f(r,g,b), w(a) {} +}; + //------------------------------------------------------------------------------------- @@ -404,34 +583,53 @@ template<class T> class Size { public: - T Width, Height; + T w, h; + + Size() : w(0), h(0) { } + Size(T w_, T h_) : w(w_), h(h_) { } + explicit Size(T s) : w(s), h(s) { } + explicit Size(const Size<typename Math<T>::OtherFloatType> &src) + : w((T)src.w), h((T)src.h) { } + + // C-interop support. 
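A brief illustrative sketch of the new Vector3 entry-wise operators, component indexing, and the Min/Max/DistanceSq helpers (values arbitrary, include path assumed):

#include "Kernel/OVR_Math.h"

static void Vector3Helpers()
{
    OVR::Vector3f a(1.0f, 5.0f, 2.0f), b(3.0f, 2.0f, 4.0f);

    OVR::Vector3f lower  = OVR::Vector3f::Min(a, b);   // (1, 2, 2)
    OVR::Vector3f upper  = OVR::Vector3f::Max(a, b);   // (3, 5, 4)
    OVR::Vector3f scaled = a * b;                       // entry-wise product: (3, 10, 8)
    float         yComp  = a[1];                        // asserted component access: 5
    float         gap2   = a.DistanceSq(b);             // (-2, 3, -2) squared and summed: 17
}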
+ typedef typename CompatibleTypes<Size<T> >::Type CompatibleType; - Size() : Width(0), Height(0) { } - Size(T w_, T h_) : Width(w_), Height(h_) { } - explicit Size(T s) : Width(s), Height(s) { } + Size(const CompatibleType& s) : w(s.w), h(s.h) { } + + operator const CompatibleType& () const + { + OVR_COMPILER_ASSERT(sizeof(Size<T>) == sizeof(CompatibleType)); + return reinterpret_cast<const CompatibleType&>(*this); + } - bool operator== (const Size& b) const { return Width == b.Width && Height == b.Height; } - bool operator!= (const Size& b) const { return Width != b.Width || Height != b.Height; } + bool operator== (const Size& b) const { return w == b.w && h == b.h; } + bool operator!= (const Size& b) const { return w != b.w || h != b.h; } - Size operator+ (const Size& b) const { return Size(Width + b.Width, Height + b.Height); } - Size& operator+= (const Size& b) { Width += b.Width; Height += b.Height; return *this; } - Size operator- (const Size& b) const { return Size(Width - b.Width, Height - b.Height); } - Size& operator-= (const Size& b) { Width -= b.Width; Height -= b.Height; return *this; } - Size operator- () const { return Size(-Width, -Height); } - Size operator* (const Size& b) const { return Size(Width * b.Width, Height * b.Height); } - Size& operator*= (const Size& b) { Width *= b.Width; Height *= b.Height; return *this; } - Size operator/ (const Size& b) const { return Size(Width / b.Width, Height / b.Height); } - Size& operator/= (const Size& b) { Width /= b.Width; Height /= b.Height; return *this; } + Size operator+ (const Size& b) const { return Size(w + b.w, h + b.h); } + Size& operator+= (const Size& b) { w += b.w; h += b.h; return *this; } + Size operator- (const Size& b) const { return Size(w - b.w, h - b.h); } + Size& operator-= (const Size& b) { w -= b.w; h -= b.h; return *this; } + Size operator- () const { return Size(-w, -h); } + Size operator* (const Size& b) const { return Size(w * b.w, h * b.h); } + Size& operator*= (const Size& b) { w *= b.w; h *= b.h; return *this; } + Size operator/ (const Size& b) const { return Size(w / b.w, h / b.h); } + Size& operator/= (const Size& b) { w /= b.w; h /= b.h; return *this; } // Scalar multiplication/division scales both components. - Size operator* (T s) const { return Size(Width*s, Height*s); } - Size& operator*= (T s) { Width *= s; Height *= s; return *this; } - Size operator/ (T s) const { return Size(Width/s, Height/s); } - Size& operator/= (T s) { Width /= s; Height /= s; return *this; } + Size operator* (T s) const { return Size(w*s, h*s); } + Size& operator*= (T s) { w *= s; h *= s; return *this; } + Size operator/ (T s) const { return Size(w/s, h/s); } + Size& operator/= (T s) { w /= s; h /= s; return *this; } + + static Size Min(const Size& a, const Size& b) { return Size((a.w < b.w) ? a.w : b.w, + (a.h < b.h) ? a.h : b.h); } + static Size Max(const Size& a, const Size& b) { return Size((a.w > b.w) ? a.w : b.w, + (a.h > b.h) ? a.h : b.h); } + - T Area() const { return Width * Height; } + T Area() const { return w * h; } - inline Vector2<T> ToVector() const { return Vector2<T>(Width, Height); } + inline Vector2<T> ToVector() const { return Vector2<T>(w, h); } }; @@ -441,10 +639,494 @@ typedef Size<float> Sizef; typedef Size<double> Sized; + +//----------------------------------------------------------------------------------- +// ***** Rect + +// Rect describes a rectangular area for rendering, that includes position and size. 
+template<class T> +class Rect +{ +public: + T x, y; + T w, h; + + Rect() { } + Rect(T x1, T y1, T w1, T h1) : x(x1), y(y1), w(w1), h(h1) { } + Rect(const Vector2<T>& pos, const Size<T>& sz) : x(pos.x), y(pos.y), w(sz.w), h(sz.h) { } + Rect(const Size<T>& sz) : x(0), y(0), w(sz.w), h(sz.h) { } + + // C-interop support. + typedef typename CompatibleTypes<Rect<T> >::Type CompatibleType; + + Rect(const CompatibleType& s) : x(s.Pos.x), y(s.Pos.y), w(s.Size.w), h(s.Size.h) { } + + operator const CompatibleType& () const + { + OVR_COMPILER_ASSERT(sizeof(Rect<T>) == sizeof(CompatibleType)); + return reinterpret_cast<const CompatibleType&>(*this); + } + + Vector2<T> GetPos() const { return Vector2<T>(x, y); } + Size<T> GetSize() const { return Size<T>(w, h); } + void SetPos(const Vector2<T>& pos) { x = pos.x; y = pos.y; } + void SetSize(const Size<T>& sz) { w = sz.w; h = sz.h; } + + bool operator == (const Rect& vp) const + { return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h); } + bool operator != (const Rect& vp) const + { return !operator == (vp); } +}; + +typedef Rect<int> Recti; + + +//-------------------------------------------------------------------------------------// +// ***** Quat +// +// Quatf represents a quaternion class used for rotations. +// +// Quaternion multiplications are done in right-to-left order, to match the +// behavior of matrices. + + +template<class T> +class Quat +{ +public: + // w + Xi + Yj + Zk + T x, y, z, w; + + Quat() : x(0), y(0), z(0), w(1) { } + Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) { } + explicit Quat(const Quat<typename Math<T>::OtherFloatType> &src) + : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) { } + + // C-interop support. + Quat(const typename CompatibleTypes<Quat<T> >::Type& s) : x(s.x), y(s.y), z(s.z), w(s.w) { } + + operator const typename CompatibleTypes<Quat<T> >::Type () const + { + typename CompatibleTypes<Quat<T> >::Type result; + result.x = x; + result.y = y; + result.z = z; + result.w = w; + return result; + } + + // Constructs quaternion for rotation around the axis by an angle. + Quat(const Vector3<T>& axis, T angle) + { + // Make sure we don't divide by zero. + if (axis.LengthSq() == 0) + { + // Assert if the axis is zero, but the angle isn't + OVR_ASSERT(angle == 0); + x = 0; y = 0; z = 0; w = 1; + return; + } + + Vector3<T> unitAxis = axis.Normalized(); + T sinHalfAngle = sin(angle * T(0.5)); + + w = cos(angle * T(0.5)); + x = unitAxis.x * sinHalfAngle; + y = unitAxis.y * sinHalfAngle; + z = unitAxis.z * sinHalfAngle; + } + + // Constructs quaternion for rotation around one of the coordinate axis by an angle. 
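A short sketch of the axis-angle constructor together with GetAxisAngle and Rotate (Rotate is defined a little further down in the class); the half-turn about +Y is an arbitrary choice for illustration:

#include "Kernel/OVR_Math.h"

static void AxisAngleRoundTrip()
{
    OVR::Quatf halfTurn(OVR::Vector3f(0, 1, 0), OVR::Mathf::Pi);   // 180 degrees about +Y

    OVR::Vector3f axis;
    float angle;
    halfTurn.GetAxisAngle(&axis, &angle);     // recovers (0, 1, 0) and Pi

    OVR::Vector3f up = halfTurn.Rotate(OVR::Vector3f(0, 1, 0));    // the axis itself is left unchanged
}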
+ Quat(Axis A, T angle, RotateDirection d = Rotate_CCW, HandedSystem s = Handed_R) + { + T sinHalfAngle = s * d *sin(angle * T(0.5)); + T v[3]; + v[0] = v[1] = v[2] = T(0); + v[A] = sinHalfAngle; + + w = cos(angle * T(0.5)); + x = v[0]; + y = v[1]; + z = v[2]; + } + + // Compute axis and angle from quaternion + void GetAxisAngle(Vector3<T>* axis, T* angle) const + { + if ( x*x + y*y + z*z > Math<T>::Tolerance * Math<T>::Tolerance ) { + *axis = Vector3<T>(x, y, z).Normalized(); + *angle = 2 * Acos(w); + if (*angle > Math<T>::Pi) // Reduce the magnitude of the angle, if necessary + { + *angle = Math<T>::TwoPi - *angle; + *axis = *axis * (-1); + } + } + else + { + *axis = Vector3<T>(1, 0, 0); + *angle= 0; + } + } + + // Constructs the quaternion from a rotation matrix + explicit Quat(const Matrix4<T>& m) + { + T trace = m.M[0][0] + m.M[1][1] + m.M[2][2]; + + // In almost all cases, the first part is executed. + // However, if the trace is not positive, the other + // cases arise. + if (trace > T(0)) + { + T s = sqrt(trace + T(1)) * T(2); // s=4*qw + w = T(0.25) * s; + x = (m.M[2][1] - m.M[1][2]) / s; + y = (m.M[0][2] - m.M[2][0]) / s; + z = (m.M[1][0] - m.M[0][1]) / s; + } + else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) + { + T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2); + w = (m.M[2][1] - m.M[1][2]) / s; + x = T(0.25) * s; + y = (m.M[0][1] + m.M[1][0]) / s; + z = (m.M[2][0] + m.M[0][2]) / s; + } + else if (m.M[1][1] > m.M[2][2]) + { + T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy + w = (m.M[0][2] - m.M[2][0]) / s; + x = (m.M[0][1] + m.M[1][0]) / s; + y = T(0.25) * s; + z = (m.M[1][2] + m.M[2][1]) / s; + } + else + { + T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz + w = (m.M[1][0] - m.M[0][1]) / s; + x = (m.M[0][2] + m.M[2][0]) / s; + y = (m.M[1][2] + m.M[2][1]) / s; + z = T(0.25) * s; + } + } + + // Constructs the quaternion from a rotation matrix + explicit Quat(const Matrix3<T>& m) + { + T trace = m.M[0][0] + m.M[1][1] + m.M[2][2]; + + // In almost all cases, the first part is executed. + // However, if the trace is not positive, the other + // cases arise. 
+ if (trace > T(0)) + { + T s = sqrt(trace + T(1)) * T(2); // s=4*qw + w = T(0.25) * s; + x = (m.M[2][1] - m.M[1][2]) / s; + y = (m.M[0][2] - m.M[2][0]) / s; + z = (m.M[1][0] - m.M[0][1]) / s; + } + else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) + { + T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2); + w = (m.M[2][1] - m.M[1][2]) / s; + x = T(0.25) * s; + y = (m.M[0][1] + m.M[1][0]) / s; + z = (m.M[2][0] + m.M[0][2]) / s; + } + else if (m.M[1][1] > m.M[2][2]) + { + T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy + w = (m.M[0][2] - m.M[2][0]) / s; + x = (m.M[0][1] + m.M[1][0]) / s; + y = T(0.25) * s; + z = (m.M[1][2] + m.M[2][1]) / s; + } + else + { + T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz + w = (m.M[1][0] - m.M[0][1]) / s; + x = (m.M[0][2] + m.M[2][0]) / s; + y = (m.M[1][2] + m.M[2][1]) / s; + z = T(0.25) * s; + } + } + + bool operator== (const Quat& b) const { return x == b.x && y == b.y && z == b.z && w == b.w; } + bool operator!= (const Quat& b) const { return x != b.x || y != b.y || z != b.z || w != b.w; } + + Quat operator+ (const Quat& b) const { return Quat(x + b.x, y + b.y, z + b.z, w + b.w); } + Quat& operator+= (const Quat& b) { w += b.w; x += b.x; y += b.y; z += b.z; return *this; } + Quat operator- (const Quat& b) const { return Quat(x - b.x, y - b.y, z - b.z, w - b.w); } + Quat& operator-= (const Quat& b) { w -= b.w; x -= b.x; y -= b.y; z -= b.z; return *this; } + + Quat operator* (T s) const { return Quat(x * s, y * s, z * s, w * s); } + Quat& operator*= (T s) { w *= s; x *= s; y *= s; z *= s; return *this; } + Quat operator/ (T s) const { T rcp = T(1)/s; return Quat(x * rcp, y * rcp, z * rcp, w *rcp); } + Quat& operator/= (T s) { T rcp = T(1)/s; w *= rcp; x *= rcp; y *= rcp; z *= rcp; return *this; } + + + // Get Imaginary part vector + Vector3<T> Imag() const { return Vector3<T>(x,y,z); } + + // Get quaternion length. + T Length() const { return sqrt(LengthSq()); } + + // Get quaternion length squared. + T LengthSq() const { return (x * x + y * y + z * z + w * w); } + + // Simple Euclidean distance in R^4 (not SLERP distance, but at least respects Haar measure) + T Distance(const Quat& q) const + { + T d1 = (*this - q).Length(); + T d2 = (*this + q).Length(); // Antipodal point check + return (d1 < d2) ? d1 : d2; + } + + T DistanceSq(const Quat& q) const + { + T d1 = (*this - q).LengthSq(); + T d2 = (*this + q).LengthSq(); // Antipodal point check + return (d1 < d2) ? d1 : d2; + } + + T Dot(const Quat& q) const + { + return x * q.x + y * q.y + z * q.z + w * q.w; + } + + // Angle between two quaternions in radians + T Angle(const Quat& q) const + { + return 2 * Acos(Alg::Abs(Dot(q))); + } + + // Normalize + bool IsNormalized() const { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance; } + + void Normalize() + { + T l = Length(); + OVR_ASSERT(l != T(0)); + *this /= l; + } + + Quat Normalized() const + { + T l = Length(); + OVR_ASSERT(l != T(0)); + return *this / l; + } + + // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized. + Quat Conj() const { return Quat(-x, -y, -z, w); } + + // Quaternion multiplication. Combines quaternion rotations, performing the one on the + // right hand side first. 
+ Quat operator* (const Quat& b) const { return Quat(w * b.x + x * b.w + y * b.z - z * b.y, + w * b.y - x * b.z + y * b.w + z * b.x, + w * b.z + x * b.y - y * b.x + z * b.w, + w * b.w - x * b.x - y * b.y - z * b.z); } + + // + // this^p normalized; same as rotating by this p times. + Quat PowNormalized(T p) const + { + Vector3<T> v; + T a; + GetAxisAngle(&v, &a); + return Quat(v, a * p); + } + + // Normalized linear interpolation of quaternions + Quat Nlerp(const Quat& other, T a) + { + T sign = (Dot(other) >= 0) ? 1 : -1; + return (*this * sign * a + other * (1-a)).Normalized(); + } + + // Rotate transforms vector in a manner that matches Matrix rotations (counter-clockwise, + // assuming negative direction of the axis). Standard formula: q(t) * V * q(t)^-1. + Vector3<T> Rotate(const Vector3<T>& v) const + { + return ((*this * Quat<T>(v.x, v.y, v.z, T(0))) * Inverted()).Imag(); + } + + // Inversed quaternion rotates in the opposite direction. + Quat Inverted() const + { + return Quat(-x, -y, -z, w); + } + + // Sets this quaternion to the one rotates in the opposite direction. + void Invert() + { + *this = Quat(-x, -y, -z, w); + } + + // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of + // axis rotations and the specified coordinate system. Right-handed coordinate system + // is the default, with CCW rotations while looking in the negative axis direction. + // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. + // rotation a around axis A1 + // is followed by rotation b around axis A2 + // is followed by rotation c around axis A3 + // rotations are CCW or CW (D) in LH or RH coordinate system (S) + template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S> + void GetEulerAngles(T *a, T *b, T *c) const + { + OVR_COMPILER_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3)); + + T Q[3] = { x, y, z }; //Quaternion components x,y,z + + T ww = w*w; + T Q11 = Q[A1]*Q[A1]; + T Q22 = Q[A2]*Q[A2]; + T Q33 = Q[A3]*Q[A3]; + + T psign = T(-1); + // Determine whether even permutation + if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) + psign = T(1); + + T s2 = psign * T(2) * (psign*w*Q[A2] + Q[A1]*Q[A3]); + + if (s2 < T(-1) + Math<T>::SingularityRadius) + { // South pole singularity + *a = T(0); + *b = -S*D*Math<T>::PiOver2; + *c = S*D*atan2(T(2)*(psign*Q[A1]*Q[A2] + w*Q[A3]), + ww + Q22 - Q11 - Q33 ); + } + else if (s2 > T(1) - Math<T>::SingularityRadius) + { // North pole singularity + *a = T(0); + *b = S*D*Math<T>::PiOver2; + *c = S*D*atan2(T(2)*(psign*Q[A1]*Q[A2] + w*Q[A3]), + ww + Q22 - Q11 - Q33); + } + else + { + *a = -S*D*atan2(T(-2)*(w*Q[A1] - psign*Q[A2]*Q[A3]), + ww + Q33 - Q11 - Q22); + *b = S*D*asin(s2); + *c = S*D*atan2(T(2)*(w*Q[A3] - psign*Q[A1]*Q[A2]), + ww + Q11 - Q22 - Q33); + } + return; + } + + template <Axis A1, Axis A2, Axis A3, RotateDirection D> + void GetEulerAngles(T *a, T *b, T *c) const + { GetEulerAngles<A1, A2, A3, D, Handed_R>(a, b, c); } + + template <Axis A1, Axis A2, Axis A3> + void GetEulerAngles(T *a, T *b, T *c) const + { GetEulerAngles<A1, A2, A3, Rotate_CCW, Handed_R>(a, b, c); } + + + // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of + // axis rotations and the specified coordinate system. Right-handed coordinate system + // is the default, with CCW rotations while looking in the negative axis direction. + // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. 
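To make the right-to-left multiplication convention concrete, a hedged sketch; the Y-X-Z axis order passed to GetEulerAngles is just one common yaw/pitch/roll choice, not mandated by the API:

#include "Kernel/OVR_Math.h"

static void ComposeAndDecompose()
{
    OVR::Quatf pitch(OVR::Vector3f(1, 0, 0), 0.25f);
    OVR::Quatf yaw  (OVR::Vector3f(0, 1, 0), 0.50f);
    OVR::Quatf combined = yaw * pitch;                  // right-to-left: pitch first, then yaw

    OVR::Vector3f v(0, 0, -1);
    OVR::Vector3f r1 = combined.Rotate(v);
    OVR::Vector3f r2 = yaw.Rotate(pitch.Rotate(v));     // matches r1 up to rounding

    float y, p, r;
    combined.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&y, &p, &r);
}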
+ // rotation a around axis A1 + // is followed by rotation b around axis A2 + // is followed by rotation c around axis A1 + // Rotations are CCW or CW (D) in LH or RH coordinate system (S) + template <Axis A1, Axis A2, RotateDirection D, HandedSystem S> + void GetEulerAnglesABA(T *a, T *b, T *c) const + { + OVR_COMPILER_ASSERT(A1 != A2); + + T Q[3] = {x, y, z}; // Quaternion components + + // Determine the missing axis that was not supplied + int m = 3 - A1 - A2; + + T ww = w*w; + T Q11 = Q[A1]*Q[A1]; + T Q22 = Q[A2]*Q[A2]; + T Qmm = Q[m]*Q[m]; + + T psign = T(-1); + if ((A1 + 1) % 3 == A2) // Determine whether even permutation + { + psign = T(1); + } + + T c2 = ww + Q11 - Q22 - Qmm; + if (c2 < T(-1) + Math<T>::SingularityRadius) + { // South pole singularity + *a = T(0); + *b = S*D*Math<T>::Pi; + *c = S*D*atan2( T(2)*(w*Q[A1] - psign*Q[A2]*Q[m]), + ww + Q22 - Q11 - Qmm); + } + else if (c2 > T(1) - Math<T>::SingularityRadius) + { // North pole singularity + *a = T(0); + *b = T(0); + *c = S*D*atan2( T(2)*(w*Q[A1] - psign*Q[A2]*Q[m]), + ww + Q22 - Q11 - Qmm); + } + else + { + *a = S*D*atan2( psign*w*Q[m] + Q[A1]*Q[A2], + w*Q[A2] -psign*Q[A1]*Q[m]); + *b = S*D*acos(c2); + *c = S*D*atan2( -psign*w*Q[m] + Q[A1]*Q[A2], + w*Q[A2] + psign*Q[A1]*Q[m]); + } + return; + } +}; + +typedef Quat<float> Quatf; +typedef Quat<double> Quatd; + //------------------------------------------------------------------------------------- -// ***** Matrix4f +// ***** Pose + +// Position and orientation combined. + +template<class T> +class Pose +{ +public: + + typedef typename CompatibleTypes<Pose<T> >::Type CompatibleType; + + Pose() { } + Pose(const Quat<T>& orientation, const Vector3<T>& pos) + : Orientation(orientation), Position(pos) { } + Pose(const Pose& s) + : Orientation(s.Orientation), Position(s.Position) { } + Pose(const CompatibleType& s) + : Orientation(s.Orientation), Position(s.Position) { } + explicit Pose(const Pose<typename Math<T>::OtherFloatType> &s) + : Orientation(s.Orientation), Position(s.Position) { } + + operator const typename CompatibleTypes<Pose<T> >::Type () const + { + typename CompatibleTypes<Pose<T> >::Type result; + result.Orientation = Orientation; + result.Position = Position; + return result; + } + + Quat<T> Orientation; + Vector3<T> Position; +}; + +typedef Pose<float> Posef; +typedef Pose<double> Posed; + + +//------------------------------------------------------------------------------------- +// ***** Matrix4 // -// Matrix4f is a 4x4 matrix used for 3d transformations and projections. +// Matrix4 is a 4x4 matrix used for 3d transformations and projections. // Translation stored in the last column. // The matrix is stored in row-major order in memory, meaning that values // of the first row are stored before the next one. @@ -469,28 +1151,29 @@ typedef Size<double> Sized; // // The basis vectors are first three columns. -class Matrix4f +template<class T> +class Matrix4 { - static Matrix4f IdentityValue; + static const Matrix4 IdentityValue; public: - float M[4][4]; + T M[4][4]; enum NoInitType { NoInit }; // Construct with no memory initialization. - Matrix4f(NoInitType) { } + Matrix4(NoInitType) { } // By default, we construct identity matrix. 
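A sketch of Pose and the C-interop conversions it shares with the other types; the field values are arbitrary and the OVR_CAPI.h include path is an assumption of the example:

#include "OVR_CAPI.h"            // assumed to provide the concrete ovrPosef / ovrVector3f structs
#include "Kernel/OVR_Math.h"

static void PoseInterop()
{
    OVR::Posef headPose(OVR::Quatf(OVR::Vector3f(0, 1, 0), 0.3f),
                        OVR::Vector3f(0.0f, 1.6f, 0.0f));

    ovrPosef cPose = headPose;           // C++ -> C struct, via the conversion operator
    OVR::Posef roundTrip(cPose);         // C -> C++, via the converting constructor

    ovrVector3f cPosition = headPose.Position;   // Vector3f is layout-compatible with ovrVector3f
}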
- Matrix4f() + Matrix4() { SetIdentity(); } - Matrix4f(float m11, float m12, float m13, float m14, - float m21, float m22, float m23, float m24, - float m31, float m32, float m33, float m34, - float m41, float m42, float m43, float m44) + Matrix4(T m11, T m12, T m13, T m14, + T m21, T m22, T m23, T m24, + T m31, T m32, T m33, T m34, + T m41, T m42, T m43, T m44) { M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = m14; M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = m24; @@ -498,9 +1181,9 @@ public: M[3][0] = m41; M[3][1] = m42; M[3][2] = m43; M[3][3] = m44; } - Matrix4f(float m11, float m12, float m13, - float m21, float m22, float m23, - float m31, float m32, float m33) + Matrix4(T m11, T m12, T m13, + T m21, T m22, T m23, + T m31, T m32, T m33) { M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = 0; M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = 0; @@ -508,7 +1191,50 @@ public: M[3][0] = 0; M[3][1] = 0; M[3][2] = 0; M[3][3] = 1; } - void ToString(char* dest, UPInt destsize) + explicit Matrix4(const Quat<T>& q) + { + T ww = q.w*q.w; + T xx = q.x*q.x; + T yy = q.y*q.y; + T zz = q.z*q.z; + + M[0][0] = ww + xx - yy - zz; M[0][1] = 2 * (q.x*q.y - q.w*q.z); M[0][2] = 2 * (q.x*q.z + q.w*q.y); M[0][3] = 0; + M[1][0] = 2 * (q.x*q.y + q.w*q.z); M[1][1] = ww - xx + yy - zz; M[1][2] = 2 * (q.y*q.z - q.w*q.x); M[1][3] = 0; + M[2][0] = 2 * (q.x*q.z - q.w*q.y); M[2][1] = 2 * (q.y*q.z + q.w*q.x); M[2][2] = ww - xx - yy + zz; M[2][3] = 0; + M[3][0] = 0; M[3][1] = 0; M[3][2] = 0; M[3][3] = 1; + } + + explicit Matrix4(const Pose<T>& p) + { + Matrix4 result(p.Orientation); + result.SetTranslation(p.Position); + *this = result; + } + + // C-interop support + explicit Matrix4(const Matrix4<typename Math<T>::OtherFloatType> &src) + { + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + M[i][j] = (T)src.M[i][j]; + } + + // C-interop support. 
+ Matrix4(const typename CompatibleTypes<Matrix4<T> >::Type& s) + { + OVR_COMPILER_ASSERT(sizeof(s) == sizeof(Matrix4)); + memcpy(M, s.M, sizeof(M)); + } + + operator const typename CompatibleTypes<Matrix4<T> >::Type () const + { + typename CompatibleTypes<Matrix4<T> >::Type result; + OVR_COMPILER_ASSERT(sizeof(result) == sizeof(Matrix4)); + memcpy(result.M, M, sizeof(M)); + return result; + } + + void ToString(char* dest, UPInt destsize) const { UPInt pos = 0; for (int r=0; r<4; r++) @@ -516,13 +1242,13 @@ public: pos += OVR_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]); } - static Matrix4f FromString(const char* src) + static Matrix4 FromString(const char* src) { - Matrix4f result; + Matrix4 result; for (int r=0; r<4; r++) for (int c=0; c<4; c++) { - result.M[r][c] = (float)atof(src); + result.M[r][c] = (T)atof(src); while (src && *src != ' ') src++; while (src && *src == ' ') @@ -531,7 +1257,7 @@ public: return result; } - static const Matrix4f& Identity() { return IdentityValue; } + static const Matrix4& Identity() { return IdentityValue; } void SetIdentity() { @@ -541,14 +1267,24 @@ public: M[0][3] = M[1][3] = M[2][1] = M[3][0] = 0; } - Matrix4f operator+ (const Matrix4f& b) const + bool operator== (const Matrix4& b) const + { + bool isEqual = true; + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + isEqual &= (M[i][j] == b.M[i][j]); + + return isEqual; + } + + Matrix4 operator+ (const Matrix4& b) const { - Matrix4f result(*this); + Matrix4 result(*this); result += b; return result; } - Matrix4f& operator+= (const Matrix4f& b) + Matrix4& operator+= (const Matrix4& b) { for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) @@ -556,14 +1292,14 @@ public: return *this; } - Matrix4f operator- (const Matrix4f& b) const + Matrix4 operator- (const Matrix4& b) const { - Matrix4f result(*this); + Matrix4 result(*this); result -= b; return result; } - Matrix4f& operator-= (const Matrix4f& b) + Matrix4& operator-= (const Matrix4& b) { for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) @@ -572,7 +1308,7 @@ public: } // Multiplies two matrices into destination with minimum copying. 
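The text serialization helpers round-trip, for example through a hypothetical settings file; a minimal sketch (buffer size and include path assumed):

#include "Kernel/OVR_Math.h"

static void MatrixTextRoundTrip()
{
    char text[512];
    OVR::Matrix4f m = OVR::Matrix4f::Identity();
    m.ToString(text, sizeof(text));                        // sixteen space-separated "%g" values
    OVR::Matrix4f parsed = OVR::Matrix4f::FromString(text);
    bool same = (parsed == m);                             // the new operator== compares every entry
}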
- static Matrix4f& Multiply(Matrix4f* d, const Matrix4f& a, const Matrix4f& b) + static Matrix4& Multiply(Matrix4* d, const Matrix4& a, const Matrix4& b) { OVR_ASSERT((d != &a) && (d != &b)); int i = 0; @@ -586,26 +1322,26 @@ public: return *d; } - Matrix4f operator* (const Matrix4f& b) const + Matrix4 operator* (const Matrix4& b) const { - Matrix4f result(Matrix4f::NoInit); + Matrix4 result(Matrix4::NoInit); Multiply(&result, *this, b); return result; } - Matrix4f& operator*= (const Matrix4f& b) + Matrix4& operator*= (const Matrix4& b) { - return Multiply(this, Matrix4f(*this), b); + return Multiply(this, Matrix4(*this), b); } - Matrix4f operator* (float s) const + Matrix4 operator* (T s) const { - Matrix4f result(*this); + Matrix4 result(*this); result *= s; return result; } - Matrix4f& operator*= (float s) + Matrix4& operator*= (T s) { for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) @@ -614,14 +1350,14 @@ public: } - Matrix4f operator/ (float s) const + Matrix4 operator/ (T s) const { - Matrix4f result(*this); + Matrix4 result(*this); result /= s; return result; } - Matrix4f& operator/= (float s) + Matrix4& operator/= (T s) { for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) @@ -629,16 +1365,16 @@ public: return *this; } - Vector3f Transform(const Vector3f& v) const + Vector3<T> Transform(const Vector3<T>& v) const { - return Vector3f(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3], - M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3], - M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]); + return Vector3<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3], + M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3], + M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]); } - Matrix4f Transposed() const + Matrix4 Transposed() const { - return Matrix4f(M[0][0], M[1][0], M[2][0], M[3][0], + return Matrix4(M[0][0], M[1][0], M[2][0], M[3][0], M[0][1], M[1][1], M[2][1], M[3][1], M[0][2], M[1][2], M[2][2], M[3][2], M[0][3], M[1][3], M[2][3], M[3][3]); @@ -650,35 +1386,35 @@ public: } - float SubDet (const UPInt* rows, const UPInt* cols) const + T SubDet (const UPInt* rows, const UPInt* cols) const { return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]); } - float Cofactor(UPInt I, UPInt J) const + T Cofactor(UPInt I, UPInt J) const { const UPInt indices[4][3] = {{1,2,3},{0,2,3},{0,1,3},{0,1,2}}; return ((I+J)&1) ? 
-SubDet(indices[I],indices[J]) : SubDet(indices[I],indices[J]); } - float Determinant() const + T Determinant() const { return M[0][0] * Cofactor(0,0) + M[0][1] * Cofactor(0,1) + M[0][2] * Cofactor(0,2) + M[0][3] * Cofactor(0,3); } - Matrix4f Adjugated() const + Matrix4 Adjugated() const { - return Matrix4f(Cofactor(0,0), Cofactor(1,0), Cofactor(2,0), Cofactor(3,0), + return Matrix4(Cofactor(0,0), Cofactor(1,0), Cofactor(2,0), Cofactor(3,0), Cofactor(0,1), Cofactor(1,1), Cofactor(2,1), Cofactor(3,1), Cofactor(0,2), Cofactor(1,2), Cofactor(2,2), Cofactor(3,2), Cofactor(0,3), Cofactor(1,3), Cofactor(2,3), Cofactor(3,3)); } - Matrix4f Inverted() const + Matrix4 Inverted() const { - float det = Determinant(); + T det = Determinant(); assert(det != 0); return Adjugated() * (1.0f/det); } @@ -690,14 +1426,14 @@ public: // This is more efficient than general inverse, but ONLY works // correctly if it is a homogeneous transform matrix (rot + trans) - Matrix4f InvertedHomogeneousTransform() const + Matrix4 InvertedHomogeneousTransform() const { // Make the inverse rotation matrix - Matrix4f rinv = this->Transposed(); + Matrix4 rinv = this->Transposed(); rinv.M[3][0] = rinv.M[3][1] = rinv.M[3][2] = 0.0f; // Make the inverse translation matrix - Vector3f tvinv = Vector3f(-M[0][3],-M[1][3],-M[2][3]); - Matrix4f tinv = Matrix4f::Translation(tvinv); + Vector3<T> tvinv(-M[0][3],-M[1][3],-M[2][3]); + Matrix4 tinv = Matrix4::Translation(tvinv); return rinv * tinv; // "untranslate", then "unrotate" } @@ -715,25 +1451,25 @@ public: // is followed by rotation c around axis A3 // rotations are CCW or CW (D) in LH or RH coordinate system (S) template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S> - void ToEulerAngles(float *a, float *b, float *c) + void ToEulerAngles(T *a, T *b, T *c) { OVR_COMPILER_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3)); - float psign = -1.0f; + T psign = -1; if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) // Determine whether even permutation - psign = 1.0f; + psign = 1; - float pm = psign*M[A1][A3]; - if (pm < -1.0f + Math<float>::SingularityRadius) + T pm = psign*M[A1][A3]; + if (pm < -1.0f + Math<T>::SingularityRadius) { // South pole singularity - *a = 0.0f; - *b = -S*D*Math<float>::PiOver2; + *a = 0; + *b = -S*D*Math<T>::PiOver2; *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] ); } - else if (pm > 1.0f - Math<float>::SingularityRadius) + else if (pm > 1.0f - Math<T>::SingularityRadius) { // North pole singularity - *a = 0.0f; - *b = S*D*Math<float>::PiOver2; + *a = 0; + *b = S*D*Math<T>::PiOver2; *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] ); } else @@ -753,28 +1489,28 @@ public: // is followed by rotation c around axis A1 // rotations are CCW or CW (D) in LH or RH coordinate system (S) template <Axis A1, Axis A2, RotateDirection D, HandedSystem S> - void ToEulerAnglesABA(float *a, float *b, float *c) + void ToEulerAnglesABA(T *a, T *b, T *c) { OVR_COMPILER_ASSERT(A1 != A2); // Determine the axis that was not supplied int m = 3 - A1 - A2; - float psign = -1.0f; + T psign = -1; if ((A1 + 1) % 3 == A2) // Determine whether even permutation psign = 1.0f; - float c2 = M[A1][A1]; - if (c2 < -1.0f + Math<float>::SingularityRadius) + T c2 = M[A1][A1]; + if (c2 < -1 + Math<T>::SingularityRadius) { // South pole singularity - *a = 0.0f; - *b = S*D*Math<float>::Pi; + *a = 0; + *b = S*D*Math<T>::Pi; *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]); } - else if (c2 > 1.0f - Math<float>::SingularityRadius) + else if (c2 > 1.0f - Math<T>::SingularityRadius) { // North pole singularity - 
*a = 0.0f; - *b = 0.0f; + *a = 0; + *b = 0; *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]); } else @@ -788,7 +1524,7 @@ public: // Creates a matrix that converts the vertices from one coordinate system // to another. - static Matrix4f AxisConversion(const WorldAxes& to, const WorldAxes& from) + static Matrix4 AxisConversion(const WorldAxes& to, const WorldAxes& from) { // Holds axis values from the 'to' structure int toArray[3] = { to.XAxis, to.YAxis, to.ZAxis }; @@ -799,22 +1535,22 @@ public: inv[abs(to.YAxis)] = 1; inv[abs(to.ZAxis)] = 2; - Matrix4f m(0, 0, 0, - 0, 0, 0, - 0, 0, 0); + Matrix4 m(0, 0, 0, + 0, 0, 0, + 0, 0, 0); // Only three values in the matrix need to be changed to 1 or -1. - m.M[inv[abs(from.XAxis)]][0] = float(from.XAxis/toArray[inv[abs(from.XAxis)]]); - m.M[inv[abs(from.YAxis)]][1] = float(from.YAxis/toArray[inv[abs(from.YAxis)]]); - m.M[inv[abs(from.ZAxis)]][2] = float(from.ZAxis/toArray[inv[abs(from.ZAxis)]]); + m.M[inv[abs(from.XAxis)]][0] = T(from.XAxis/toArray[inv[abs(from.XAxis)]]); + m.M[inv[abs(from.YAxis)]][1] = T(from.YAxis/toArray[inv[abs(from.YAxis)]]); + m.M[inv[abs(from.ZAxis)]][2] = T(from.ZAxis/toArray[inv[abs(from.ZAxis)]]); return m; } // Creates a matrix for translation by vector - static Matrix4f Translation(const Vector3f& v) + static Matrix4 Translation(const Vector3<T>& v) { - Matrix4f t; + Matrix4 t; t.M[0][3] = v.x; t.M[1][3] = v.y; t.M[2][3] = v.z; @@ -822,19 +1558,32 @@ public: } // Creates a matrix for translation by vector - static Matrix4f Translation(float x, float y, float z = 0.0f) + static Matrix4 Translation(T x, T y, T z = 0.0f) { - Matrix4f t; + Matrix4 t; t.M[0][3] = x; t.M[1][3] = y; t.M[2][3] = z; return t; } + // Sets the translation part + void SetTranslation(const Vector3<T>& v) + { + M[0][3] = v.x; + M[1][3] = v.y; + M[2][3] = v.z; + } + + Vector3<T> GetTranslation() const + { + return Vector3<T>( M[0][3], M[1][3], M[2][3] ); + } + // Creates a matrix for scaling by vector - static Matrix4f Scaling(const Vector3f& v) + static Matrix4 Scaling(const Vector3<T>& v) { - Matrix4f t; + Matrix4 t; t.M[0][0] = v.x; t.M[1][1] = v.y; t.M[2][2] = v.z; @@ -842,9 +1591,9 @@ public: } // Creates a matrix for scaling by vector - static Matrix4f Scaling(float x, float y, float z) + static Matrix4 Scaling(T x, T y, T z) { - Matrix4f t; + Matrix4 t; t.M[0][0] = x; t.M[1][1] = y; t.M[2][2] = z; @@ -852,38 +1601,50 @@ public: } // Creates a matrix for scaling by constant - static Matrix4f Scaling(float s) + static Matrix4 Scaling(T s) { - Matrix4f t; + Matrix4 t; t.M[0][0] = s; t.M[1][1] = s; t.M[2][2] = s; return t; } - + // Simple L1 distance in R^12 + T Distance(const Matrix4& m2) const + { + T d = fabs(M[0][0] - m2.M[0][0]) + fabs(M[0][1] - m2.M[0][1]); + d += fabs(M[0][2] - m2.M[0][2]) + fabs(M[0][3] - m2.M[0][3]); + d += fabs(M[1][0] - m2.M[1][0]) + fabs(M[1][1] - m2.M[1][1]); + d += fabs(M[1][2] - m2.M[1][2]) + fabs(M[1][3] - m2.M[1][3]); + d += fabs(M[2][0] - m2.M[2][0]) + fabs(M[2][1] - m2.M[2][1]); + d += fabs(M[2][2] - m2.M[2][2]) + fabs(M[2][3] - m2.M[2][3]); + d += fabs(M[3][0] - m2.M[3][0]) + fabs(M[3][1] - m2.M[3][1]); + d += fabs(M[3][2] - m2.M[3][2]) + fabs(M[3][3] - m2.M[3][3]); + return d; + } // Creates a rotation matrix rotating around the X axis by 'angle' radians. // Just for quick testing. Not for final API. Need to remove case. 
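A brief sketch contrasting the cheap rigid-transform inverse with the general one; RotationY is one of the axis-rotation helpers defined just below, and the comparison uses the new L1 Distance helper (values arbitrary):

#include "Kernel/OVR_Math.h"

static void RigidInverse()
{
    OVR::Matrix4f world = OVR::Matrix4f::Translation(OVR::Vector3f(1, 2, 3))
                        * OVR::Matrix4f::RotationY(0.5f);

    OVR::Matrix4f fast = world.InvertedHomogeneousTransform();  // transpose + untranslate
    OVR::Matrix4f full = world.Inverted();                      // general adjugate / determinant path

    float drift = fast.Distance(full);    // essentially zero here, up to rounding
}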
- static Matrix4f RotationAxis(Axis A, float angle, RotateDirection d, HandedSystem s) + static Matrix4 RotationAxis(Axis A, T angle, RotateDirection d, HandedSystem s) { - float sina = s * d *sin(angle); - float cosa = cos(angle); + T sina = s * d *sin(angle); + T cosa = cos(angle); switch(A) { case Axis_X: - return Matrix4f(1, 0, 0, - 0, cosa, -sina, - 0, sina, cosa); + return Matrix4(1, 0, 0, + 0, cosa, -sina, + 0, sina, cosa); case Axis_Y: - return Matrix4f(cosa, 0, sina, - 0, 1, 0, - -sina, 0, cosa); + return Matrix4(cosa, 0, sina, + 0, 1, 0, + -sina, 0, cosa); case Axis_Z: - return Matrix4f(cosa, -sina, 0, - sina, cosa, 0, - 0, 0, 1); + return Matrix4(cosa, -sina, 0, + sina, cosa, 0, + 0, 0, 1); } } @@ -895,13 +1656,13 @@ public: // same as looking down from positive axis values towards origin. // LHS: Positive angle values rotate clock-wise (CW), while looking in the // negative axis direction. - static Matrix4f RotationX(float angle) + static Matrix4 RotationX(T angle) { - float sina = sin(angle); - float cosa = cos(angle); - return Matrix4f(1, 0, 0, - 0, cosa, -sina, - 0, sina, cosa); + T sina = sin(angle); + T cosa = cos(angle); + return Matrix4(1, 0, 0, + 0, cosa, -sina, + 0, sina, cosa); } // Creates a rotation matrix rotating around the Y axis by 'angle' radians. @@ -911,13 +1672,13 @@ public: // same as looking down from positive axis values towards origin. // LHS: Positive angle values rotate clock-wise (CW), while looking in the // negative axis direction. - static Matrix4f RotationY(float angle) + static Matrix4 RotationY(T angle) { - float sina = sin(angle); - float cosa = cos(angle); - return Matrix4f(cosa, 0, sina, - 0, 1, 0, - -sina, 0, cosa); + T sina = sin(angle); + T cosa = cos(angle); + return Matrix4(cosa, 0, sina, + 0, 1, 0, + -sina, 0, cosa); } // Creates a rotation matrix rotating around the Z axis by 'angle' radians. @@ -927,27 +1688,47 @@ public: // same as looking down from positive axis values towards origin. // LHS: Positive angle values rotate clock-wise (CW), while looking in the // negative axis direction. - static Matrix4f RotationZ(float angle) + static Matrix4 RotationZ(T angle) { - float sina = sin(angle); - float cosa = cos(angle); - return Matrix4f(cosa, -sina, 0, - sina, cosa, 0, - 0, 0, 1); + T sina = sin(angle); + T cosa = cos(angle); + return Matrix4(cosa, -sina, 0, + sina, cosa, 0, + 0, 0, 1); } - // LookAtRH creates a View transformation matrix for right-handed coordinate system. // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up' // specifying the up vector. The resulting matrix should be used with PerspectiveRH // projection. - static Matrix4f LookAtRH(const Vector3f& eye, const Vector3f& at, const Vector3f& up); - + static Matrix4 LookAtRH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up) + { + Vector3<T> z = (eye - at).Normalized(); // Forward + Vector3<T> x = up.Cross(z).Normalized(); // Right + Vector3<T> y = z.Cross(x); + + Matrix4 m(x.x, x.y, x.z, -(x.Dot(eye)), + y.x, y.y, y.z, -(y.Dot(eye)), + z.x, z.y, z.z, -(z.Dot(eye)), + 0, 0, 0, 1 ); + return m; + } + // LookAtLH creates a View transformation matrix for left-handed coordinate system. // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up' // specifying the up vector. 
- static Matrix4f LookAtLH(const Vector3f& eye, const Vector3f& at, const Vector3f& up); - + static Matrix4 LookAtLH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up) + { + Vector3<T> z = (at - eye).Normalized(); // Forward + Vector3<T> x = up.Cross(z).Normalized(); // Right + Vector3<T> y = z.Cross(x); + + Matrix4 m(x.x, x.y, x.z, -(x.Dot(eye)), + y.x, y.y, y.z, -(y.Dot(eye)), + z.x, z.y, z.z, -(z.Dot(eye)), + 0, 0, 0, 1 ); + return m; + } // PerspectiveRH creates a right-handed perspective projection matrix that can be // used with the Oculus sample renderer. @@ -958,8 +1739,22 @@ public: // zfar - Absolute value of far Z clipping clipping range (larger then near). // Even though RHS usually looks in the direction of negative Z, positive values // are expected for znear and zfar. - static Matrix4f PerspectiveRH(float yfov, float aspect, float znear, float zfar); - + static Matrix4 PerspectiveRH(T yfov, T aspect, T znear, T zfar) + { + Matrix4 m; + T tanHalfFov = tan(yfov * 0.5f); + + m.M[0][0] = 1 / (aspect * tanHalfFov); + m.M[1][1] = 1 / tanHalfFov; + m.M[2][2] = zfar / (zfar - znear); + m.M[3][2] = 1; + m.M[2][3] = (zfar * znear) / (znear - zfar); + m.M[3][3] = 0; + + // Note: Post-projection matrix result assumes Left-Handed coordinate system, + // with Y up, X right and Z forward. This supports positive z-buffer values. + return m; + } // PerspectiveRH creates a left-handed perspective projection matrix that can be // used with the Oculus sample renderer. @@ -968,350 +1763,601 @@ public: // Note that xfov = yfov * aspect. // znear - Absolute value of near Z clipping clipping range. // zfar - Absolute value of far Z clipping clipping range (larger then near). - static Matrix4f PerspectiveLH(float yfov, float aspect, float znear, float zfar); - + static Matrix4 PerspectiveLH(T yfov, T aspect, T znear, T zfar) + { + Matrix4 m; + T tanHalfFov = tan(yfov * 0.5f); + + m.M[0][0] = 1.0 / (aspect * tanHalfFov); + m.M[1][1] = 1.0 / tanHalfFov; + m.M[2][2] = zfar / (znear - zfar); + // m.M[2][2] = zfar / (zfar - znear); + m.M[3][2] = -1.0; + m.M[2][3] = (zfar * znear) / (znear - zfar); + m.M[3][3] = 0.0; + + // Note: Post-projection matrix result assumes Left-Handed coordinate system, + // with Y up, X right and Z forward. This supports positive z-buffer values. + // This is the case even for RHS cooridnate input. + return m; + } - static Matrix4f Ortho2D(float w, float h); + static Matrix4 Ortho2D(T w, T h) + { + Matrix4 m; + m.M[0][0] = 2.0/w; + m.M[1][1] = -2.0/h; + m.M[0][3] = -1.0; + m.M[1][3] = 1.0; + m.M[2][2] = 0; + return m; + } }; +typedef Matrix4<float> Matrix4f; +typedef Matrix4<double> Matrix4d; -//-------------------------------------------------------------------------------------// -// **************************************** Quat **************************************// +//------------------------------------------------------------------------------------- +// ***** Matrix3 // -// Quatf represents a quaternion class used for rotations. -// -// Quaternion multiplications are done in right-to-left order, to match the -// behavior of matrices. +// Matrix3 is a 3x3 matrix used for representing a rotation matrix. +// The matrix is stored in row-major order in memory, meaning that values +// of the first row are stored before the next one. 
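Putting the now header-only view and projection helpers together, an illustrative right-handed camera setup; every numeric value is arbitrary:

#include "Kernel/OVR_Math.h"

static void CameraSetup()
{
    OVR::Vector3f eye(0.0f, 1.8f, 5.0f);
    OVR::Vector3f target(0.0f, 1.8f, 0.0f);
    OVR::Vector3f up(0.0f, 1.0f, 0.0f);

    OVR::Matrix4f view = OVR::Matrix4f::LookAtRH(eye, target, up);
    OVR::Matrix4f proj = OVR::Matrix4f::PerspectiveRH(OVR::Mathf::PiOver2,   // 90 degree vertical FOV
                                                      1280.0f / 720.0f,      // aspect ratio
                                                      0.1f, 1000.0f);        // znear, zfar

    OVR::Matrix4f viewProj = proj * view;   // right-to-left: view first, then projection
}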
+// +// The arrangement of the matrix is chosen to be in Right-Handed +// coordinate system and counterclockwise rotations when looking down +// the axis +// +// Transformation Order: +// - Transformations are applied from right to left, so the expression +// M1 * M2 * M3 * V means that the vector V is transformed by M3 first, +// followed by M2 and M1. +// +// Coordinate system: Right Handed +// +// Rotations: Counterclockwise when looking down the axis. All angles are in radians. +template<typename T> +class SymMat3; template<class T> -class Quat +class Matrix3 { + static const Matrix3 IdentityValue; + public: - // w + Xi + Yj + Zk - T x, y, z, w; + T M[3][3]; - Quat() : x(0), y(0), z(0), w(1) {} - Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) {} + enum NoInitType { NoInit }; + // Construct with no memory initialization. + Matrix3(NoInitType) { } - // Constructs quaternion for rotation around the axis by an angle. - Quat(const Vector3<T>& axis, T angle) - { - Vector3<T> unitAxis = axis.Normalized(); - T sinHalfAngle = sin(angle * T(0.5)); + // By default, we construct identity matrix. + Matrix3() + { + SetIdentity(); + } - w = cos(angle * T(0.5)); - x = unitAxis.x * sinHalfAngle; - y = unitAxis.y * sinHalfAngle; - z = unitAxis.z * sinHalfAngle; + Matrix3(T m11, T m12, T m13, + T m21, T m22, T m23, + T m31, T m32, T m33) + { + M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; + M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; + M[2][0] = m31; M[2][1] = m32; M[2][2] = m33; + } + + /* + explicit Matrix3(const Quat<T>& q) + { + T ww = q.w*q.w; + T xx = q.x*q.x; + T yy = q.y*q.y; + T zz = q.z*q.z; + + M[0][0] = ww + xx - yy - zz; M[0][1] = 2 * (q.x*q.y - q.w*q.z); M[0][2] = 2 * (q.x*q.z + q.w*q.y); + M[1][0] = 2 * (q.x*q.y + q.w*q.z); M[1][1] = ww - xx + yy - zz; M[1][2] = 2 * (q.y*q.z - q.w*q.x); + M[2][0] = 2 * (q.x*q.z - q.w*q.y); M[2][1] = 2 * (q.y*q.z + q.w*q.x); M[2][2] = ww - xx - yy + zz; + } + */ + + explicit Matrix3(const Quat<T>& q) + { + const T tx = q.x+q.x, ty = q.y+q.y, tz = q.z+q.z; + const T twx = q.w*tx, twy = q.w*ty, twz = q.w*tz; + const T txx = q.x*tx, txy = q.x*ty, txz = q.x*tz; + const T tyy = q.y*ty, tyz = q.y*tz, tzz = q.z*tz; + M[0][0] = T(1) - (tyy + tzz); M[0][1] = txy - twz; M[0][2] = txz + twy; + M[1][0] = txy + twz; M[1][1] = T(1) - (txx + tzz); M[1][2] = tyz - twx; + M[2][0] = txz - twy; M[2][1] = tyz + twx; M[2][2] = T(1) - (txx + tyy); + } + + inline explicit Matrix3(T s) + { + M[0][0] = M[1][1] = M[2][2] = s; + M[0][1] = M[0][2] = M[1][0] = M[1][2] = M[2][0] = M[2][1] = 0; } - // Constructs quaternion for rotation around one of the coordinate axis by an angle. - void AxisAngle(Axis A, T angle, RotateDirection d, HandedSystem s) - { - T sinHalfAngle = s * d *sin(angle * T(0.5)); - T v[3]; - v[0] = v[1] = v[2] = T(0); - v[A] = sinHalfAngle; + explicit Matrix3(const Pose<T>& p) + { + Matrix3 result(p.Orientation); + result.SetTranslation(p.Position); + *this = result; + } - w = cos(angle * T(0.5)); - x = v[0]; - y = v[1]; - z = v[2]; - } + // C-interop support + explicit Matrix3(const Matrix4<typename Math<T>::OtherFloatType> &src) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] = (T)src.M[i][j]; + } + // C-interop support. 
+ Matrix3(const typename CompatibleTypes<Matrix3<T> >::Type& s) + { + OVR_COMPILER_ASSERT(sizeof(s) == sizeof(Matrix3)); + memcpy(M, s.M, sizeof(M)); + } - // Compute axis and angle from quaternion - void GetAxisAngle(Vector3<T>* axis, T* angle) const - { - if ( x*x + y*y + z*z > Math<T>::Tolerance * Math<T>::Tolerance ) { - *axis = Vector3<T>(x, y, z).Normalized(); - *angle = T(2) * Acos(w); - } - else - { - *axis = Vector3<T>(1, 0, 0); - *angle= 0; - } - } + operator const typename CompatibleTypes<Matrix3<T> >::Type () const + { + typename CompatibleTypes<Matrix3<T> >::Type result; + OVR_COMPILER_ASSERT(sizeof(result) == sizeof(Matrix3)); + memcpy(result.M, M, sizeof(M)); + return result; + } - bool operator== (const Quat& b) const { return x == b.x && y == b.y && z == b.z && w == b.w; } - bool operator!= (const Quat& b) const { return x != b.x || y != b.y || z != b.z || w != b.w; } + void ToString(char* dest, UPInt destsize) const + { + UPInt pos = 0; + for (int r=0; r<3; r++) + for (int c=0; c<3; c++) + pos += OVR_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]); + } - Quat operator+ (const Quat& b) const { return Quat(x + b.x, y + b.y, z + b.z, w + b.w); } - Quat& operator+= (const Quat& b) { w += b.w; x += b.x; y += b.y; z += b.z; return *this; } - Quat operator- (const Quat& b) const { return Quat(x - b.x, y - b.y, z - b.z, w - b.w); } - Quat& operator-= (const Quat& b) { w -= b.w; x -= b.x; y -= b.y; z -= b.z; return *this; } + static Matrix3 FromString(const char* src) + { + Matrix3 result; + for (int r=0; r<3; r++) + for (int c=0; c<3; c++) + { + result.M[r][c] = (T)atof(src); + while (src && *src != ' ') + src++; + while (src && *src == ' ') + src++; + } + return result; + } - Quat operator* (T s) const { return Quat(x * s, y * s, z * s, w * s); } - Quat& operator*= (T s) { w *= s; x *= s; y *= s; z *= s; return *this; } - Quat operator/ (T s) const { T rcp = T(1)/s; return Quat(x * rcp, y * rcp, z * rcp, w *rcp); } - Quat& operator/= (T s) { T rcp = T(1)/s; w *= rcp; x *= rcp; y *= rcp; z *= rcp; return *this; } + static const Matrix3& Identity() { return IdentityValue; } + void SetIdentity() + { + M[0][0] = M[1][1] = M[2][2] = 1; + M[0][1] = M[1][0] = M[2][0] = 0; + M[0][2] = M[1][2] = M[2][1] = 0; + } - // Get Imaginary part vector - Vector3<T> Imag() const { return Vector3<T>(x,y,z); } + bool operator== (const Matrix3& b) const + { + bool isEqual = true; + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + isEqual &= (M[i][j] == b.M[i][j]); - // Get quaternion length. - T Length() const { return sqrt(x * x + y * y + z * z + w * w); } - // Get quaternion length squared. - T LengthSq() const { return (x * x + y * y + z * z + w * w); } + return isEqual; + } - // Simple Eulidean distance in R^4 (not SLERP distance, but at least respects Haar measure) - T Distance(const Quat& q) const - { - T d1 = (*this - q).Length(); - T d2 = (*this + q).Length(); // Antipodal point check - return (d1 < d2) ? d1 : d2; + Matrix3 operator+ (const Matrix3& b) const + { + Matrix4<T> result(*this); + result += b; + return result; } - T DistanceSq(const Quat& q) const - { - T d1 = (*this - q).LengthSq(); - T d2 = (*this + q).LengthSq(); // Antipodal point check - return (d1 < d2) ? 
d1 : d2; - } + Matrix3& operator+= (const Matrix3& b) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] += b.M[i][j]; + return *this; + } - // Normalize - bool IsNormalized() const { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance; } + void operator= (const Matrix3& b) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] = b.M[i][j]; + return; + } - void Normalize() + void operator= (const SymMat3<T>& b) { - T l = Length(); - OVR_ASSERT(l != T(0)); - *this /= l; + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] = 0; + + M[0][0] = b.v[0]; + M[0][1] = b.v[1]; + M[0][2] = b.v[2]; + M[1][1] = b.v[3]; + M[1][2] = b.v[4]; + M[2][2] = b.v[5]; + + return; } - Quat Normalized() const - { - T l = Length(); - OVR_ASSERT(l != T(0)); - return *this / l; + Matrix3 operator- (const Matrix3& b) const + { + Matrix3 result(*this); + result -= b; + return result; } - // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized. - Quat Conj() const { return Quat(-x, -y, -z, w); } + Matrix3& operator-= (const Matrix3& b) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] -= b.M[i][j]; + return *this; + } - // Quaternion multiplication. Combines quaternion rotations, performing the one on the - // right hand side first. - Quat operator* (const Quat& b) const { return Quat(w * b.x + x * b.w + y * b.z - z * b.y, - w * b.y - x * b.z + y * b.w + z * b.x, - w * b.z + x * b.y - y * b.x + z * b.w, - w * b.w - x * b.x - y * b.y - z * b.z); } + // Multiplies two matrices into destination with minimum copying. + static Matrix3& Multiply(Matrix3* d, const Matrix3& a, const Matrix3& b) + { + OVR_ASSERT((d != &a) && (d != &b)); + int i = 0; + do { + d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0]; + d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1]; + d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2]; + } while((++i) < 3); + + return *d; + } - // - // this^p normalized; same as rotating by this p times. - Quat PowNormalized(T p) const - { - Vector3<T> v; - T a; - GetAxisAngle(&v, &a); - return Quat(v, a * p); - } - - // Rotate transforms vector in a manner that matches Matrix rotations (counter-clockwise, - // assuming negative direction of the axis). Standard formula: q(t) * V * q(t)^-1. - Vector3<T> Rotate(const Vector3<T>& v) const - { - return ((*this * Quat<T>(v.x, v.y, v.z, T(0))) * Inverted()).Imag(); - } + Matrix3 operator* (const Matrix3& b) const + { + Matrix3 result(Matrix3::NoInit); + Multiply(&result, *this, b); + return result; + } - - // Inversed quaternion rotates in the opposite direction. - Quat Inverted() const - { - return Quat(-x, -y, -z, w); - } + Matrix3& operator*= (const Matrix3& b) + { + return Multiply(this, Matrix3(*this), b); + } - // Sets this quaternion to the one rotates in the opposite direction. - void Invert() - { - *this = Quat(-x, -y, -z, w); - } - - // Converting quaternion to matrix. 
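Quat::Distance above compares against the antipodal point because q and -q encode the same rotation; without that check, two representations of an identical orientation could report a large distance. A standalone sketch (plain C++, not SDK code) of the same idea:

// Standalone sketch (not SDK code): why Quat::Distance above also measures the
// distance to -q. q and -q represent the same rotation, so the result is zero.
#include <cassert>
#include <cmath>

struct Q { double x, y, z, w; };

static double Len(const Q& q) { return std::sqrt(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w); }

static double Distance(const Q& a, const Q& b)
{
    Q d1 = { a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w };
    Q d2 = { a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w };   // antipodal point check, as above
    double l1 = Len(d1), l2 = Len(d2);
    return (l1 < l2) ? l1 : l2;
}

int main()
{
    Q q    = { 0.0, 0.0, std::sin(0.5), std::cos(0.5) };  // some unit quaternion
    Q negq = { -q.x, -q.y, -q.z, -q.w };                  // same rotation, opposite sign
    assert(Distance(q, negq) == 0.0);                     // naive |q - (-q)| would be 2
    return 0;
}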
- operator Matrix4f() const - { - T ww = w*w; - T xx = x*x; - T yy = y*y; - T zz = z*z; + Matrix3 operator* (T s) const + { + Matrix3 result(*this); + result *= s; + return result; + } - return Matrix4f(float(ww + xx - yy - zz), float(T(2) * (x*y - w*z)), float(T(2) * (x*z + w*y)), - float(T(2) * (x*y + w*z)), float(ww - xx + yy - zz), float(T(2) * (y*z - w*x)), - float(T(2) * (x*z - w*y)), float(T(2) * (y*z + w*x)), float(ww - xx - yy + zz) ); - } + Matrix3& operator*= (T s) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] *= s; + return *this; + } + Vector3<T> operator* (const Vector3<T> &b) const + { + Vector3<T> result; + result.x = M[0][0]*b.x + M[0][1]*b.y + M[0][2]*b.z; + result.y = M[1][0]*b.x + M[1][1]*b.y + M[1][2]*b.z; + result.z = M[2][0]*b.x + M[2][1]*b.y + M[2][2]*b.z; + + return result; + } - // Converting matrix to quaternion - static Quat<T> Matrix4fToQuat(const Matrix4f& m) + Matrix3 operator/ (T s) const { - T trace = m.M[0][0] + m.M[1][1] + m.M[2][2]; - Quat<T> q; + Matrix3 result(*this); + result /= s; + return result; + } - // In almost all cases, the first part is executed. - // However, if the trace is not positive, the other - // cases arise. - if (trace > T(0)) - { - T s = sqrt(trace + T(1)) * T(2); // s=4*qw - q.w = T(0.25) * s; - q.x = (m.M[2][1] - m.M[1][2]) / s; - q.y = (m.M[0][2] - m.M[2][0]) / s; - q.z = (m.M[1][0] - m.M[0][1]) / s; - } - else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) - { - T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2); - q.w = (m.M[2][1] - m.M[1][2]) / s; - q.x = T(0.25) * s; - q.y = (m.M[0][1] + m.M[1][0]) / s; - q.z = (m.M[2][0] + m.M[0][2]) / s; - } - else if (m.M[1][1] > m.M[2][2]) - { - T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy - q.w = (m.M[0][2] - m.M[2][0]) / s; - q.x = (m.M[0][1] + m.M[1][0]) / s; - q.y = T(0.25) * s; - q.z = (m.M[1][2] + m.M[2][1]) / s; - } - else - { - T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz - q.w = (m.M[1][0] - m.M[0][1]) / s; - q.x = (m.M[0][2] + m.M[2][0]) / s; - q.y = (m.M[1][2] + m.M[2][1]) / s; - q.z = T(0.25) * s; - } - return q; + Matrix3& operator/= (T s) + { + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) + M[i][j] /= s; + return *this; } + Vector3<T> Transform(const Vector3<T>& v) const + { + return Vector3<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z, + M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z, + M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z); + } - - // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of - // axis rotations and the specified coordinate system. Right-handed coordinate system - // is the default, with CCW rotations while looking in the negative axis direction. - // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. 
- // rotation a around axis A1 - // is followed by rotation b around axis A2 - // is followed by rotation c around axis A3 - // rotations are CCW or CW (D) in LH or RH coordinate system (S) - template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S> - void GetEulerAngles(T *a, T *b, T *c) + Matrix3 Transposed() const + { + return Matrix3(M[0][0], M[1][0], M[2][0], + M[0][1], M[1][1], M[2][1], + M[0][2], M[1][2], M[2][2]); + } + + void Transpose() + { + *this = Transposed(); + } + + + T SubDet (const UPInt* rows, const UPInt* cols) const + { + return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]]) + - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]]) + + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]); + } + + // M += a*b.t() + inline void Rank1Add(const Vector3<T> &a, const Vector3<T> &b) + { + M[0][0] += a.x*b.x; M[0][1] += a.x*b.y; M[0][2] += a.x*b.z; + M[1][0] += a.y*b.x; M[1][1] += a.y*b.y; M[1][2] += a.y*b.z; + M[2][0] += a.z*b.x; M[2][1] += a.z*b.y; M[2][2] += a.z*b.z; + } + + // M -= a*b.t() + inline void Rank1Sub(const Vector3<T> &a, const Vector3<T> &b) + { + M[0][0] -= a.x*b.x; M[0][1] -= a.x*b.y; M[0][2] -= a.x*b.z; + M[1][0] -= a.y*b.x; M[1][1] -= a.y*b.y; M[1][2] -= a.y*b.z; + M[2][0] -= a.z*b.x; M[2][1] -= a.z*b.y; M[2][2] -= a.z*b.z; + } + + inline Vector3<T> Col(int c) const + { + return Vector3<T>(M[0][c], M[1][c], M[2][c]); + } + + inline Vector3<T> Row(int r) const + { + return Vector3<T>(M[r][0], M[r][1], M[r][2]); + } + + inline T Determinant() const + { + const Matrix3<T>& m = *this; + T d; + + d = m.M[0][0] * (m.M[1][1]*m.M[2][2] - m.M[1][2] * m.M[2][1]); + d -= m.M[0][1] * (m.M[1][0]*m.M[2][2] - m.M[1][2] * m.M[2][0]); + d += m.M[0][2] * (m.M[1][0]*m.M[2][1] - m.M[1][1] * m.M[2][0]); + + return d; + } + + inline Matrix3<T> Inverse() const { - OVR_COMPILER_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3)); + Matrix3<T> a; + const Matrix3<T>& m = *this; + T d = Determinant(); - T Q[3] = { x, y, z }; //Quaternion components x,y,z + assert(d != 0); + T s = T(1)/d; - T ww = w*w; - T Q11 = Q[A1]*Q[A1]; - T Q22 = Q[A2]*Q[A2]; - T Q33 = Q[A3]*Q[A3]; + a.M[0][0] = s * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]); + a.M[1][0] = s * (m.M[1][2] * m.M[2][0] - m.M[1][0] * m.M[2][2]); + a.M[2][0] = s * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]); - T psign = T(-1); - // Determine whether even permutation - if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) - psign = T(1); + a.M[0][1] = s * (m.M[0][2] * m.M[2][1] - m.M[0][1] * m.M[2][2]); + a.M[1][1] = s * (m.M[0][0] * m.M[2][2] - m.M[0][2] * m.M[2][0]); + a.M[2][1] = s * (m.M[0][1] * m.M[2][0] - m.M[0][0] * m.M[2][1]); - T s2 = psign * T(2) * (psign*w*Q[A2] + Q[A1]*Q[A3]); - - if (s2 < T(-1) + Math<T>::SingularityRadius) - { // South pole singularity - *a = T(0); - *b = -S*D*Math<T>::PiOver2; - *c = S*D*atan2(T(2)*(psign*Q[A1]*Q[A2] + w*Q[A3]), - ww + Q22 - Q11 - Q33 ); - } - else if (s2 > T(1) - Math<T>::SingularityRadius) - { // North pole singularity - *a = T(0); - *b = S*D*Math<T>::PiOver2; - *c = S*D*atan2(T(2)*(psign*Q[A1]*Q[A2] + w*Q[A3]), - ww + Q22 - Q11 - Q33); - } - else - { - *a = -S*D*atan2(T(-2)*(w*Q[A1] - psign*Q[A2]*Q[A3]), - ww + Q33 - Q11 - Q22); - *b = S*D*asin(s2); - *c = S*D*atan2(T(2)*(w*Q[A3] - psign*Q[A1]*Q[A2]), - ww + Q11 - Q22 - Q33); - } - return; + a.M[0][2] = s * (m.M[0][1] * m.M[1][2] - m.M[0][2] * 
m.M[1][1]); + a.M[1][2] = s * (m.M[0][2] * m.M[1][0] - m.M[0][0] * m.M[1][2]); + a.M[2][2] = s * (m.M[0][0] * m.M[1][1] - m.M[0][1] * m.M[1][0]); + + return a; } + +}; - template <Axis A1, Axis A2, Axis A3, RotateDirection D> - void GetEulerAngles(T *a, T *b, T *c) - { GetEulerAngles<A1, A2, A3, D, Handed_R>(a, b, c); } +typedef Matrix3<float> Matrix3f; +typedef Matrix3<double> Matrix3d; - template <Axis A1, Axis A2, Axis A3> - void GetEulerAngles(T *a, T *b, T *c) - { GetEulerAngles<A1, A2, A3, Rotate_CCW, Handed_R>(a, b, c); } +//------------------------------------------------------------------------------------- +template<typename T> +class SymMat3 +{ +private: + typedef SymMat3<T> this_type; - // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of - // axis rotations and the specified coordinate system. Right-handed coordinate system - // is the default, with CCW rotations while looking in the negative axis direction. - // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned. - // rotation a around axis A1 - // is followed by rotation b around axis A2 - // is followed by rotation c around axis A1 - // Rotations are CCW or CW (D) in LH or RH coordinate system (S) - template <Axis A1, Axis A2, RotateDirection D, HandedSystem S> - void GetEulerAnglesABA(T *a, T *b, T *c) - { - OVR_COMPILER_ASSERT(A1 != A2); +public: + typedef T Value_t; + // Upper symmetric + T v[6]; // _00 _01 _02 _11 _12 _22 - T Q[3] = {x, y, z}; // Quaternion components + inline SymMat3() {} - // Determine the missing axis that was not supplied - int m = 3 - A1 - A2; + inline explicit SymMat3(T s) + { + v[0] = v[3] = v[5] = s; + v[1] = v[2] = v[4] = 0; + } - T ww = w*w; - T Q11 = Q[A1]*Q[A1]; - T Q22 = Q[A2]*Q[A2]; - T Qmm = Q[m]*Q[m]; + inline explicit SymMat3(T a00, T a01, T a02, T a11, T a12, T a22) + { + v[0] = a00; v[1] = a01; v[2] = a02; + v[3] = a11; v[4] = a12; + v[5] = a22; + } - T psign = T(-1); - if ((A1 + 1) % 3 == A2) // Determine whether even permutation - { - psign = T(1); - } + static inline int Index(unsigned int i, unsigned int j) + { + return (i <= j) ? 
(3*i - i*(i+1)/2 + j) : (3*j - j*(j+1)/2 + i); + } - T c2 = ww + Q11 - Q22 - Qmm; - if (c2 < T(-1) + Math<T>::SingularityRadius) - { // South pole singularity - *a = T(0); - *b = S*D*Math<T>::Pi; - *c = S*D*atan2( T(2)*(w*Q[A1] - psign*Q[A2]*Q[m]), - ww + Q22 - Q11 - Qmm); - } - else if (c2 > T(1) - Math<T>::SingularityRadius) - { // North pole singularity - *a = T(0); - *b = T(0); - *c = S*D*atan2( T(2)*(w*Q[A1] - psign*Q[A2]*Q[m]), - ww + Q22 - Q11 - Qmm); - } - else - { - *a = S*D*atan2( psign*w*Q[m] + Q[A1]*Q[A2], - w*Q[A2] -psign*Q[A1]*Q[m]); - *b = S*D*acos(c2); - *c = S*D*atan2( -psign*w*Q[m] + Q[A1]*Q[A2], - w*Q[A2] + psign*Q[A1]*Q[m]); - } - return; - } + inline T operator()(int i, int j) const { return v[Index(i,j)]; } + + inline T &operator()(int i, int j) { return v[Index(i,j)]; } -}; + template<typename U> + inline SymMat3<U> CastTo() const + { + return SymMat3<U>(static_cast<U>(v[0]), static_cast<U>(v[1]), static_cast<U>(v[2]), + static_cast<U>(v[3]), static_cast<U>(v[4]), static_cast<U>(v[5])); + } -typedef Quat<float> Quatf; -typedef Quat<double> Quatd; + inline this_type& operator+=(const this_type& b) + { + v[0]+=b.v[0]; + v[1]+=b.v[1]; + v[2]+=b.v[2]; + v[3]+=b.v[3]; + v[4]+=b.v[4]; + v[5]+=b.v[5]; + return *this; + } + + inline this_type& operator-=(const this_type& b) + { + v[0]-=b.v[0]; + v[1]-=b.v[1]; + v[2]-=b.v[2]; + v[3]-=b.v[3]; + v[4]-=b.v[4]; + v[5]-=b.v[5]; + + return *this; + } + + inline this_type& operator*=(T s) + { + v[0]*=s; + v[1]*=s; + v[2]*=s; + v[3]*=s; + v[4]*=s; + v[5]*=s; + + return *this; + } + + inline SymMat3 operator*(T s) const + { + SymMat3 d; + d.v[0] = v[0]*s; + d.v[1] = v[1]*s; + d.v[2] = v[2]*s; + d.v[3] = v[3]*s; + d.v[4] = v[4]*s; + d.v[5] = v[5]*s; + + return d; + } + + // Multiplies two matrices into destination with minimum copying. 
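SymMat3 above packs a symmetric 3x3 matrix into six values covering the upper triangle (_00 _01 _02 _11 _12 _22), with Index(i, j) folding the lower triangle onto the same slots. A standalone check of that mapping (not SDK code):

// Standalone sketch (not SDK code): the packed index used by SymMat3 above.
#include <cassert>

static int Index(unsigned i, unsigned j)
{
    return (i <= j) ? (3*i - i*(i+1)/2 + j)
                    : (3*j - j*(j+1)/2 + i);
}

int main()
{
    // Upper-triangle order matches the comment: _00 _01 _02 _11 _12 _22.
    assert(Index(0,0) == 0 && Index(0,1) == 1 && Index(0,2) == 2);
    assert(Index(1,1) == 3 && Index(1,2) == 4 && Index(2,2) == 5);
    // Symmetry: the lower triangle aliases the corresponding upper-triangle slot.
    assert(Index(1,0) == Index(0,1) && Index(2,0) == Index(0,2) && Index(2,1) == Index(1,2));
    return 0;
}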
+ static SymMat3& Multiply(SymMat3* d, const SymMat3& a, const SymMat3& b) + { + // _00 _01 _02 _11 _12 _22 + + d->v[0] = a.v[0] * b.v[0]; + d->v[1] = a.v[0] * b.v[1] + a.v[1] * b.v[3]; + d->v[2] = a.v[0] * b.v[2] + a.v[1] * b.v[4]; + + d->v[3] = a.v[3] * b.v[3]; + d->v[4] = a.v[3] * b.v[4] + a.v[4] * b.v[5]; + + d->v[5] = a.v[5] * b.v[5]; + + return *d; + } + + inline T Determinant() const + { + const this_type& m = *this; + T d; + + d = m(0,0) * (m(1,1)*m(2,2) - m(1,2) * m(2,1)); + d -= m(0,1) * (m(1,0)*m(2,2) - m(1,2) * m(2,0)); + d += m(0,2) * (m(1,0)*m(2,1) - m(1,1) * m(2,0)); + + return d; + } + + inline this_type Inverse() const + { + this_type a; + const this_type& m = *this; + T d = Determinant(); + + assert(d != 0); + T s = T(1)/d; + + a(0,0) = s * (m(1,1) * m(2,2) - m(1,2) * m(2,1)); + + a(0,1) = s * (m(0,2) * m(2,1) - m(0,1) * m(2,2)); + a(1,1) = s * (m(0,0) * m(2,2) - m(0,2) * m(2,0)); + + a(0,2) = s * (m(0,1) * m(1,2) - m(0,2) * m(1,1)); + a(1,2) = s * (m(0,2) * m(1,0) - m(0,0) * m(1,2)); + a(2,2) = s * (m(0,0) * m(1,1) - m(0,1) * m(1,0)); + + return a; + } + inline T Trace() const { return v[0] + v[3] + v[5]; } + // M = a*a.t() + inline void Rank1(const Vector3<T> &a) + { + v[0] = a.x*a.x; v[1] = a.x*a.y; v[2] = a.x*a.z; + v[3] = a.y*a.y; v[4] = a.y*a.z; + v[5] = a.z*a.z; + } + + // M += a*a.t() + inline void Rank1Add(const Vector3<T> &a) + { + v[0] += a.x*a.x; v[1] += a.x*a.y; v[2] += a.x*a.z; + v[3] += a.y*a.y; v[4] += a.y*a.z; + v[5] += a.z*a.z; + } + + // M -= a*a.t() + inline void Rank1Sub(const Vector3<T> &a) + { + v[0] -= a.x*a.x; v[1] -= a.x*a.y; v[2] -= a.x*a.z; + v[3] -= a.y*a.y; v[4] -= a.y*a.z; + v[5] -= a.z*a.z; + } +}; + +typedef SymMat3<float> SymMat3f; +typedef SymMat3<double> SymMat3d; + +template<typename T> +inline Matrix3<T> operator*(const SymMat3<T>& a, const SymMat3<T>& b) +{ + #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c)) + return Matrix3<T>( + AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2), + AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2), + AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2)); + #undef AJB_ARBC +} + +template<typename T> +inline Matrix3<T> operator*(const Matrix3<T>& a, const SymMat3<T>& b) +{ + #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c)) + return Matrix3<T>( + AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2), + AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2), + AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2)); + #undef AJB_ARBC +} //------------------------------------------------------------------------------------- // ***** Angle diff --git a/LibOVR/Src/Kernel/OVR_RefCount.cpp b/LibOVR/Src/Kernel/OVR_RefCount.cpp index 8bb80ef..c6301ed 100644 --- a/LibOVR/Src/Kernel/OVR_RefCount.cpp +++ b/LibOVR/Src/Kernel/OVR_RefCount.cpp @@ -5,16 +5,16 @@ Content : Reference counting implementation Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_RefCount.h b/LibOVR/Src/Kernel/OVR_RefCount.h index 8f2e3ad..775e24c 100644 --- a/LibOVR/Src/Kernel/OVR_RefCount.h +++ b/LibOVR/Src/Kernel/OVR_RefCount.h @@ -6,16 +6,16 @@ Content : Reference counting implementation headers Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -62,7 +62,7 @@ class RefCountNTSImpl; class RefCountImplCore { protected: - volatile int RefCount; + volatile int RefCount; public: // RefCountImpl constructor always initializes RefCount to 1 by default. @@ -145,7 +145,7 @@ public: // RefCountVImpl provides Thread-Safe implementation of reference counting, plus, // virtual AddRef and Release. -class RefCountVImpl : public RefCountImplCore +class RefCountVImpl : virtual public RefCountImplCore { public: // Thread-Safe Ref-Count Implementation. @@ -194,6 +194,8 @@ public: // Redefine all new & delete operators. OVR_MEMORY_REDEFINE_NEW_IMPL(Base, OVR_REFCOUNTALLOC_CHECK_DELETE) +#undef OVR_REFCOUNTALLOC_CHECK_DELETE + #ifdef OVR_DEFINE_NEW #define new OVR_DEFINE_NEW #endif @@ -202,6 +204,35 @@ public: }; +template<class Base> +class RefCountBaseStatVImpl : virtual public Base +{ +public: + RefCountBaseStatVImpl() { } + + // *** Override New and Delete + + // DOM-IGNORE-BEGIN + // Undef new temporarily if it is being redefined +#ifdef OVR_DEFINE_NEW +#undef new +#endif + +#define OVR_REFCOUNTALLOC_CHECK_DELETE(class_name, p) + + // Redefine all new & delete operators. + OVR_MEMORY_REDEFINE_NEW_IMPL(Base, OVR_REFCOUNTALLOC_CHECK_DELETE) + +#undef OVR_REFCOUNTALLOC_CHECK_DELETE + +#ifdef OVR_DEFINE_NEW +#define new OVR_DEFINE_NEW +#endif + // OVR_BUILD_DEFINE_NEW + // DOM-IGNORE-END +}; + + //----------------------------------------------------------------------------------- // *** End user RefCountBase<> classes @@ -225,11 +256,11 @@ public: // RefCountBaseV is the same as RefCountBase but with virtual AddRef/Release template<class C> -class RefCountBaseV : public RefCountBaseStatImpl<RefCountVImpl> +class RefCountBaseV : virtual public RefCountBaseStatVImpl<RefCountVImpl> { public: // Constructor. 
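The RefCount changes in this hunk switch RefCountVImpl and the new RefCountBaseStatVImpl to virtual inheritance. The diff itself does not state the motivation; the usual effect, sketched below with hypothetical names (Core, Left, Right, Joined are not SDK types), is that a class reaching the ref-count base along more than one path still holds a single shared counter:

// Illustrative sketch only (not SDK code): with virtual bases, both inheritance
// paths share one Core subobject, so there is exactly one reference counter.
#include <cassert>

struct Core                 { int refCount = 1; };
struct Left   : virtual Core { };
struct Right  : virtual Core { };
struct Joined : Left, Right  { };

int main()
{
    Joined j;
    // Both paths alias the same Core subobject (same address).
    assert(&static_cast<Left&>(j).refCount == &static_cast<Right&>(j).refCount);
    j.refCount = 5;   // unambiguous -- there is only one refCount
    return 0;
}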
- OVR_FORCE_INLINE RefCountBaseV() : RefCountBaseStatImpl<RefCountVImpl>() { } + OVR_FORCE_INLINE RefCountBaseV() : RefCountBaseStatVImpl<RefCountVImpl>() { } }; diff --git a/LibOVR/Src/Kernel/OVR_Std.cpp b/LibOVR/Src/Kernel/OVR_Std.cpp index 4159652..6b5be18 100644 --- a/LibOVR/Src/Kernel/OVR_Std.cpp +++ b/LibOVR/Src/Kernel/OVR_Std.cpp @@ -5,16 +5,16 @@ Content : Standard C function implementation Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Std.h b/LibOVR/Src/Kernel/OVR_Std.h index 4ec8d65..c11f853 100644 --- a/LibOVR/Src/Kernel/OVR_Std.h +++ b/LibOVR/Src/Kernel/OVR_Std.h @@ -6,16 +6,16 @@ Content : Standard C function interface Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -229,7 +229,7 @@ inline long OVR_CDECL OVR_strtol(const char* string, char** tailptr, int radix) return strtol(string, tailptr, radix); } -inline unsigned long OVR_CDECL OVR_strtoul(const char* string, char** tailptr, int radix) +inline long OVR_CDECL OVR_strtoul(const char* string, char** tailptr, int radix) { return strtoul(string, tailptr, radix); } @@ -281,7 +281,7 @@ inline UPInt OVR_CDECL OVR_sprintf(char *dest, UPInt destsize, const char* forma { va_list argList; va_start(argList,format); - SInt32 ret; + UPInt ret; #if defined(OVR_CC_MSVC) #if defined(OVR_MSVC_SAFESTRING) ret = _vsnprintf_s(dest, destsize, _TRUNCATE, format, argList); @@ -298,7 +298,7 @@ inline UPInt OVR_CDECL OVR_sprintf(char *dest, UPInt destsize, const char* forma OVR_ASSERT(ret < destsize); #endif va_end(argList); - return (UPInt)ret; + return ret; } inline UPInt OVR_CDECL OVR_vsprintf(char *dest, UPInt destsize, const char * format, va_list argList) diff --git a/LibOVR/Src/Kernel/OVR_String.cpp b/LibOVR/Src/Kernel/OVR_String.cpp index 8c72c6d..86aa126 100644 --- a/LibOVR/Src/Kernel/OVR_String.cpp +++ b/LibOVR/Src/Kernel/OVR_String.cpp @@ -6,16 +6,16 @@ Content : String UTF8 string implementation with copy-on-write semantics Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_String.h b/LibOVR/Src/Kernel/OVR_String.h index 6c09178..f7151c7 100644 --- a/LibOVR/Src/Kernel/OVR_String.h +++ b/LibOVR/Src/Kernel/OVR_String.h @@ -7,16 +7,16 @@ Content : String UTF8 string implementation with copy-on-write semantics Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -236,7 +236,7 @@ public: // String& Insert(const UInt32* substr, UPInt posAt, SPInt size = -1); // Get Byte index of the character at position = index - UPInt GetByteIndex(UPInt index) const { return (UPInt)UTF8Util::GetByteIndex(static_cast<SPInt>(index), GetData()->Data); } + UPInt GetByteIndex(UPInt index) const { return (UPInt)UTF8Util::GetByteIndex(index, GetData()->Data); } // Utility: case-insensitive string compare. stricmp() & strnicmp() are not // ANSI or POSIX, do not seem to appear in Linux. @@ -286,7 +286,7 @@ public: void operator += (const String& src); void operator += (const char* psrc) { AppendString(psrc); } void operator += (const wchar_t* psrc) { AppendString(psrc); } - void operator += (char ch) { AppendChar( static_cast<UInt32>(ch) ); } + void operator += (char ch) { AppendChar(ch); } String operator + (const char* str) const; String operator + (const String& src) const; @@ -476,10 +476,10 @@ public: void operator = (const String& src); // Addition - void operator += (const String& src) { AppendString(src.ToCStr(),static_cast<SPInt>(src.GetSize())); } + void operator += (const String& src) { AppendString(src.ToCStr(),src.GetSize()); } void operator += (const char* psrc) { AppendString(psrc); } void operator += (const wchar_t* psrc) { AppendString(psrc); } - void operator += (char ch) { AppendChar( static_cast<SPInt>(ch) ); } + void operator += (char ch) { AppendChar(ch); } //String operator + (const char* str) const ; //String operator + (const String& src) const ; diff --git a/LibOVR/Src/Kernel/OVR_StringHash.h b/LibOVR/Src/Kernel/OVR_StringHash.h index 90679e0..baa80a7 100644 --- a/LibOVR/Src/Kernel/OVR_StringHash.h +++ b/LibOVR/Src/Kernel/OVR_StringHash.h @@ -7,16 +7,16 @@ Content : String hash table used when optional case-insensitive Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_String_FormatUtil.cpp b/LibOVR/Src/Kernel/OVR_String_FormatUtil.cpp index e0db5d4..e196dd7 100644 --- a/LibOVR/Src/Kernel/OVR_String_FormatUtil.cpp +++ b/LibOVR/Src/Kernel/OVR_String_FormatUtil.cpp @@ -5,16 +5,16 @@ Content : String format functions. Created : February 27, 2013 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_String_PathUtil.cpp b/LibOVR/Src/Kernel/OVR_String_PathUtil.cpp index 4a7e87a..02abe15 100644 --- a/LibOVR/Src/Kernel/OVR_String_PathUtil.cpp +++ b/LibOVR/Src/Kernel/OVR_String_PathUtil.cpp @@ -5,16 +5,16 @@ Content : String filename/url helper function Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_SysFile.cpp b/LibOVR/Src/Kernel/OVR_SysFile.cpp index cdbc843..f487492 100644 --- a/LibOVR/Src/Kernel/OVR_SysFile.cpp +++ b/LibOVR/Src/Kernel/OVR_SysFile.cpp @@ -6,16 +6,16 @@ Content : File wrapper class implementation (Win32) Created : April 5, 1999 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -83,7 +83,7 @@ SysFile::SysFile() : DelegatedFile(0) pFile = *new UnopenedFile; } -File* FileFILEOpen(const String& path, int flags, int mode); +Ptr<File> FileFILEOpen(const String& path, int flags, int mode); // Opens a file SysFile::SysFile(const String& path, int flags, int mode) : DelegatedFile(0) @@ -96,7 +96,7 @@ SysFile::SysFile(const String& path, int flags, int mode) : DelegatedFile(0) // Will fail if file's already open bool SysFile::Open(const String& path, int flags, int mode) { - pFile = *FileFILEOpen(path, flags, mode); + pFile = FileFILEOpen(path, flags, mode); if ((!pFile) || (!pFile->IsValid())) { pFile = *new UnopenedFile; diff --git a/LibOVR/Src/Kernel/OVR_SysFile.h b/LibOVR/Src/Kernel/OVR_SysFile.h index 3241e67..d492377 100644 --- a/LibOVR/Src/Kernel/OVR_SysFile.h +++ b/LibOVR/Src/Kernel/OVR_SysFile.h @@ -11,16 +11,16 @@ Notes : errno may not be preserved across use of GBaseFile member functi : Directories cannot be deleted while files opened from them are in use (For the GetFullName function) -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_System.cpp b/LibOVR/Src/Kernel/OVR_System.cpp index e57a663..3144ade 100644 --- a/LibOVR/Src/Kernel/OVR_System.cpp +++ b/LibOVR/Src/Kernel/OVR_System.cpp @@ -6,16 +6,16 @@ Content : General kernel initialization/cleanup, including that Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_System.h b/LibOVR/Src/Kernel/OVR_System.h index 1e4065f..253fe19 100644 --- a/LibOVR/Src/Kernel/OVR_System.h +++ b/LibOVR/Src/Kernel/OVR_System.h @@ -7,16 +7,16 @@ Content : General kernel initialization/cleanup, including that Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Threads.h b/LibOVR/Src/Kernel/OVR_Threads.h index 0585972..e1f5abe 100644 --- a/LibOVR/Src/Kernel/OVR_Threads.h +++ b/LibOVR/Src/Kernel/OVR_Threads.h @@ -6,16 +6,16 @@ Content : Contains thread-related (safe) functionality Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_ThreadsPthread.cpp b/LibOVR/Src/Kernel/OVR_ThreadsPthread.cpp deleted file mode 100644 index bf87f8c..0000000 --- a/LibOVR/Src/Kernel/OVR_ThreadsPthread.cpp +++ /dev/null @@ -1,821 +0,0 @@ -/************************************************************************************ - -PublicHeader: OVR -Filename : OVR_Threads.h -Content : -Created : -Notes : - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -************************************************************************************/ - -#include "OVR_Threads.h" -#include "OVR_Hash.h" - -#ifdef OVR_ENABLE_THREADS - -#include "OVR_Timer.h" -#include "OVR_Log.h" - -#include <pthread.h> -#include <time.h> - -#ifdef OVR_OS_PS3 -#include <sys/sys_time.h> -#include <sys/timer.h> -#include <sys/synchronization.h> -#define sleep(x) sys_timer_sleep(x) -#define usleep(x) sys_timer_usleep(x) -using std::timespec; -#else -#include <unistd.h> -#include <sys/time.h> -#include <errno.h> -#endif - -namespace OVR { - -// ***** Mutex implementation - - -// *** Internal Mutex implementation structure - -class MutexImpl : public NewOverrideBase -{ - // System mutex or semaphore - pthread_mutex_t SMutex; - bool Recursive; - unsigned LockCount; - pthread_t LockedBy; - - friend class WaitConditionImpl; - -public: - // Constructor/destructor - MutexImpl(Mutex* pmutex, bool recursive = 1); - ~MutexImpl(); - - // Locking functions - void DoLock(); - bool TryLock(); - void Unlock(Mutex* pmutex); - // Returns 1 if the mutes is currently locked - bool IsLockedByAnotherThread(Mutex* pmutex); - bool IsSignaled() const; -}; - -pthread_mutexattr_t Lock::RecursiveAttr; -bool Lock::RecursiveAttrInit = 0; - -// *** Constructor/destructor -MutexImpl::MutexImpl(Mutex* pmutex, bool recursive) -{ - Recursive = recursive; - LockCount = 0; - - if (Recursive) - { - if (!Lock::RecursiveAttrInit) - { - pthread_mutexattr_init(&Lock::RecursiveAttr); - pthread_mutexattr_settype(&Lock::RecursiveAttr, PTHREAD_MUTEX_RECURSIVE); - Lock::RecursiveAttrInit = 1; - } - - pthread_mutex_init(&SMutex, &Lock::RecursiveAttr); - } - else - pthread_mutex_init(&SMutex, 0); -} - -MutexImpl::~MutexImpl() -{ - pthread_mutex_destroy(&SMutex); -} - - -// Lock and try lock -void MutexImpl::DoLock() -{ - while (pthread_mutex_lock(&SMutex)); - LockCount++; - LockedBy = pthread_self(); -} - -bool MutexImpl::TryLock() -{ - if (!pthread_mutex_trylock(&SMutex)) - { - LockCount++; - LockedBy = pthread_self(); - return 1; - } - - return 0; -} - -void MutexImpl::Unlock(Mutex* pmutex) -{ - OVR_ASSERT(pthread_self() == LockedBy && LockCount > 0); - - unsigned lockCount; - LockCount--; - lockCount = LockCount; - - pthread_mutex_unlock(&SMutex); -} - -bool MutexImpl::IsLockedByAnotherThread(Mutex* pmutex) -{ - // There could be multiple interpretations of IsLocked with respect to current thread - if (LockCount == 0) - return 0; - if (pthread_self() != LockedBy) - return 1; - return 0; -} - -bool MutexImpl::IsSignaled() const -{ - // An mutex is signaled if it is not locked ANYWHERE - // Note that this is different from IsLockedByAnotherThread function, - // that takes current thread into account - return LockCount == 0; -} - - -// *** Actual Mutex class implementation - -Mutex::Mutex(bool recursive) -{ - // NOTE: RefCount mode already thread-safe for all waitables. 
- pImpl = new MutexImpl(this, recursive); -} - -Mutex::~Mutex() -{ - delete pImpl; -} - -// Lock and try lock -void Mutex::DoLock() -{ - pImpl->DoLock(); -} -bool Mutex::TryLock() -{ - return pImpl->TryLock(); -} -void Mutex::Unlock() -{ - pImpl->Unlock(this); -} -bool Mutex::IsLockedByAnotherThread() -{ - return pImpl->IsLockedByAnotherThread(this); -} - - - -//----------------------------------------------------------------------------------- -// ***** Event - -bool Event::Wait(unsigned delay) -{ - Mutex::Locker lock(&StateMutex); - - // Do the correct amount of waiting - if (delay == OVR_WAIT_INFINITE) - { - while(!State) - StateWaitCondition.Wait(&StateMutex); - } - else if (delay) - { - if (!State) - StateWaitCondition.Wait(&StateMutex, delay); - } - - bool state = State; - // Take care of temporary 'pulsing' of a state - if (Temporary) - { - Temporary = false; - State = false; - } - return state; -} - -void Event::updateState(bool newState, bool newTemp, bool mustNotify) -{ - Mutex::Locker lock(&StateMutex); - State = newState; - Temporary = newTemp; - if (mustNotify) - StateWaitCondition.NotifyAll(); -} - - - -// ***** Wait Condition Implementation - -// Internal implementation class -class WaitConditionImpl : public NewOverrideBase -{ - pthread_mutex_t SMutex; - pthread_cond_t Condv; - -public: - - // Constructor/destructor - WaitConditionImpl(); - ~WaitConditionImpl(); - - // Release mutex and wait for condition. The mutex is re-aqured after the wait. - bool Wait(Mutex *pmutex, unsigned delay = OVR_WAIT_INFINITE); - - // Notify a condition, releasing at one object waiting - void Notify(); - // Notify a condition, releasing all objects waiting - void NotifyAll(); -}; - - -WaitConditionImpl::WaitConditionImpl() -{ - pthread_mutex_init(&SMutex, 0); - pthread_cond_init(&Condv, 0); -} - -WaitConditionImpl::~WaitConditionImpl() -{ - pthread_mutex_destroy(&SMutex); - pthread_cond_destroy(&Condv); -} - -bool WaitConditionImpl::Wait(Mutex *pmutex, unsigned delay) -{ - bool result = 1; - unsigned lockCount = pmutex->pImpl->LockCount; - - // Mutex must have been locked - if (lockCount == 0) - return 0; - - pthread_mutex_lock(&SMutex); - - // Finally, release a mutex or semaphore - if (pmutex->pImpl->Recursive) - { - // Release the recursive mutex N times - pmutex->pImpl->LockCount = 0; - for(unsigned i=0; i<lockCount; i++) - pthread_mutex_unlock(&pmutex->pImpl->SMutex); - } - else - { - pmutex->pImpl->LockCount = 0; - pthread_mutex_unlock(&pmutex->pImpl->SMutex); - } - - // Note that there is a gap here between mutex.Unlock() and Wait(). - // The other mutex protects this gap. 
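The removed MutexImpl above configures its pthread mutex as recursive through a shared pthread_mutexattr_t, so the same thread can take the lock more than once and must release it the same number of times. A minimal standalone illustration of that setup (not SDK code):

// Standalone sketch (not SDK code): recursive pthread mutex setup as used by
// the removed MutexImpl above.
#include <pthread.h>
#include <cassert>

int main()
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

    pthread_mutex_t m;
    pthread_mutex_init(&m, &attr);

    int r1 = pthread_mutex_lock(&m);
    int r2 = pthread_mutex_lock(&m);   // same thread: second lock also succeeds
    assert(r1 == 0 && r2 == 0);
    pthread_mutex_unlock(&m);          // unlock once per lock
    pthread_mutex_unlock(&m);

    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&attr);
    return 0;
}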
- - if (delay == OVR_WAIT_INFINITE) - pthread_cond_wait(&Condv,&SMutex); - else - { - timespec ts; -#ifdef OVR_OS_PS3 - sys_time_sec_t s; - sys_time_nsec_t ns; - sys_time_get_current_time(&s, &ns); - - ts.tv_sec = s + (delay / 1000); - ts.tv_nsec = ns + (delay % 1000) * 1000000; - -#else - struct timeval tv; - gettimeofday(&tv, 0); - - ts.tv_sec = tv.tv_sec + (delay / 1000); - ts.tv_nsec = (tv.tv_usec + (delay % 1000) * 1000) * 1000; -#endif - if (ts.tv_nsec > 999999999) - { - ts.tv_sec++; - ts.tv_nsec -= 1000000000; - } - int r = pthread_cond_timedwait(&Condv,&SMutex, &ts); - OVR_ASSERT(r == 0 || r == ETIMEDOUT); - if (r) - result = 0; - } - - pthread_mutex_unlock(&SMutex); - - // Re-aquire the mutex - for(unsigned i=0; i<lockCount; i++) - pmutex->DoLock(); - - // Return the result - return result; -} - -// Notify a condition, releasing the least object in a queue -void WaitConditionImpl::Notify() -{ - pthread_mutex_lock(&SMutex); - pthread_cond_signal(&Condv); - pthread_mutex_unlock(&SMutex); -} - -// Notify a condition, releasing all objects waiting -void WaitConditionImpl::NotifyAll() -{ - pthread_mutex_lock(&SMutex); - pthread_cond_broadcast(&Condv); - pthread_mutex_unlock(&SMutex); -} - - - -// *** Actual implementation of WaitCondition - -WaitCondition::WaitCondition() -{ - pImpl = new WaitConditionImpl; -} -WaitCondition::~WaitCondition() -{ - delete pImpl; -} - -bool WaitCondition::Wait(Mutex *pmutex, unsigned delay) -{ - return pImpl->Wait(pmutex, delay); -} -// Notification -void WaitCondition::Notify() -{ - pImpl->Notify(); -} -void WaitCondition::NotifyAll() -{ - pImpl->NotifyAll(); -} - - -// ***** Current thread - -// Per-thread variable -/* -static __thread Thread* pCurrentThread = 0; - -// Static function to return a pointer to the current thread -void Thread::InitCurrentThread(Thread *pthread) -{ - pCurrentThread = pthread; -} - -// Static function to return a pointer to the current thread -Thread* Thread::GetThread() -{ - return pCurrentThread; -} -*/ - - -// *** Thread constructors. - -Thread::Thread(UPInt stackSize, int processor) -{ - // NOTE: RefCount mode already thread-safe for all Waitable objects. - CreateParams params; - params.stackSize = stackSize; - params.processor = processor; - Init(params); -} - -Thread::Thread(Thread::ThreadFn threadFunction, void* userHandle, UPInt stackSize, - int processor, Thread::ThreadState initialState) -{ - CreateParams params(threadFunction, userHandle, stackSize, processor, initialState); - Init(params); -} - -Thread::Thread(const CreateParams& params) -{ - Init(params); -} - -void Thread::Init(const CreateParams& params) -{ - // Clear the variables - ThreadFlags = 0; - ThreadHandle = 0; - ExitCode = 0; - SuspendCount = 0; - StackSize = params.stackSize; - Processor = params.processor; - Priority = params.priority; - - // Clear Function pointers - ThreadFunction = params.threadFunction; - UserHandle = params.userHandle; - if (params.initialState != NotRunning) - Start(params.initialState); -} - -Thread::~Thread() -{ - // Thread should not running while object is being destroyed, - // this would indicate ref-counting issue. - //OVR_ASSERT(IsRunning() == 0); - - // Clean up thread. - ThreadHandle = 0; -} - - - -// *** Overridable User functions. - -// Default Run implementation -int Thread::Run() -{ - // Call pointer to function, if available. - return (ThreadFunction) ? ThreadFunction(this, UserHandle) : 0; -} -void Thread::OnExit() -{ -} - - -// Finishes the thread and releases internal reference to it. 
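The removed WaitConditionImpl::Wait above converts a relative delay in milliseconds into the absolute deadline pthread_cond_timedwait expects, carrying overflowing nanoseconds into the seconds field. A standalone sketch of the same conversion (not SDK code; DeadlineFromNow is a name introduced here):

// Standalone sketch (not SDK code): relative millisecond delay -> absolute
// timespec deadline, with the nanosecond carry performed by the removed Wait().
#include <sys/time.h>
#include <time.h>
#include <stdio.h>

static timespec DeadlineFromNow(unsigned delayMs)
{
    timeval tv;
    gettimeofday(&tv, 0);

    timespec ts;
    ts.tv_sec  = tv.tv_sec + (delayMs / 1000);
    ts.tv_nsec = (tv.tv_usec + (delayMs % 1000) * 1000) * 1000;

    // Carry overflowing nanoseconds into the seconds field.
    if (ts.tv_nsec > 999999999)
    {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000;
    }
    return ts;
}

int main()
{
    timespec d = DeadlineFromNow(1500);
    printf("deadline: %ld s, %ld ns\n", (long)d.tv_sec, (long)d.tv_nsec);
    return 0;
}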
-void Thread::FinishAndRelease() -{ - // Note: thread must be US. - ThreadFlags &= (UInt32)~(OVR_THREAD_STARTED); - ThreadFlags |= OVR_THREAD_FINISHED; - - // Release our reference; this is equivalent to 'delete this' - // from the point of view of our thread. - Release(); -} - - - -// *** ThreadList - used to track all created threads - -class ThreadList : public NewOverrideBase -{ - //------------------------------------------------------------------------ - struct ThreadHashOp - { - size_t operator()(const Thread* ptr) - { - return (((size_t)ptr) >> 6) ^ (size_t)ptr; - } - }; - - HashSet<Thread*, ThreadHashOp> ThreadSet; - Mutex ThreadMutex; - WaitCondition ThreadsEmpty; - // Track the root thread that created us. - pthread_t RootThreadId; - - static ThreadList* volatile pRunningThreads; - - void addThread(Thread *pthread) - { - Mutex::Locker lock(&ThreadMutex); - ThreadSet.Add(pthread); - } - - void removeThread(Thread *pthread) - { - Mutex::Locker lock(&ThreadMutex); - ThreadSet.Remove(pthread); - if (ThreadSet.GetSize() == 0) - ThreadsEmpty.Notify(); - } - - void finishAllThreads() - { - // Only original root thread can call this. - OVR_ASSERT(pthread_self() == RootThreadId); - - Mutex::Locker lock(&ThreadMutex); - while (ThreadSet.GetSize() != 0) - ThreadsEmpty.Wait(&ThreadMutex); - } - -public: - - ThreadList() - { - RootThreadId = pthread_self(); - } - ~ThreadList() { } - - - static void AddRunningThread(Thread *pthread) - { - // Non-atomic creation ok since only the root thread - if (!pRunningThreads) - { - pRunningThreads = new ThreadList; - OVR_ASSERT(pRunningThreads); - } - pRunningThreads->addThread(pthread); - } - - // NOTE: 'pthread' might be a dead pointer when this is - // called so it should not be accessed; it is only used - // for removal. - static void RemoveRunningThread(Thread *pthread) - { - OVR_ASSERT(pRunningThreads); - pRunningThreads->removeThread(pthread); - } - - static void FinishAllThreads() - { - // This is ok because only root thread can wait for other thread finish. - if (pRunningThreads) - { - pRunningThreads->finishAllThreads(); - delete pRunningThreads; - pRunningThreads = 0; - } - } -}; - -// By default, we have no thread list. -ThreadList* volatile ThreadList::pRunningThreads = 0; - - -// FinishAllThreads - exposed publicly in Thread. -void Thread::FinishAllThreads() -{ - ThreadList::FinishAllThreads(); -} - -// *** Run override - -int Thread::PRun() -{ - // Suspend us on start, if requested - if (ThreadFlags & OVR_THREAD_START_SUSPENDED) - { - Suspend(); - ThreadFlags &= (UInt32)~OVR_THREAD_START_SUSPENDED; - } - - // Call the virtual run function - ExitCode = Run(); - return ExitCode; -} - - - - -// *** User overridables - -bool Thread::GetExitFlag() const -{ - return (ThreadFlags & OVR_THREAD_EXIT) != 0; -} - -void Thread::SetExitFlag(bool exitFlag) -{ - // The below is atomic since ThreadFlags is AtomicInt. 
- if (exitFlag) - ThreadFlags |= OVR_THREAD_EXIT; - else - ThreadFlags &= (UInt32) ~OVR_THREAD_EXIT; -} - - -// Determines whether the thread was running and is now finished -bool Thread::IsFinished() const -{ - return (ThreadFlags & OVR_THREAD_FINISHED) != 0; -} -// Determines whether the thread is suspended -bool Thread::IsSuspended() const -{ - return SuspendCount > 0; -} -// Returns current thread state -Thread::ThreadState Thread::GetThreadState() const -{ - if (IsSuspended()) - return Suspended; - if (ThreadFlags & OVR_THREAD_STARTED) - return Running; - return NotRunning; -} -/* -static const char* mapsched_policy(int policy) -{ - switch(policy) - { - case SCHED_OTHER: - return "SCHED_OTHER"; - case SCHED_RR: - return "SCHED_RR"; - case SCHED_FIFO: - return "SCHED_FIFO"; - - } - return "UNKNOWN"; -} - int policy; - sched_param sparam; - pthread_getschedparam(pthread_self(), &policy, &sparam); - int max_prior = sched_get_priority_max(policy); - int min_prior = sched_get_priority_min(policy); - printf(" !!!! policy: %s, priority: %d, max priority: %d, min priority: %d\n", mapsched_policy(policy), sparam.sched_priority, max_prior, min_prior); -#include <stdio.h> -*/ -// ***** Thread management - -// The actual first function called on thread start -void* Thread_PthreadStartFn(void* phandle) -{ - Thread* pthread = (Thread*)phandle; - int result = pthread->PRun(); - // Signal the thread as done and release it atomically. - pthread->FinishAndRelease(); - // At this point Thread object might be dead; however we can still pass - // it to RemoveRunningThread since it is only used as a key there. - ThreadList::RemoveRunningThread(pthread); - return (void*) result; -} - -int Thread::InitAttr = 0; -pthread_attr_t Thread::Attr; - -/* static */ -int Thread::GetOSPriority(ThreadPriority p) -//static inline int MapToSystemPrority(Thread::ThreadPriority p) -{ -#ifdef OVR_OS_PS3 - switch(p) - { - case Thread::CriticalPriority: return 0; - case Thread::HighestPriority: return 300; - case Thread::AboveNormalPriority: return 600; - case Thread::NormalPriority: return 1000; - case Thread::BelowNormalPriority: return 1500; - case Thread::LowestPriority: return 2500; - case Thread::IdlePriority: return 3071; - } return 1000; -#else - OVR_UNUSED(p); - return -1; -#endif -} - -bool Thread::Start(ThreadState initialState) -{ - if (initialState == NotRunning) - return 0; - if (GetThreadState() != NotRunning) - { - OVR_DEBUG_LOG(("Thread::Start failed - thread %p already running", this)); - return 0; - } - - if (!InitAttr) - { - pthread_attr_init(&Attr); - pthread_attr_setdetachstate(&Attr, PTHREAD_CREATE_DETACHED); - pthread_attr_setstacksize(&Attr, 128 * 1024); - sched_param sparam; - sparam.sched_priority = Thread::GetOSPriority(NormalPriority); - pthread_attr_setschedparam(&Attr, &sparam); - InitAttr = 1; - } - - ExitCode = 0; - SuspendCount = 0; - ThreadFlags = (initialState == Running) ? 
0 : OVR_THREAD_START_SUSPENDED; - - // AddRef to us until the thread is finished - AddRef(); - ThreadList::AddRunningThread(this); - - int result; - if (StackSize != 128 * 1024 || Priority != NormalPriority) - { - pthread_attr_t attr; - - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - pthread_attr_setstacksize(&attr, StackSize); - sched_param sparam; - sparam.sched_priority = Thread::GetOSPriority(Priority); - pthread_attr_setschedparam(&attr, &sparam); - result = pthread_create(&ThreadHandle, &attr, Thread_PthreadStartFn, this); - pthread_attr_destroy(&attr); - } - else - result = pthread_create(&ThreadHandle, &Attr, Thread_PthreadStartFn, this); - - if (result) - { - ThreadFlags = 0; - Release(); - ThreadList::RemoveRunningThread(this); - return 0; - } - return 1; -} - - -// Suspend the thread until resumed -bool Thread::Suspend() -{ - OVR_DEBUG_LOG(("Thread::Suspend - cannot suspend threads on this system")); - return 0; -} - -// Resumes currently suspended thread -bool Thread::Resume() -{ - return 0; -} - - -// Quits with an exit code -void Thread::Exit(int exitCode) -{ - // Can only exist the current thread - // if (GetThread() != this) - // return; - - // Call the virtual OnExit function - OnExit(); - - // Signal this thread object as done and release it's references. - FinishAndRelease(); - ThreadList::RemoveRunningThread(this); - - pthread_exit((void *) exitCode); -} - -ThreadId GetCurrentThreadId() -{ - return (void*)pthread_self(); -} - -// *** Sleep functions - -/* static */ -bool Thread::Sleep(unsigned secs) -{ - sleep(secs); - return 1; -} -/* static */ -bool Thread::MSleep(unsigned msecs) -{ - usleep(msecs*1000); - return 1; -} - -/* static */ -int Thread::GetCPUCount() -{ - return 1; -} - - -#ifdef OVR_OS_PS3 - -sys_lwmutex_attribute_t Lock::LockAttr = { SYS_SYNC_PRIORITY, SYS_SYNC_RECURSIVE }; - -#endif - -} - -#endif // OVR_ENABLE_THREADS diff --git a/LibOVR/Src/Kernel/OVR_ThreadsWinAPI.cpp b/LibOVR/Src/Kernel/OVR_ThreadsWinAPI.cpp index 8880082..663d859 100644 --- a/LibOVR/Src/Kernel/OVR_ThreadsWinAPI.cpp +++ b/LibOVR/Src/Kernel/OVR_ThreadsWinAPI.cpp @@ -6,16 +6,16 @@ Content : Windows specific thread-related (safe) functionality Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_Timer.cpp b/LibOVR/Src/Kernel/OVR_Timer.cpp index 84ff4a1..a8de47d 100644 --- a/LibOVR/Src/Kernel/OVR_Timer.cpp +++ b/LibOVR/Src/Kernel/OVR_Timer.cpp @@ -5,16 +5,16 @@ Content : Provides static functions for precise timing Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -39,49 +39,48 @@ limitations under the License. namespace OVR { +// For recorded data playback +bool Timer::useFakeSeconds = false; +double Timer::FakeSeconds = 0; + //------------------------------------------------------------------------ // *** Timer - Platform Independent functions -double ovr_GetTimeInSeconds() +// Returns global high-resolution application timer in seconds. +double Timer::GetSeconds() { - return Timer::GetSeconds(); + if(useFakeSeconds) + return FakeSeconds; + + return double(Timer::GetTicksNanos()) * 0.000000001; } -UInt64 Timer::GetProfileTicks() +#ifndef OVR_OS_WIN32 + +// Unused on OSs other then Win32. +void Timer::initializeTimerSystem() { - return (GetRawTicks() * MksPerSecond) / GetRawFrequency(); } -double Timer::GetProfileSeconds() +void Timer::shutdownTimerSystem() { - static UInt64 StartTime = GetProfileTicks(); - return TicksToSeconds(GetProfileTicks()-StartTime); } -#ifndef OVR_OS_ANDROID - -double Timer::GetSeconds() -{ - return (double)Timer::GetRawTicks() / (double) GetRawFrequency(); -} #endif + //------------------------------------------------------------------------ // *** Android Specific Timer - #if defined(OVR_OS_ANDROID) -// Returns global high-resolution application timer in seconds. -double Timer::GetSeconds() +UInt64 Timer::GetTicksNanos() { - return double(Timer::GetRawTicks()) * 0.000000001; -} + if (useFakeSeconds) + return (UInt64) (FakeSeconds * NanosPerSecond); -UInt64 Timer::GetRawTicks() -{ // Choreographer vsync timestamp is based on. struct timespec tp; const int status = clock_gettime(CLOCK_MONOTONIC, &tp); @@ -94,108 +93,178 @@ UInt64 Timer::GetRawTicks() return result; } -UInt64 Timer::GetRawFrequency() -{ - return MksPerSecond * 1000; -} - -#endif - //------------------------------------------------------------------------ // *** Win32 Specific Timer -#if defined (OVR_OS_WIN32) +#elif defined (OVR_OS_WIN32) -CRITICAL_SECTION WinAPI_GetTimeCS; -volatile UInt32 WinAPI_OldTime = 0; -volatile UInt32 WinAPI_WrapCounter = 0; +// This helper class implements high-resolution wrapper that combines timeGetTime() output +// with QueryPerformanceCounter. timeGetTime() is lower precision but drives the high bits, +// as it's tied to the system clock. +struct PerformanceTimer +{ + PerformanceTimer() + : OldMMTimeMs(0), MMTimeWrapCounter(0), PrefFrequency(0), + LastResultNanos(0), PerfMinusTicksDeltaNanos(0) + { } + + enum { + MMTimerResolutionNanos = 1000000 + }; + + void Initialize(); + void Shutdown(); + + UInt64 GetTimeNanos(); + + + UINT64 getFrequency() + { + if (PrefFrequency == 0) + { + LARGE_INTEGER freq; + QueryPerformanceFrequency(&freq); + PrefFrequency = freq.QuadPart; + } + return PrefFrequency; + } + + + CRITICAL_SECTION TimeCS; + // timeGetTime() support with wrap. 
+ UInt32 OldMMTimeMs; + UInt32 MMTimeWrapCounter; + // Cached performance frequency result. + UInt64 PrefFrequency; + + // Computed as (perfCounterNanos - ticksCounterNanos) initially, + // and used to adjust timing. + UInt64 PerfMinusTicksDeltaNanos; + // Last returned value in nanoseconds, to ensure we don't back-step in time. + UInt64 LastResultNanos; +}; + +PerformanceTimer Win32_PerfTimer; + + +void PerformanceTimer::Initialize() +{ + timeBeginPeriod(1); + InitializeCriticalSection(&TimeCS); + MMTimeWrapCounter = 0; + getFrequency(); +} -UInt32 Timer::GetTicksMs() +void PerformanceTimer::Shutdown() { - return timeGetTime(); + DeleteCriticalSection(&TimeCS); + timeEndPeriod(1); } -UInt64 Timer::GetTicks() +UInt64 PerformanceTimer::GetTimeNanos() { - DWORD ticks = timeGetTime(); - UInt64 result; + UInt64 resultNanos; + LARGE_INTEGER li; + DWORD mmTimeMs; // On Win32 QueryPerformanceFrequency is unreliable due to SMP and // performance levels, so use this logic to detect wrapping and track // high bits. - ::EnterCriticalSection(&WinAPI_GetTimeCS); - - if (WinAPI_OldTime > ticks) - WinAPI_WrapCounter++; - WinAPI_OldTime = ticks; + ::EnterCriticalSection(&TimeCS); - result = (UInt64(WinAPI_WrapCounter) << 32) | ticks; - ::LeaveCriticalSection(&WinAPI_GetTimeCS); - - return result * MksPerMs; -} - -UInt64 Timer::GetRawTicks() -{ - LARGE_INTEGER li; + // Get raw value and perf counter "At the same time". + mmTimeMs = timeGetTime(); QueryPerformanceCounter(&li); - return li.QuadPart; -} -UInt64 Timer::GetRawFrequency() -{ - static UInt64 perfFreq = 0; - if (perfFreq == 0) + if (OldMMTimeMs > mmTimeMs) + MMTimeWrapCounter++; + OldMMTimeMs = mmTimeMs; + + // Normalize to nanoseconds. + UInt64 mmCounterNanos = ((UInt64(MMTimeWrapCounter) << 32) | mmTimeMs) * 1000000; + UInt64 frequency = getFrequency(); + UInt64 perfCounterSeconds = UInt64(li.QuadPart) / frequency; + UInt64 perfRemainderNanos = ( (UInt64(li.QuadPart) - perfCounterSeconds * frequency) * + Timer::NanosPerSecond ) / frequency; + UInt64 perfCounterNanos = perfCounterSeconds * Timer::NanosPerSecond + perfRemainderNanos; + + if (PerfMinusTicksDeltaNanos == 0) + PerfMinusTicksDeltaNanos = perfCounterNanos - mmCounterNanos; + + + // Compute result before snapping. + // + // On first call, this evaluates to: + // resultNanos = mmCounterNanos. + // Next call, assuming no wrap: + // resultNanos = prev_mmCounterNanos + (perfCounterNanos - prev_perfCounterNanos). + // After wrap, this would be: + // resultNanos = snapped(prev_mmCounterNanos +/- 1ms) + (perfCounterNanos - prev_perfCounterNanos). + // + resultNanos = perfCounterNanos - PerfMinusTicksDeltaNanos; + + // Snap the range so that resultNanos never moves further apart then its target resolution. + // It's better to allow more slack on the high side as timeGetTime() may be updated at sporadically + // larger then 1 ms intervals even when 1 ms resolution is requested. 
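    // Illustrative note (editor's addition, not in the original source): if the
    // perf-counter path has drifted, say, 5 ms ahead of the wrap-corrected
    // timeGetTime() value, the branch below clamps the result to
    // mmCounterNanos + 2 ms, prevents it from stepping back past LastResultNanos,
    // and re-bases PerfMinusTicksDeltaNanos so later reads track the multimedia
    // timer again.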
+ if (resultNanos > (mmCounterNanos + MMTimerResolutionNanos*2)) { - LARGE_INTEGER freq; - QueryPerformanceFrequency(&freq); - perfFreq = freq.QuadPart; + resultNanos = mmCounterNanos + MMTimerResolutionNanos*2; + if (resultNanos < LastResultNanos) + resultNanos = LastResultNanos; + PerfMinusTicksDeltaNanos = perfCounterNanos - resultNanos; + } + else if (resultNanos < (mmCounterNanos - MMTimerResolutionNanos)) + { + resultNanos = mmCounterNanos - MMTimerResolutionNanos; + if (resultNanos < LastResultNanos) + resultNanos = LastResultNanos; + PerfMinusTicksDeltaNanos = perfCounterNanos - resultNanos; } - return perfFreq; -} -void Timer::initializeTimerSystem() -{ - timeBeginPeriod(1); - InitializeCriticalSection(&WinAPI_GetTimeCS); + LastResultNanos = resultNanos; + ::LeaveCriticalSection(&TimeCS); -} -void Timer::shutdownTimerSystem() -{ - DeleteCriticalSection(&WinAPI_GetTimeCS); - timeEndPeriod(1); -} + //Tom's addition, to keep precision + static UInt64 initial_time = 0; + if (!initial_time) initial_time = resultNanos; + resultNanos -= initial_time; -#else // !OVR_OS_WIN32 + return resultNanos; +} -//------------------------------------------------------------------------ -// *** Standard OS Timer -UInt32 Timer::GetTicksMs() -{ - return (UInt32)(GetProfileTicks() / 1000); -} -// The profile ticks implementation is just fine for a normal timer. -UInt64 Timer::GetTicks() +// Delegate to PerformanceTimer. +UInt64 Timer::GetTicksNanos() { - return GetProfileTicks(); -} + if (useFakeSeconds) + return (UInt64) (FakeSeconds * NanosPerSecond); + return Win32_PerfTimer.GetTimeNanos(); +} void Timer::initializeTimerSystem() { + Win32_PerfTimer.Initialize(); + } void Timer::shutdownTimerSystem() { + Win32_PerfTimer.Shutdown(); } +#else // !OVR_OS_WIN32 && !OVR_OS_ANDROID + -#if !defined(OVR_OS_ANDROID) +//------------------------------------------------------------------------ +// *** Standard OS Timer -UInt64 Timer::GetRawTicks() +UInt64 Timer::GetTicksNanos() { + if (useFakeSeconds) + return (UInt64) (FakeSeconds * NanosPerSecond); + // TODO: prefer rdtsc when available? UInt64 result; @@ -207,17 +276,10 @@ UInt64 Timer::GetRawTicks() result = (UInt64)tv.tv_sec * 1000000; result += tv.tv_usec; - return result; -} - -UInt64 Timer::GetRawFrequency() -{ - return MksPerSecond; + return result * 1000; } -#endif // !OVR_OS_ANDROID - -#endif // !OVR_OS_WIN32 +#endif // OS-specific diff --git a/LibOVR/Src/Kernel/OVR_Timer.h b/LibOVR/Src/Kernel/OVR_Timer.h index 937b2cd..12cba3b 100644 --- a/LibOVR/Src/Kernel/OVR_Timer.h +++ b/LibOVR/Src/Kernel/OVR_Timer.h @@ -6,16 +6,16 @@ Content : Provides static functions for precise timing Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -43,62 +43,32 @@ class Timer public: enum { MsPerSecond = 1000, // Milliseconds in one second. - MksPerMs = 1000, // Microseconds in one millisecond. - MksPerSecond = MsPerSecond * MksPerMs + NanosPerSecond = MsPerSecond * 1000 * 1000, + MksPerSecond = MsPerSecond * 1000 }; - // ***** Timing APIs for Application + // These APIs should be used to guide animation and other program functions // that require precision. - // Returns ticks in milliseconds, as a 32-bit number. May wrap around every - // 49.2 days. Use either time difference of two values of GetTicks to avoid - // wrap-around. GetTicksMs may perform better then GetTicks. - static UInt32 OVR_STDCALL GetTicksMs(); - - // GetTicks returns general-purpose high resolution application timer value, - // measured in microseconds (mks, or 1/1000000 of a second). The actual precision - // is system-specific and may be much lower, such as 1 ms. - static UInt64 OVR_STDCALL GetTicks(); - // Returns global high-resolution application timer in seconds. - static double OVR_STDCALL GetSeconds(); - - - // ***** Profiling APIs. - // These functions should be used for profiling, but may have system specific - // artifacts that make them less appropriate for general system use. - // On Win32, for example these rely on QueryPerformanceConter may have - // problems with thread-core switching and power modes. - - // Return a hi-res timer value in mks (1/1000000 of a sec). - // Generally you want to call this at the start and end of an - // operation, and pass the difference to - // TicksToSeconds() to find out how long the operation took. - static UInt64 OVR_STDCALL GetProfileTicks(); - - // More convenient zero-based profile timer in seconds. First call initializes - // the "zero" value; future calls return the difference. Not thread safe for first call. - // Due to low precision of Double, may malfunction after long runtime. - static double OVR_STDCALL GetProfileSeconds(); - - // Get the raw cycle counter value, providing the maximum possible timer resolution. - static UInt64 OVR_STDCALL GetRawTicks(); - static UInt64 OVR_STDCALL GetRawFrequency(); - - - // ***** Tick and time unit conversion. - - // Convert micro-second ticks value into seconds value. - static inline double TicksToSeconds(UInt64 ticks) - { - return static_cast<double>(ticks) * (1.0 / (double)MksPerSecond); - } - // Convert Raw or frequency-unit ticks to seconds based on specified frequency. - static inline double RawTicksToSeconds(UInt64 rawTicks, UInt64 rawFrequency) - { - return static_cast<double>(rawTicks) * rawFrequency; + static double OVR_STDCALL GetSeconds(); + + // Returns time in Nanoseconds, using highest possible system resolution. + static UInt64 OVR_STDCALL GetTicksNanos(); + + // Kept for compatibility. + // Returns ticks in milliseconds, as a 32-bit number. May wrap around every 49.2 days. + // Use either time difference of two values of GetTicks to avoid wrap-around. + static UInt32 OVR_STDCALL GetTicksMs() + { return UInt32(GetTicksNanos() / 1000000); } + + // for recorded data playback + static void SetFakeSeconds(double fakeSeconds) + { + FakeSeconds = fakeSeconds; + useFakeSeconds = true; } private: @@ -106,12 +76,11 @@ private: // System called during program startup/shutdown. 
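// (Editor's note: per the platform sources above, these are empty on non-Win32
//  builds; on Win32 they initialize and shut down the PerformanceTimer, including
//  the timeBeginPeriod(1)/timeEndPeriod(1) calls that raise timeGetTime() resolution.)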
static void initializeTimerSystem(); static void shutdownTimerSystem(); -}; - - -// Global high-resolution time in seconds. This is intended to replace Timer class in OVR. -double ovr_GetTimeInSeconds(); + // for recorded data playback + static double FakeSeconds; + static bool useFakeSeconds; +}; } // OVR::Timer diff --git a/LibOVR/Src/Kernel/OVR_Types.h b/LibOVR/Src/Kernel/OVR_Types.h index 6b2922e..f45df59 100644 --- a/LibOVR/Src/Kernel/OVR_Types.h +++ b/LibOVR/Src/Kernel/OVR_Types.h @@ -6,16 +6,16 @@ Content : Standard library defines and simple types Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -140,6 +140,8 @@ limitations under the License. // MSVC 8.0 (VC2005) = 1400 // MSVC 9.0 (VC2008) = 1500 // MSVC 10.0 (VC2010) = 1600 +// MSVC 11.0 (VC2012) = 1700 +// MSVC 12.0 (VC2013) = 1800 # define OVR_CC_MSVC _MSC_VER #elif defined(__GNUC__) @@ -159,11 +161,9 @@ limitations under the License. // Disable MSVC warnings #if defined(OVR_CC_MSVC) # pragma warning(disable : 4127) // Inconsistent dll linkage -# pragma warning(disable : 4514) // Unreferenced inline function has been removed # pragma warning(disable : 4530) // Exception handling -# pragma warning(disable : 4711) // function 'x()' selected for automatic inline expansion -# pragma warning(disable : 4820) // 'n' bytes padding added after data member 'item' # if (OVR_CC_MSVC<1300) +# pragma warning(disable : 4514) // Unreferenced inline function has been removed # pragma warning(disable : 4710) // Function not inlined # pragma warning(disable : 4714) // _force_inline not inlined # pragma warning(disable : 4786) // Debug variable name longer than 255 chars @@ -195,12 +195,14 @@ limitations under the License. # include <stdlib.h> # include <crtdbg.h> +#if 0 // Uncomment this to help debug memory leaks under Visual Studio in OVR apps only. // This shouldn't be defined in customer releases. 
# ifndef OVR_DEFINE_NEW # define OVR_DEFINE_NEW new(__FILE__, __LINE__) # define new OVR_DEFINE_NEW # endif +#endif #endif @@ -374,6 +376,7 @@ namespace BaseTypes // If not in debug build, macros do nothing #ifndef OVR_BUILD_DEBUG +# define OVR_DEBUG_CODE(c) c # define OVR_DEBUG_BREAK ((void)0) # define OVR_ASSERT(p) ((void)0) @@ -399,6 +402,8 @@ namespace BaseTypes # define OVR_DEBUG_BREAK do { *((int *) 0) = 1; } while(0) #endif +#define OVR_DEBUG_CODE(c) + // This will cause compiler breakpoint #define OVR_ASSERT(p) do { if (!(p)) { OVR_DEBUG_BREAK; } } while(0) diff --git a/LibOVR/Src/Kernel/OVR_UTF8Util.cpp b/LibOVR/Src/Kernel/OVR_UTF8Util.cpp index 9d337a7..f8aa697 100644 --- a/LibOVR/Src/Kernel/OVR_UTF8Util.cpp +++ b/LibOVR/Src/Kernel/OVR_UTF8Util.cpp @@ -7,16 +7,16 @@ Notes : Notes : Much useful info at "UTF-8 and Unicode FAQ" http://www.cl.cam.ac.uk/~mgk25/unicode.html -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Kernel/OVR_UTF8Util.h b/LibOVR/Src/Kernel/OVR_UTF8Util.h index 9509629..6a596012 100644 --- a/LibOVR/Src/Kernel/OVR_UTF8Util.h +++ b/LibOVR/Src/Kernel/OVR_UTF8Util.h @@ -6,22 +6,23 @@ Content : UTF8 Unicode character encoding/decoding support Created : September 19, 2012 Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + ************************************************************************************/ #ifndef OVR_UTF8Util_h diff --git a/LibOVR/Src/OVR_CAPI.cpp b/LibOVR/Src/OVR_CAPI.cpp new file mode 100644 index 0000000..253bb3b --- /dev/null +++ b/LibOVR/Src/OVR_CAPI.cpp @@ -0,0 +1,911 @@ +/************************************************************************************ + +Filename : OVR_CAPI.cpp +Content : Experimental simple C interface to the HMD - version 1. 
+Created : November 30, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#include "OVR_CAPI.h" +#include "Kernel/OVR_Timer.h" +#include "Kernel/OVR_Math.h" +#include "Kernel/OVR_System.h" +#include "OVR_Stereo.h" +#include "OVR_Profile.h" + +#include "CAPI/CAPI_GlobalState.h" +#include "CAPI/CAPI_HMDState.h" +#include "CAPI/CAPI_FrameTimeManager.h" + + +using namespace OVR; +using namespace OVR::Util::Render; + +//------------------------------------------------------------------------------------- +// Math +namespace OVR { + + +// ***** FovPort + +// C-interop support: FovPort <-> ovrFovPort +FovPort::FovPort(const ovrFovPort &src) + : UpTan(src.UpTan), DownTan(src.DownTan), LeftTan(src.LeftTan), RightTan(src.RightTan) +{ } + +FovPort::operator const ovrFovPort () const +{ + ovrFovPort result; + result.LeftTan = LeftTan; + result.RightTan = RightTan; + result.UpTan = UpTan; + result.DownTan = DownTan; + return result; +} + +// Converts Fov Tan angle units to [-1,1] render target NDC space +Vector2f FovPort::TanAngleToRendertargetNDC(Vector2f const &tanEyeAngle) +{ + ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(*this); + return tanEyeAngle * eyeToSourceNDC.Scale + eyeToSourceNDC.Offset; +} + + +// ***** SensorState + +SensorState::SensorState(const ovrSensorState& s) +{ + Predicted = s.Predicted; + Recorded = s.Recorded; + Temperature = s.Temperature; + StatusFlags = s.StatusFlags; +} + +SensorState::operator const ovrSensorState() const +{ + ovrSensorState result; + result.Predicted = Predicted; + result.Recorded = Recorded; + result.Temperature = Temperature; + result.StatusFlags = StatusFlags; + return result; +} + + +} // namespace OVR + +//------------------------------------------------------------------------------------- + +using namespace OVR::CAPI; + +#ifdef __cplusplus +extern "C" { +#endif + + +// Used to generate projection from ovrEyeDesc::Fov +OVR_EXPORT ovrMatrix4f ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, ovrBool rightHanded) +{ + return CreateProjection(rightHanded ? true : false, fov, znear, zfar); +} + + +OVR_EXPORT ovrMatrix4f ovrMatrix4f_OrthoSubProjection(ovrMatrix4f projection, ovrVector2f orthoScale, + float orthoDistance, float eyeViewAdjustX) +{ + + float orthoHorizontalOffset = eyeViewAdjustX / orthoDistance; + + // Current projection maps real-world vector (x,y,1) to the RT. + // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to + // the physical [-orthoHalfFov,orthoHalfFov] + // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means + // we don't have to feed in Z=1 all the time. 
+ // The horizontal offset math is a little hinky because the destination is + // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset] + // So we need to first map [-FovPixels/2,FovPixels/2] to + // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]: + // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset; + // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset; + // But then we need the sam mapping as the existing projection matrix, i.e. + // x2 = x1 * Projection.M[0][0] + Projection.M[0][2]; + // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2]; + // = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels + + // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]; + // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels and + // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]. + + Matrix4f ortho; + ortho.M[0][0] = projection.M[0][0] * orthoScale.x; + ortho.M[0][1] = 0.0f; + ortho.M[0][2] = 0.0f; + ortho.M[0][3] = -projection.M[0][2] + ( orthoHorizontalOffset * projection.M[0][0] ); + + ortho.M[1][0] = 0.0f; + ortho.M[1][1] = -projection.M[1][1] * orthoScale.y; // Note sign flip (text rendering uses Y=down). + ortho.M[1][2] = 0.0f; + ortho.M[1][3] = -projection.M[1][2]; + + /* + if ( fabsf ( zNear - zFar ) < 0.001f ) + { + ortho.M[2][0] = 0.0f; + ortho.M[2][1] = 0.0f; + ortho.M[2][2] = 0.0f; + ortho.M[2][3] = zFar; + } + else + { + ortho.M[2][0] = 0.0f; + ortho.M[2][1] = 0.0f; + ortho.M[2][2] = zFar / (zNear - zFar); + ortho.M[2][3] = (zFar * zNear) / (zNear - zFar); + } + */ + + // MA: Undo effect of sign + ortho.M[2][0] = 0.0f; + ortho.M[2][1] = 0.0f; + //ortho.M[2][2] = projection.M[2][2] * projection.M[3][2] * -1.0f; // reverse right-handedness + ortho.M[2][2] = 0.0f; + ortho.M[2][3] = 0.0f; + //projection.M[2][3]; + + // No perspective correction for ortho. + ortho.M[3][0] = 0.0f; + ortho.M[3][1] = 0.0f; + ortho.M[3][2] = 0.0f; + ortho.M[3][3] = 1.0f; + + return ortho; +} + + +OVR_EXPORT double ovr_GetTimeInSeconds() +{ + return Timer::GetSeconds(); +} + +// Waits until the specified absolute time. +OVR_EXPORT double ovr_WaitTillTime(double absTime) +{ + volatile int i; + double initialTime = ovr_GetTimeInSeconds(); + double newTime = initialTime; + + while(newTime < absTime) + { + for (int j = 0; j < 50; j++) + i = 0; + newTime = ovr_GetTimeInSeconds(); + } + + // How long we waited + return newTime - initialTime; +} + +//------------------------------------------------------------------------------------- + +// 1. Init/shutdown. 
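// (Editor's note on ovr_WaitTillTime above: it spins on ovr_GetTimeInSeconds rather
//  than sleeping, so it occupies a core but reaches sub-millisecond accuracy; the
//  return value is the time actually waited.)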
+ +static ovrBool CAPI_SystemInitCalled = FALSE; + +OVR_EXPORT ovrBool ovr_Initialize() +{ + if (OVR::CAPI::GlobalState::pInstance) + return TRUE; + + // We must set up the system for the plugin to work + if (!OVR::System::IsInitialized()) + { + OVR::System::Init(OVR::Log::ConfigureDefaultLog(OVR::LogMask_All)); + CAPI_SystemInitCalled = TRUE; + } + + // Constructor detects devices + GlobalState::pInstance = new GlobalState; + return TRUE; +} + +OVR_EXPORT void ovr_Shutdown() +{ + if (!GlobalState::pInstance) + return; + + delete GlobalState::pInstance; + GlobalState::pInstance = 0; + + // We should clean up the system to be complete + if (CAPI_SystemInitCalled) + { + OVR::System::Destroy(); + CAPI_SystemInitCalled = FALSE; + } + return; +} + + +// There is a thread safety issue with ovrHmd_Detect in that multiple calls from different +// threads can corrupt the global array state. This would lead to two problems: +// a) Create(index) enumerator may miss or overshoot items. Probably not a big deal +// as game logic can easily be written to only do Detect(s)/Creates in one place. +// The alternative would be to return list handle. +// b) TBD: Un-mutexed Detect access from two threads could lead to crash. We should +// probably check this. +// + +OVR_EXPORT int ovrHmd_Detect() +{ + if (!GlobalState::pInstance) + return 0; + return GlobalState::pInstance->EnumerateDevices(); +} + + +// ovrHmd_Create us explicitly separated from StartSensor and Configure to allow creation of +// a relatively light-weight handle that would reference the device going forward and would +// survive future ovrHmd_Detect calls. That is once ovrHMD is returned, index is no longer +// necessary and can be changed by a ovrHmd_Detect call. + +OVR_EXPORT ovrHmd ovrHmd_Create(int index) +{ + if (!GlobalState::pInstance) + return 0; + Ptr<HMDDevice> device = *GlobalState::pInstance->CreateDevice(index); + if (!device) + return 0; + + HMDState* hmds = new HMDState(device); + if (!hmds) + return 0; + + return hmds; +} + +OVR_EXPORT ovrHmd ovrHmd_CreateDebug(ovrHmdType type) +{ + if (!GlobalState::pInstance) + return 0; + + HMDState* hmds = new HMDState(type); + return hmds; +} + +OVR_EXPORT void ovrHmd_Destroy(ovrHmd hmd) +{ + if (!hmd) + return; + // TBD: Any extra shutdown? + HMDState* hmds = (HMDState*)hmd; + + { // Thread checker in its own scope, to avoid access after 'delete'. + // Essentially just checks that no other RenderAPI function is executing. + ThreadChecker::Scope checkScope(&hmds->RenderAPIThreadChecker, "ovrHmd_Destroy"); + } + + delete (HMDState*)hmd; +} + + +OVR_EXPORT const char* ovrHmd_GetLastError(ovrHmd hmd) +{ + using namespace OVR; + if (!hmd) + { + if (!GlobalState::pInstance) + return "LibOVR not initialized."; + return GlobalState::pInstance->GetLastError(); + } + HMDState* p = (HMDState*)hmd; + return p->GetLastError(); +} + + +//------------------------------------------------------------------------------------- +// *** Sensor + +// Sensor APIs are separated from Create & Configure for several reasons: +// - They need custom parameters that control allocation of heavy resources +// such as Vision tracking, which you don't want to create unless necessary. +// - A game may want to switch some sensor settings based on user input, +// or at lease enable/disable features such as Vision for debugging. +// - The same or syntactically similar sensor interface is likely to be used if we +// introduce controllers. 
+// +// - Sensor interface functions are all Thread-safe, unlike the frame/render API +// functions that have different rules (all frame access functions +// must be on render thread) + +OVR_EXPORT ovrBool ovrHmd_StartSensor(ovrHmd hmd, unsigned int supportedCaps, unsigned int requiredCaps) +{ + HMDState* p = (HMDState*)hmd; + // TBD: Decide if we null-check arguments. + return p->StartSensor(supportedCaps, requiredCaps); +} + +OVR_EXPORT void ovrHmd_StopSensor(ovrHmd hmd) +{ + HMDState* p = (HMDState*)hmd; + p->StopSensor(); +} + +OVR_EXPORT void ovrHmd_ResetSensor(ovrHmd hmd) +{ + HMDState* p = (HMDState*)hmd; + p->ResetSensor(); +} + +OVR_EXPORT ovrSensorState ovrHmd_GetSensorState(ovrHmd hmd, double absTime) +{ + HMDState* p = (HMDState*)hmd; + return p->PredictedSensorState(absTime); +} + +// Returns information about a sensor. Only valid after SensorStart. +OVR_EXPORT ovrBool ovrHmd_GetSensorDesc(ovrHmd hmd, ovrSensorDesc* descOut) +{ + HMDState* p = (HMDState*)hmd; + return p->GetSensorDesc(descOut) ? TRUE : FALSE; +} + + + +//------------------------------------------------------------------------------------- +// *** General Setup + + +OVR_EXPORT void ovrHmd_GetDesc(ovrHmd hmd, ovrHmdDesc* desc) +{ + HMDState* hmds = (HMDState*)hmd; + *desc = hmds->RenderState.GetDesc(); + desc->Handle = hmd; +} + +// Per HMD -> calculateIdealPixelSize +OVR_EXPORT ovrSizei ovrHmd_GetFovTextureSize(ovrHmd hmd, ovrEyeType eye, ovrFovPort fov, + float pixelsPerDisplayPixel) +{ + if (!hmd) return Sizei(0); + + HMDState* hmds = (HMDState*)hmd; + return hmds->RenderState.GetFOVTextureSize(eye, fov, pixelsPerDisplayPixel); +} + + +//------------------------------------------------------------------------------------- + + +OVR_EXPORT +ovrBool ovrHmd_ConfigureRendering( ovrHmd hmd, + const ovrRenderAPIConfig* apiConfig, + unsigned int hmdCaps, + unsigned int distortionCaps, + const ovrEyeDesc eyeDescIn[2], + ovrEyeRenderDesc eyeRenderDescOut[2] ) +{ + if (!hmd) return FALSE; + return ((HMDState*)hmd)->ConfigureRendering(eyeRenderDescOut, eyeDescIn, + apiConfig, hmdCaps, distortionCaps); +} + + + +// TBD: MA - Deprecated, need alternative +void ovrHmd_SetVsync(ovrHmd hmd, ovrBool vsync) +{ + if (!hmd) return; + + return ((HMDState*)hmd)->TimeManager.SetVsync(vsync? true : false); +} + + +OVR_EXPORT ovrFrameTiming ovrHmd_BeginFrame(ovrHmd hmd, unsigned int frameIndex) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) + { + ovrFrameTiming f; + memset(&f, 0, sizeof(f)); + return f; + } + + // Check: Proper configure and threading state for the call. + hmds->checkRenderingConfigured("ovrHmd_BeginFrame"); + OVR_ASSERT_LOG(hmds->BeginFrameCalled == false, ("ovrHmd_BeginFrame called multiple times.")); + ThreadChecker::Scope checkScope(&hmds->RenderAPIThreadChecker, "ovrHmd_BeginFrame"); + + hmds->BeginFrameCalled = true; + hmds->BeginFrameThreadId = OVR::GetCurrentThreadId(); + + return ovrHmd_BeginFrameTiming(hmd, frameIndex); +} + + +// Renders textures to frame buffer +OVR_EXPORT void ovrHmd_EndFrame(ovrHmd hmd) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return; + + // Debug state checks: Must be in BeginFrame, on the same thread. 
+ hmds->checkBeginFrameScope("ovrHmd_EndFrame"); + ThreadChecker::Scope checkScope(&hmds->RenderAPIThreadChecker, "ovrHmd_EndFrame"); + + // TBD: Move directly into renderer + bool dk2LatencyTest = (hmds->HMDInfo.HmdType == HmdType_DK2) && + (hmds->SensorCaps & ovrHmdCap_LatencyTest); + if (dk2LatencyTest) + { + hmds->LatencyTest2DrawColor[0] = hmds->TimeManager.GetFrameLatencyTestDrawColor(); + hmds->LatencyTest2DrawColor[1] = hmds->LatencyTest2DrawColor[0]; + hmds->LatencyTest2DrawColor[2] = hmds->LatencyTest2DrawColor[0]; + } + + if (hmds->pRenderer) + { + hmds->pRenderer->EndFrame(true, + hmds->LatencyTestActive ? hmds->LatencyTestDrawColor : NULL, + + // MA: Use this color since we are running DK2 test all the time. + dk2LatencyTest ? hmds->LatencyTest2DrawColor : 0 + //hmds->LatencyTest2Active ? hmds->LatencyTest2DrawColor : NULL + ); + } + // Call after present + ovrHmd_EndFrameTiming(hmd); + + if (dk2LatencyTest) + { + hmds->TimeManager.UpdateFrameLatencyTrackingAfterEndFrame( + hmds->LatencyTest2DrawColor[0], hmds->LatencyUtil2.GetLocklessState()); + } + + // Out of BeginFrame + hmds->BeginFrameThreadId = 0; + hmds->BeginFrameCalled = false; +} + + +OVR_EXPORT ovrPosef ovrHmd_BeginEyeRender(ovrHmd hmd, ovrEyeType eye) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return ovrPosef(); + return hmds->BeginEyeRender(eye); +} + +OVR_EXPORT void ovrHmd_EndEyeRender(ovrHmd hmd, ovrEyeType eye, + ovrPosef renderPose, ovrTexture* eyeTexture) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return; + hmds->EndEyeRender(eye, renderPose, eyeTexture); +} + + +//------------------------------------------------------------------------------------- +// ***** Frame Timing logic + + +OVR_EXPORT ovrFrameTiming ovrHmd_GetFrameTiming(ovrHmd hmd, unsigned int frameIndex) +{ + ovrFrameTiming f; + memset(&f, 0, sizeof(f)); + + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + FrameTimeManager::Timing frameTiming = hmds->TimeManager.GetFrameTiming(frameIndex); + + f.ThisFrameSeconds = frameTiming.ThisFrameTime; + f.NextFrameSeconds = frameTiming.NextFrameTime; + f.TimewarpPointSeconds = frameTiming.TimewarpPointTime; + f.ScanoutMidpointSeconds= frameTiming.MidpointTime; + f.EyeScanoutSeconds[0] = frameTiming.EyeRenderTimes[0]; + f.EyeScanoutSeconds[1] = frameTiming.EyeRenderTimes[1]; + + // Compute DeltaSeconds. + f.DeltaSeconds = (hmds->LastGetFrameTimeSeconds == 0.0f) ? 0.0f : + (float) (f.ThisFrameSeconds - hmds->LastFrameTimeSeconds); + hmds->LastGetFrameTimeSeconds = f.ThisFrameSeconds; + if (f.DeltaSeconds > 1.0f) + f.DeltaSeconds = 1.0f; + } + + return f; +} + +OVR_EXPORT ovrFrameTiming ovrHmd_BeginFrameTiming(ovrHmd hmd, unsigned int frameIndex) +{ + ovrFrameTiming f; + memset(&f, 0, sizeof(f)); + + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return f; + + // Check: Proper state for the call. + OVR_ASSERT_LOG(hmds->BeginFrameTimingCalled == false, + ("ovrHmd_BeginFrameTiming called multiple times.")); + hmds->BeginFrameTimingCalled = true; + + double thisFrameTime = hmds->TimeManager.BeginFrame(frameIndex); + + const FrameTimeManager::Timing &frameTiming = hmds->TimeManager.GetFrameTiming(); + + f.ThisFrameSeconds = thisFrameTime; + f.NextFrameSeconds = frameTiming.NextFrameTime; + f.TimewarpPointSeconds = frameTiming.TimewarpPointTime; + f.ScanoutMidpointSeconds= frameTiming.MidpointTime; + f.EyeScanoutSeconds[0] = frameTiming.EyeRenderTimes[0]; + f.EyeScanoutSeconds[1] = frameTiming.EyeRenderTimes[1]; + + // Compute DeltaSeconds. + f.DeltaSeconds = (hmds->LastFrameTimeSeconds == 0.0f) ? 
0.0f : + (float) (thisFrameTime - hmds->LastFrameTimeSeconds); + hmds->LastFrameTimeSeconds = thisFrameTime; + if (f.DeltaSeconds > 1.0f) + f.DeltaSeconds = 1.0f; + + return f; +} + + +OVR_EXPORT void ovrHmd_EndFrameTiming(ovrHmd hmd) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return; + + // Debug state checks: Must be in BeginFrameTiming, on the same thread. + hmds->checkBeginFrameTimingScope("ovrHmd_EndTiming"); + // MA TBD: Correct chek or not? + // ThreadChecker::Scope checkScope(&hmds->RenderAPIThreadChecker, "ovrHmd_EndFrame"); + + hmds->TimeManager.EndFrame(); + hmds->BeginFrameTimingCalled = false; +} + + +OVR_EXPORT void ovrHmd_ResetFrameTiming(ovrHmd hmd, unsigned int frameIndex, bool vsync) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return; + + hmds->TimeManager.ResetFrameTiming(frameIndex, vsync, false, + hmds->RenderingConfigured); + hmds->LastFrameTimeSeconds = 0.0; + hmds->LastGetFrameTimeSeconds = 0.0; +} + + + +OVR_EXPORT ovrPosef ovrHmd_GetEyePose(ovrHmd hmd, ovrEyeType eye) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmds) return ovrPosef(); + + hmds->checkBeginFrameTimingScope("ovrHmd_GetEyePose"); + return hmds->TimeManager.GetEyePredictionPose(hmd, eye); +} + + +OVR_EXPORT void ovrHmd_GetEyeTimewarpMatrices(ovrHmd hmd, ovrEyeType eye, + ovrPosef renderPose, ovrMatrix4f twmOut[2]) +{ + HMDState* hmds = (HMDState*)hmd; + if (!hmd) + return; + + // Debug checks: BeginFrame was called, on the same thread. + hmds->checkBeginFrameTimingScope("ovrHmd_GetTimewarpEyeMatrices"); + + hmds->TimeManager.GetTimewarpMatrices(hmd, eye, renderPose, twmOut); + + /* + // MA: Took this out because new latency test approach just sames + // the sample times in FrameTimeManager. + // TODO: if no timewarp, then test latency in begin eye render + if (eye == 0) + { + hmds->ProcessLatencyTest2(hmds->LatencyTest2DrawColor, -1.0f); + } + */ +} + + + +OVR_EXPORT ovrEyeRenderDesc ovrHmd_GetRenderDesc(ovrHmd hmd, const ovrEyeDesc eyeDesc) +{ + ovrEyeRenderDesc erd; + + HMDState* hmds = (HMDState*)hmd; + if (!hmds) + { + memset(&erd, 0, sizeof(erd)); + return erd; + } + + return hmds->RenderState.calcRenderDesc(eyeDesc); +} + + + +#define OVR_OFFSET_OF(s, field) ((size_t)&((s*)0)->field) + + + +// Generate distortion mesh per eye. +// scaleAndOffsetOut - this will be needed for shader +OVR_EXPORT ovrBool ovrHmd_CreateDistortionMesh( ovrHmd hmd, ovrEyeDesc eyeDesc, + unsigned int distortionCaps, + ovrVector2f uvScaleOffsetOut[2], + ovrDistortionMesh *meshData ) +{ + if (!meshData) + return FALSE; + HMDState* hmds = (HMDState*)hmd; + + // Not used now, but Chromatic flag or others could possibly be checked for in the future. + OVR_UNUSED1(distortionCaps); + + // TBD: We should probably be sharing some C API structures with C++ to avoid this mess... 
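    // (Editor's note: the layout asserts below are what make the later pointer cast
    //  safe -- they verify that the C++ DistortionMeshVertexData and the public
    //  ovrDistortionVertex have identical size and field offsets, so
    //  DistortionMeshCreate can write its output directly into meshData->pVertexData.)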
+ OVR_COMPILER_ASSERT(sizeof(DistortionMeshVertexData) == sizeof(ovrDistortionVertex)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, ScreenPosNDC) == OVR_OFFSET_OF(ovrDistortionVertex, Pos)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, TimewarpLerp) == OVR_OFFSET_OF(ovrDistortionVertex, TimeWarpFactor)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, Shade) == OVR_OFFSET_OF(ovrDistortionVertex, VignetteFactor)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, TanEyeAnglesR) == OVR_OFFSET_OF(ovrDistortionVertex, TexR)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, TanEyeAnglesG) == OVR_OFFSET_OF(ovrDistortionVertex, TexG)); + OVR_COMPILER_ASSERT(OVR_OFFSET_OF(DistortionMeshVertexData, TanEyeAnglesB) == OVR_OFFSET_OF(ovrDistortionVertex, TexB)); + + + // *** Calculate a part of "StereoParams" needed for mesh generation + + // Note that mesh distortion generation is invariant of RenderTarget UVs, allowing + // render target size and location to be changed after the fact dynamically. + // eyeToSourceUV is computed here for convenience, so that users don't need + // to call ovrHmd_GetRenderScaleAndOffset unless changing RT dynamically. + + + const HmdRenderInfo& hmdri = hmds->RenderState.RenderInfo; + StereoEye stereoEye = (eyeDesc.Eye == ovrEye_Left) ? StereoEye_Left : StereoEye_Right; + + const DistortionRenderDesc& distortion = hmds->RenderState.Distortion[eyeDesc.Eye]; + + // Find the mapping from TanAngle space to target NDC space. + ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(eyeDesc.Fov); + // Find the mapping from TanAngle space to textureUV space. + ScaleAndOffset2D eyeToSourceUV = CreateUVScaleAndOffsetfromNDCScaleandOffset( + eyeToSourceNDC, + Recti(eyeDesc.RenderViewport), eyeDesc.TextureSize ); + + uvScaleOffsetOut[0] = eyeToSourceUV.Scale; + uvScaleOffsetOut[1] = eyeToSourceUV.Offset; + + int triangleCount = 0; + int vertexCount = 0; + + DistortionMeshCreate((DistortionMeshVertexData**)&meshData->pVertexData, (UInt16**)&meshData->pIndexData, + &vertexCount, &triangleCount, + (stereoEye == StereoEye_Right), + hmdri, distortion, eyeToSourceNDC); + + if (meshData->pVertexData) + { + // Convert to index + meshData->IndexCount = triangleCount * 3; + meshData->VertexCount = vertexCount; + return TRUE; + } + + return FALSE; +} + + +// Frees distortion mesh allocated by ovrHmd_GenerateDistortionMesh. meshData elements +// are set to null and 0s after the call. +OVR_EXPORT void ovrHmd_DestroyDistortionMesh(ovrDistortionMesh* meshData) +{ + if (meshData->pVertexData) + DistortionMeshDestroy((DistortionMeshVertexData*)meshData->pVertexData, + meshData->pIndexData); + meshData->pVertexData = 0; + meshData->pIndexData = 0; + meshData->VertexCount = 0; + meshData->IndexCount = 0; +} + + + +// Computes updated 'uvScaleOffsetOut' to be used with a distortion if render target size or +// viewport changes after the fact. This can be used to adjust render size every frame, if desired. +OVR_EXPORT void ovrHmd_GetRenderScaleAndOffset( ovrHmd hmd, ovrEyeDesc eyeDesc, + unsigned int distortionCaps, + ovrVector2f uvScaleOffsetOut[2] ) +{ + OVR_UNUSED2(hmd, distortionCaps); + // TBD: We could remove dependency on HMD here, but what if we need it in the future? + //HMDState* hmds = (HMDState*)hmd; + + // Find the mapping from TanAngle space to target NDC space. + ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(eyeDesc.Fov); + // Find the mapping from TanAngle space to textureUV space. 
+ ScaleAndOffset2D eyeToSourceUV = CreateUVScaleAndOffsetfromNDCScaleandOffset( + eyeToSourceNDC, + eyeDesc.RenderViewport, eyeDesc.TextureSize ); + + uvScaleOffsetOut[0] = eyeToSourceUV.Scale; + uvScaleOffsetOut[1] = eyeToSourceUV.Offset; +} + + +//------------------------------------------------------------------------------------- +// ***** Latency Test interface + +OVR_EXPORT ovrBool ovrHmd_GetLatencyTestDrawColor(ovrHmd hmd, unsigned char rgbColorOut[3]) +{ + HMDState* p = (HMDState*)hmd; + rgbColorOut[0] = p->LatencyTestDrawColor[0]; + rgbColorOut[1] = p->LatencyTestDrawColor[1]; + rgbColorOut[2] = p->LatencyTestDrawColor[2]; + return p->LatencyTestActive; +} + +OVR_EXPORT const char* ovrHmd_GetLatencyTestResult(ovrHmd hmd) +{ + HMDState* p = (HMDState*)hmd; + return p->LatencyUtil.GetResultsString(); +} + +OVR_EXPORT double ovrHmd_GetMeasuredLatencyTest2(ovrHmd hmd) +{ + HMDState* p = (HMDState*)hmd; + + // MA Test + float latencies[3]; + p->TimeManager.GetLatencyTimings(latencies); + return latencies[2]; + // return p->LatencyUtil2.GetMeasuredLatency(); +} + + +// ----------------------------------------------------------------------------------- +// ***** Property Access + +OVR_EXPORT float ovrHmd_GetFloat(ovrHmd hmd, const char* propertyName, float defaultVal) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + return hmds->getFloatValue(propertyName, defaultVal); + } + + return defaultVal; +} + +OVR_EXPORT ovrBool ovrHmd_SetFloat(ovrHmd hmd, const char* propertyName, float value) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + return hmds->setFloatValue(propertyName, value); + } + return false; +} + + + +OVR_EXPORT unsigned int ovrHmd_GetFloatArray(ovrHmd hmd, const char* propertyName, + float values[], unsigned int arraySize) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + return hmds->getFloatArray(propertyName, values, arraySize); + } + + return 0; +} + + +// Modify float[] property; false if property doesn't exist or is readonly. +OVR_EXPORT ovrBool ovrHmd_SetFloatArray(ovrHmd hmd, const char* propertyName, + float values[], unsigned int arraySize) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + return hmds->setFloatArray(propertyName, values, arraySize); + } + + return 0; +} + +OVR_EXPORT const char* ovrHmd_GetString(ovrHmd hmd, const char* propertyName, + const char* defaultVal) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds) + { + return hmds->getString(propertyName, defaultVal); + } + + return defaultVal; +} + +/* Not needed yet. + +// Get array of strings, i.e. const char* [] property. +// Returns the number of elements filled in, 0 if property doesn't exist. +// Maximum of arraySize elements will be written. +// String memory is guaranteed to exist until next call to GetString or GetStringArray, or HMD is destroyed. +OVR_EXPORT +unsigned int ovrHmd_GetStringArray(ovrHmd hmd, const char* propertyName, + const char* values[], unsigned int arraySize) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds && hmds->pHMD && arraySize) + { + Profile* p = hmds->pHMD->GetProfile(); + + hmds->LastGetStringValue[0] = 0; + if (p && p->GetValue(propertyName, hmds->LastGetStringValue, sizeof(hmds->LastGetStringValue))) + { + values[0] = hmds->LastGetStringValue; + return 1; + } + } + + return 0; +} +*/ + +// Returns array size of a property, 0 if property doesn't exist. +// Can be used to check existence of a property. 
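// Illustrative use (editor's sketch; "SomeProperty" is a placeholder key, not a
// documented property name):
//    if (ovrHmd_GetArraySize(hmd, "SomeProperty") > 0)
//    {   /* the property exists and can be read with the accessors above */   }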
+OVR_EXPORT unsigned int ovrHmd_GetArraySize(ovrHmd hmd, const char* propertyName) +{ + HMDState* hmds = (HMDState*)hmd; + if (hmds && hmds->pHMD) + { + // For now, just access the profile. + Profile* p = hmds->pHMD->GetProfile(); + + if (p) + return p->GetNumValues(propertyName); + } + return 0; +} + + +#ifdef __cplusplus +} // extern "C" +#endif + + +//------------------------------------------------------------------------------------- +// ****** Special access for VRConfig + +// Return the sensor fusion object for the purposes of magnetometer calibration. The +// function is private and is only exposed through VRConfig header declarations +OVR::SensorFusion* ovrHmd_GetSensorFusion(ovrHmd hmd) +{ + HMDState* p = (HMDState*)hmd; + return &p->SFusion; +} + + diff --git a/LibOVR/Src/OVR_CAPI.h b/LibOVR/Src/OVR_CAPI.h new file mode 100644 index 0000000..d5cce01 --- /dev/null +++ b/LibOVR/Src/OVR_CAPI.h @@ -0,0 +1,762 @@ +/************************************************************************************ + +Filename : OVR_CAPI.h +Content : C Interface to Oculus sensors and rendering. +Created : November 23, 2013 +Authors : Michael Antonov + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ +#ifndef OVR_CAPI_h +#define OVR_CAPI_h + +#include <stdint.h> + +typedef char ovrBool; + +//----------------------------------------------------------------------------------- +// ***** OVR_EXPORT definition + +#if !defined(OVR_EXPORT) + #if defined(WIN32) + #define OVR_EXPORT __declspec(dllexport) + #else + #define OVR_EXPORT + #endif +#endif + +//----------------------------------------------------------------------------------- +// ***** Simple Math Structures + +// 2D integer +typedef struct ovrVector2i_ +{ + int x, y; +} ovrVector2i; +typedef struct ovrSizei_ +{ + int w, h; +} ovrSizei; +typedef struct ovrRecti_ +{ + ovrVector2i Pos; + ovrSizei Size; +} ovrRecti; + +// 3D +typedef struct ovrQuatf_ +{ + float x, y, z, w; +} ovrQuatf; +typedef struct ovrVector2f_ +{ + float x, y; +} ovrVector2f; +typedef struct ovrVector3f_ +{ + float x, y, z; +} ovrVector3f; +typedef struct ovrMatrix4f_ +{ + float M[4][4]; +} ovrMatrix4f; +// Position and orientation together. +typedef struct ovrPosef_ +{ + ovrQuatf Orientation; + ovrVector3f Position; +} ovrPosef; + +// Full pose (rigid body) configuration with first and second derivatives. +typedef struct ovrPoseStatef_ +{ + ovrPosef Pose; + ovrVector3f AngularVelocity; + ovrVector3f LinearVelocity; + ovrVector3f AngularAcceleration; + ovrVector3f LinearAcceleration; + double TimeInSeconds; // Absolute time of this state sample. +} ovrPoseStatef; + +// Field Of View (FOV) in tangent of the angle units. 
+// As an example, for a standard 90 degree vertical FOV, we would +// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }. +typedef struct ovrFovPort_ +{ + float UpTan; + float DownTan; + float LeftTan; + float RightTan; +} ovrFovPort; + + +//----------------------------------------------------------------------------------- +// ***** HMD Types + +// Enumerates all HMD types that we support. +typedef enum +{ + ovrHmd_None = 0, + ovrHmd_DK1 = 3, + ovrHmd_DKHD = 4, + ovrHmd_CrystalCoveProto = 5, + ovrHmd_DK2 = 6, + ovrHmd_Other // Some HMD other then the one in the enumeration. +} ovrHmdType; + +// HMD capability bits reported by device. +typedef enum +{ + ovrHmdCap_Present = 0x0001, // This HMD exists (as opposed to being unplugged). + ovrHmdCap_Available = 0x0002, // HMD and is sensor is available for use + // (if not owned by another app). + ovrHmdCap_Orientation = 0x0010, // Support orientation tracking (IMU). + ovrHmdCap_YawCorrection = 0x0020, // Supports yaw correction through magnetometer or other means. + ovrHmdCap_Position = 0x0040, // Supports positional tracking. + ovrHmdCap_LowPersistence = 0x0080, // Supports low persistence mode. + ovrHmdCap_LatencyTest = 0x0100, // Supports pixel reading for continous latency testing. + ovrHmdCap_DynamicPrediction = 0x0200, // Adjust prediction dynamically based on DK2 Latency. + + // Support rendering without VSync for debugging + ovrHmdCap_NoVSync = 0x1000 +} ovrHmdCapBits; + +// Describes distortion rendering parameters for ovrHmd_ConfigureRenderAPI or for +// ovrHmd_GenerateDistortionMesh. +typedef enum +{ + ovrDistortion_Chromatic = 0x01, + ovrDistortion_TimeWarp = 0x02, + ovrDistortion_Vignette = 0x08 +} ovrDistortionCaps; + + +// Specifies which eye is being used for rendering. +// This type explicitly does not include a third "NoStereo" option, as such is +// not required for an HMD-centered API. +typedef enum +{ + ovrEye_Left = 0, + ovrEye_Right = 1, + ovrEye_Count = 2 +} ovrEyeType; + + +// Handle to HMD; returned by ovrHmd_Create. +typedef struct ovrHmdStruct* ovrHmd; + +// This is a complete descriptor of the HMD. +typedef struct ovrHmdDesc_ +{ + ovrHmd Handle; // Handle of this HMD. + ovrHmdType Type; + + // Name string describing the product: "Oculus Rift DK1", etc. + const char* ProductName; + const char* Manufacturer; + + // Capability bits described by ovrHmdCapBits. + unsigned int Caps; + unsigned int DistortionCaps; + + // Resolution of the entire HMD screen (for both eyes) in pixels. + ovrSizei Resolution; + // Where monitor window should be on screen or (0,0). + ovrVector2i WindowsPos; + + // These define the recommended and maximum optical FOVs for the HMD. + ovrFovPort DefaultEyeFov[ovrEye_Count]; + ovrFovPort MaxEyeFov[ovrEye_Count]; + + // Preferred eye rendering order for best performance. + // Can help reduce latency on sideways-scanned screens. + ovrEyeType EyeRenderOrder[ovrEye_Count]; + + // Display that HMD should present on. + // TBD: It may be good to remove this information relying on WidowPos instead. + // Ultimately, we may need to come up with a more convenient alternative, + // such as a API-specific functions that return adapter, ot something that will + // work with our monitor driver. + + // Windows: "\\\\.\\DISPLAY3", etc. Can be used in EnumDisplaySettings/CreateDC. + const char* DisplayDeviceName; + // MacOS + long DisplayId; +} ovrHmdDesc; + +// Describes the type of positional tracking being done. 
+/* +typedef enum +{ + ovrPose_None, + ovrPose_HeadModel, + ovrPose_Positional +} ovrPoseType; +*/ + + +// Bit flags describing the current status of sensor tracking. +typedef enum +{ + ovrStatus_OrientationTracked = 0x0001, // Orientation is currently tracked (connected and in use). + ovrStatus_PositionTracked = 0x0002, // Position is currently tracked (FALSE if out of range). + ovrStatus_PositionConnected = 0x0020, // Position tracking HW is connected. + ovrStatus_HmdConnected = 0x0080 // HMD Display is available & connected. +} ovrStatusBits; + + +// State of the sensor at given a absolute time. +typedef struct ovrSensorState_ +{ + // Predicted pose configuration at requested absolute time. + // One can determine the time difference between predicted and actual + // readings by comparing ovrPoseState.TimeInSeconds. + ovrPoseStatef Predicted; + // Actual recorded pose configuration based on the sensor sample at a + // moment closest to the requested time. + ovrPoseStatef Recorded; + + // Sensor temperature reading, in degrees Celsius, as sample time. + float Temperature; + // Sensor status described by ovrStatusBits. + unsigned int StatusFlags; +} ovrSensorState; + +// For now. +// TBD: Decide if this becomes a part of HMDDesc +typedef struct ovrSensorDesc_ +{ + // HID Vendor and ProductId of the device. + short VendorId; + short ProductId; + // Sensor (and display) serial number. + char SerialNumber[24]; +} ovrSensorDesc; + + + +// Frame data reported by ovrHmd_BeginFrameTiming(). +typedef struct ovrFrameTiming_ +{ + // The amount of time that has passed since the previous frame returned + // BeginFrameSeconds value, usable for movement scaling. + // This will be clamped to no more than 0.1 seconds to prevent + // excessive movement after pauses for loading or initialization. + float DeltaSeconds; + + // It is generally expected that the following hold: + // ThisFrameSeconds < TimewarpPointSeconds < NextFrameSeconds < + // EyeScanoutSeconds[EyeOrder[0]] <= ScanoutMidpointSeconds <= EyeScanoutSeconds[EyeOrder[1]] + + // Absolute time value of when rendering of this frame began or is expected to + // begin; generally equal to NextFrameSeconds of the previous frame. Can be used + // for animation timing. + double ThisFrameSeconds; + // Absolute point when IMU expects to be sampled for this frame. + double TimewarpPointSeconds; + // Absolute time when frame Present + GPU Flush will finish, and the next frame starts. + double NextFrameSeconds; + + // Time when when half of the screen will be scanned out. Can be passes as a prediction + // value to ovrHmd_GetSensorState() go get general orientation. + double ScanoutMidpointSeconds; + // Timing points when each eye will be scanned out to display. Used for rendering each eye. + double EyeScanoutSeconds[2]; + +} ovrFrameTiming; + + + + +// Describes an eye for ovrHmd_Configure(). +// Configure will generate more complete ovrEyeRenderDesc based on this data. +// Users must fill in both render target TextureSize and a RenderViewport within it +// to specify the rectangle from which pre-distorted eye image will be taken. +// A different RenderViewport may be used during rendering by specifying either +// (a) calling ovrHmd_GetRenderScaleAndOffset with game-rendered api, +// or (b) passing different values in ovrTexture in case of SDK-rendered distortion. +typedef struct ovrEyeDesc_ +{ + ovrEyeType Eye; + ovrSizei TextureSize; // Absolute size of render texture. + ovrRecti RenderViewport; // Viewport within texture where eye rendering takes place. 
+ // If specified as (0,0,0,0), it will be initialized to TextureSize. + ovrFovPort Fov; +} ovrEyeDesc; + +// Rendering information for each eye, computed by ovrHmd_Configure(). +typedef struct ovrEyeRenderDesc_ +{ + ovrEyeDesc Desc; + ovrRecti DistortedViewport; // Distortion viewport + ovrVector2f PixelsPerTanAngleAtCenter; // How many display pixels will fit in tan(angle) = 1. + ovrVector3f ViewAdjust; // Translation to be applied to view matrix. +} ovrEyeRenderDesc; + + +//----------------------------------------------------------------------------------- +// ***** Platform-independent Rendering Configuration + +// These types are used to hide platform-specific details when passing +// render device, OS and texture data to the APIs. +// +// The benefit of having these wrappers vs. platform-specific API functions is +// that they allow game glue code to be portable. A typical example is an +// engine that has multiple back ends, say GL and D3D. Portable code that calls +// these back ends may also use LibOVR. To do this, back ends can be modified +// to return portable types such as ovrTexture and ovrRenderAPIConfig. + +typedef enum +{ + ovrRenderAPI_None, + ovrRenderAPI_OpenGL, + ovrRenderAPI_Android_GLES, // May include extra native window pointers, etc. + ovrRenderAPI_D3D9, + ovrRenderAPI_D3D10, + ovrRenderAPI_D3D11, + ovrRenderAPI_Count +} ovrRenderAPIType; + +// Platform-independent part of rendering API-configuration data. +// It is a part of ovrRenderAPIConfig, passed to ovrHmd_Configure. +typedef struct ovrRenderAPIConfigHeader_ +{ + ovrRenderAPIType API; + ovrSizei RTSize; + int Multisample; +} ovrRenderAPIConfigHeader; + +typedef struct ovrRenderAPIConfig_ +{ + ovrRenderAPIConfigHeader Header; + uintptr_t PlatformData[8]; +} ovrRenderAPIConfig; + +// Platform-independent part of eye texture descriptor. +// It is a part of ovrTexture, passed to ovrHmd_EndFrame. +// - If RenderViewport is all zeros, will be used. +typedef struct ovrTextureHeader_ +{ + ovrRenderAPIType API; + ovrSizei TextureSize; + ovrRecti RenderViewport; // Pixel viewport in texture that holds eye image. +} ovrTextureHeader; + +typedef struct ovrTexture_ +{ + ovrTextureHeader Header; + uintptr_t PlatformData[8]; +} ovrTexture; + + +// ----------------------------------------------------------------------------------- +// ***** API Interfaces + +// Basic steps to use the API: +// +// Setup: +// 1. ovrInitialize(); +// 2. ovrHMD hmd = ovrHmd_Create(0); ovrHmd_GetDesc(hmd, &hmdDesc); +// 3. Use hmdDesc and ovrHmd_GetFovTextureSize() to determine graphics configuration. +// 4. Call ovrHmd_StartSensor() to configure and initialize tracking. +// 5. Call ovrHmd_ConfigureRendering() to setup graphics for SDK rendering, +// which is the preferred approach. +// Please refer to "Game-Side Rendering" below if you prefer to do that instead. +// 5. Allocate textures as needed. +// +// Game Loop: +// 6. Call ovrHmd_BeginFrame() to get frame timing and orientation information. +// 7. Render each eye in between ovrHmd_BeginEyeRender and ovrHmd_EndEyeRender calls, +// providing the result texture to the API. +// 8. Call ovrHmd_EndFrame() to render distorted textures to the back buffer +// and present them on the Hmd. +// +// Shutdown: +// 9. ovrHmd_Destroy(hmd) +// 10. ovr_Shutdown() +// + +#ifdef __cplusplus +extern "C" { +#endif + +// Library init/shutdown, must be called around all other OVR code. +// No other functions calls are allowed before ovr_Initialize succeeds or after ovr_Shutdown. 
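// Editor's sketch of the setup/shutdown sequence outlined in "Basic steps" above.
// Purely illustrative: error handling is omitted, and apiConfig, eyeDesc and
// eyeRenderDesc are placeholders the application must fill in for its render API.
//
//    if (ovr_Initialize())
//    {
//        ovrHmd     hmd = ovrHmd_Create(0);
//        ovrHmdDesc hmdDesc;
//        ovrHmd_GetDesc(hmd, &hmdDesc);
//        ovrHmd_StartSensor(hmd, ovrHmdCap_Orientation | ovrHmdCap_YawCorrection, 0);
//        ovrHmd_ConfigureRendering(hmd, &apiConfig, hmdDesc.Caps, hmdDesc.DistortionCaps,
//                                  eyeDesc, eyeRenderDesc);
//        // ... per-frame loop (see ovrHmd_BeginFrame / ovrHmd_EndFrame below) ...
//        ovrHmd_Destroy(hmd);
//        ovr_Shutdown();
//    }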
+OVR_EXPORT ovrBool ovr_Initialize();
+OVR_EXPORT void ovr_Shutdown();
+
+
+// Detects or re-detects HMDs and reports the total number detected.
+// Users can get information about each HMD by calling ovrHmd_Create with an index.
+OVR_EXPORT int ovrHmd_Detect();
+
+
+// Creates a handle to an HMD and optionally fills in data about it.
+// Index can be in the range [0 .. ovrHmd_Detect()-1]; index mappings can change after each ovrHmd_Detect call.
+// If not null, returned handle must be freed with ovrHmd_Destroy.
+OVR_EXPORT ovrHmd ovrHmd_Create(int index);
+OVR_EXPORT void ovrHmd_Destroy(ovrHmd hmd);
+
+// Creates a "fake" HMD used for debugging only. This is not tied to specific hardware,
+// but may be used to debug some of the related rendering.
+OVR_EXPORT ovrHmd ovrHmd_CreateDebug(ovrHmdType type);
+
+
+// Returns last error for HMD state. Returns null for no error.
+// String is valid until the next call to GetLastError or until the HMD is destroyed.
+// Pass null hmd to get global error (for create, etc).
+OVR_EXPORT const char* ovrHmd_GetLastError(ovrHmd hmd);
+
+
+//-------------------------------------------------------------------------------------
+// ***** Sensor Interface
+
+// All sensor interface functions are thread-safe, allowing sensor state to be sampled
+// from different threads.
+
+// Starts sensor sampling, enabling specified capabilities, described by ovrHmdCapBits.
+// - supportedCaps specifies support that is requested. The function will succeed even
+// if these caps are not available (i.e. sensor or camera is unplugged). Support will
+// automatically be enabled if such a device is plugged in later. Software should check
+// ovrSensorState.StatusFlags for real-time status.
+// - requiredCaps specifies sensor capabilities required at the time of the call. If they
+// are not available, the function will fail. Pass 0 if only specifying SupportedCaps.
+OVR_EXPORT ovrBool ovrHmd_StartSensor(ovrHmd hmd, unsigned int supportedCaps,
+ unsigned int requiredCaps);
+// Stops sensor sampling, shutting down internal resources.
+OVR_EXPORT void ovrHmd_StopSensor(ovrHmd hmd);
+// Resets sensor orientation.
+OVR_EXPORT void ovrHmd_ResetSensor(ovrHmd hmd);
+
+// Returns sensor state reading based on the specified absolute system time.
+// Pass absTime value of 0.0 to request the most recent sensor reading; in this case
+// both Predicted and Recorded will have the same value.
+// ovrHmd_GetEyePredictedSensorState relies on this internally.
+// This may also be used for more refined timing of FrontBuffer rendering logic, etc.
+OVR_EXPORT ovrSensorState ovrHmd_GetSensorState(ovrHmd hmd, double absTime);
+
+// Returns information about a sensor.
+// Only valid after StartSensor.
+OVR_EXPORT ovrBool ovrHmd_GetSensorDesc(ovrHmd hmd, ovrSensorDesc* descOut);
+
+
+//-------------------------------------------------------------------------------------
+// ***** Graphics Setup
+
+// Fills in the description of the HMD; this is the same as that filled in by ovrHmd_Create.
+OVR_EXPORT void ovrHmd_GetDesc(ovrHmd hmd, ovrHmdDesc* desc);
+
+// Calculates texture size recommended for rendering one eye within HMD, given FOV cone.
+// Higher FOV will generally require larger textures to maintain quality.
+// - pixelsPerDisplayPixel specifies the number of render target pixels per display
+// pixel at center of distortion; 1.0 is the default value. Lower values
+// can improve performance.
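+// For example (illustrative only; hmdDesc is assumed to have been filled in by
+// ovrHmd_GetDesc, and DefaultEyeFov is assumed here to be the per-eye FOV ports it
+// reports; both eyes share one render target laid out side by side):
+//
+//     ovrSizei texLeft  = ovrHmd_GetFovTextureSize(hmd, ovrEye_Left,
+//                                                  hmdDesc.DefaultEyeFov[0], 1.0f);
+//     ovrSizei texRight = ovrHmd_GetFovTextureSize(hmd, ovrEye_Right,
+//                                                  hmdDesc.DefaultEyeFov[1], 1.0f);
+//     ovrSizei rtSize;
+//     rtSize.w = texLeft.w + texRight.w;
+//     rtSize.h = (texLeft.h > texRight.h) ? texLeft.h : texRight.h;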
+OVR_EXPORT ovrSizei ovrHmd_GetFovTextureSize(ovrHmd hmd, ovrEyeType eye, ovrFovPort fov,
+ float pixelsPerDisplayPixel);
+
+
+
+//-------------------------------------------------------------------------------------
+// ***** Rendering API Thread Safety
+
+// All of the rendering APIs, including Configure and frame functions, are *NOT
+// thread-safe*. It is ok to use ConfigureRendering on one thread and handle
+// frames on another thread, but explicit synchronization must be done since
+// functions that depend on configured state are not reentrant.
+//
+// As an extra requirement, any of the following calls must be done on
+// the render thread, which is the same thread that calls ovrHmd_BeginFrame
+// or ovrHmd_BeginFrameTiming.
+// - ovrHmd_EndFrame
+// - ovrHmd_BeginEyeRender
+// - ovrHmd_EndEyeRender
+// - ovrHmd_GetFramePointTime
+// - ovrHmd_GetEyePose
+// - ovrHmd_GetEyeTimewarpMatrices
+
+
+//-------------------------------------------------------------------------------------
+// ***** SDK-Rendering Functions
+
+// These functions support rendering of distortion by the SDK through direct
+// access to the underlying rendering HW, such as D3D or GL.
+// This is the recommended approach, as it allows for better support of future
+// Oculus hardware and a range of low-level optimizations.
+
+
+// Configures rendering; fills in computed render parameters.
+// This function can be called multiple times to change rendering settings.
+// The user passes in two eye view descriptors that are used to
+// generate complete rendering information for each eye in eyeRenderDescOut[2].
+//
+// - apiConfig provides D3D/OpenGL specific parameters. Pass null
+// to shut down rendering and release all resources.
+// - distortionCaps describe distortion settings that will be applied.
+//
+OVR_EXPORT ovrBool ovrHmd_ConfigureRendering( ovrHmd hmd,
+ const ovrRenderAPIConfig* apiConfig,
+ unsigned int hmdCaps,
+ unsigned int distortionCaps,
+ const ovrEyeDesc eyeDescIn[2],
+ ovrEyeRenderDesc eyeRenderDescOut[2] );
+
+
+// Begins a frame, returning timing and orientation information useful for simulation.
+// This should be called at the beginning of the game rendering loop (on the render thread).
+// This function relies on ovrHmd_BeginFrameTiming for some of its functionality.
+// Pass 0 for frame index if not using GetFrameTiming.
+OVR_EXPORT ovrFrameTiming ovrHmd_BeginFrame(ovrHmd hmd, unsigned int frameIndex);
+
+// Ends frame, rendering textures to frame buffer. This may perform distortion and scaling
+// internally, assuming it is not delegated to another thread.
+// Must be called on the same thread as BeginFrame. Calls ovrHmd_EndFrameTiming internally.
+// *** This function will call Present/SwapBuffers and potentially wait for GPU sync. ***
+OVR_EXPORT void ovrHmd_EndFrame(ovrHmd hmd);
+
+
+// Marks beginning of eye rendering. Must be called on the same thread as BeginFrame.
+// This function uses ovrHmd_GetEyePose to predict sensor state that should be
+// used for rendering the specified eye.
+// This combines current absolute time with prediction that is appropriate for this HMD.
+// It is ok to call ovrHmd_BeginEyeRender() on both eyes before calling ovrHmd_EndEyeRender.
+// If rendering one eye at a time, it is best to render the eye specified by
+// HmdDesc.EyeRenderOrder[0] first.
+OVR_EXPORT ovrPosef ovrHmd_BeginEyeRender(ovrHmd hmd, ovrEyeType eye);
+
+// Marks the end of eye rendering and submits eye texture for display after it is ready.
+// Rendering viewport within the texture can change per frame if necessary.
+// The specified texture may be presented immediately or deferred until ovrHmd_EndFrame,
+// depending on the implementation. The API may perform distortion and scaling internally.
+// 'renderPose' will typically be the value returned from ovrHmd_BeginEyeRender, but can
+// be different if a different pose was used for rendering.
+OVR_EXPORT void ovrHmd_EndEyeRender(ovrHmd hmd, ovrEyeType eye,
+ ovrPosef renderPose, ovrTexture* eyeTexture);
+
+
+
+//-------------------------------------------------------------------------------------
+// ***** Game-Side Rendering Functions
+
+// These functions provide distortion data and render timing support necessary to allow
+// game rendering of distortion. Game-side rendering involves the following steps:
+//
+// 1. Set up ovrEyeDesc based on desired texture size and Fov.
+// Call ovrHmd_GetRenderDesc to get the necessary rendering parameters for each eye.
+//
+// 2. Use ovrHmd_CreateDistortionMesh to generate distortion mesh.
+//
+// 3. Use ovrHmd_BeginFrameTiming, ovrHmd_GetEyePose and ovrHmd_EndFrameTiming
+// in the rendering loop to obtain timing and predicted view orientation for
+// each eye.
+// - If relying on timewarp, use ovr_WaitTillTime after rendering+flush, followed
+// by ovrHmd_GetEyeTimewarpMatrices to obtain timewarp matrices used
+// in the distortion pixel shader to reduce latency.
+//
+
+// Computes distortion viewport, view adjust and other rendering parameters for the specified
+// eye. This can be used instead of ovrHmd_ConfigureRendering to help set up rendering on
+// the game side.
+OVR_EXPORT ovrEyeRenderDesc ovrHmd_GetRenderDesc(ovrHmd hmd, ovrEyeDesc eyeDesc);
+
+
+// Describes a vertex used for distortion; this is intended to be converted into
+// the engine-specific format.
+// Some fields may be unused based on ovrDistortionCaps selected. TexG and TexB, for example,
+// are not used if chromatic correction is not requested.
+typedef struct ovrDistortionVertex_
+{
+ ovrVector2f Pos;
+ float TimeWarpFactor; // Lerp factor between time-warp matrices. Can be encoded in Pos.z.
+ float VignetteFactor; // Vignette fade factor. Can be encoded in Pos.w.
+ ovrVector2f TexR;
+ ovrVector2f TexG;
+ ovrVector2f TexB;
+} ovrDistortionVertex;
+
+// Describes a full set of distortion mesh data, filled in by ovrHmd_CreateDistortionMesh.
+// Contents of this data structure, if not null, should be freed by ovrHmd_DestroyDistortionMesh.
+typedef struct ovrDistortionMesh_
+{
+ ovrDistortionVertex* pVertexData;
+ unsigned short* pIndexData;
+ unsigned int VertexCount;
+ unsigned int IndexCount;
+} ovrDistortionMesh;
+
+// Generates distortion mesh per eye.
+// Distortion capabilities will depend on 'distortionCaps' flags; users should use
+// appropriate shaders based on these settings.
+// Distortion mesh data will be allocated and stored into the ovrDistortionMesh data structure,
+// which should be explicitly freed with ovrHmd_DestroyDistortionMesh.
+// uvScaleOffsetOut[] are filled in based on render target settings of eyeDesc.
+// The function shouldn't fail unless there is a configuration or memory error, in which case
+// ovrDistortionMesh values will be set to null.
+OVR_EXPORT ovrBool ovrHmd_CreateDistortionMesh( ovrHmd hmd, ovrEyeDesc eyeDesc,
+ unsigned int distortionCaps,
+ ovrVector2f uvScaleOffsetOut[2],
+ ovrDistortionMesh *meshData );
+
+// Frees distortion mesh allocated by ovrHmd_CreateDistortionMesh. meshData elements
+// are set to null and zero after the call.
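+// Illustrative create/convert/destroy sketch for game-side distortion rendering
+// (ConvertToEngineMesh() is a hypothetical application-side helper that copies the
+// vertex and index data into the engine's own buffers):
+//
+//     ovrDistortionMesh mesh;
+//     ovrVector2f       uvScaleOffset[2];
+//     if (ovrHmd_CreateDistortionMesh(hmd, eyeDesc, distortionCaps,
+//                                     uvScaleOffset, &mesh))
+//     {
+//         ConvertToEngineMesh(mesh.pVertexData, mesh.VertexCount,
+//                             mesh.pIndexData,  mesh.IndexCount);
+//         ovrHmd_DestroyDistortionMesh(&mesh);
+//     }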
+OVR_EXPORT void ovrHmd_DestroyDistortionMesh( ovrDistortionMesh* meshData );
+
+// Computes updated 'uvScaleOffsetOut' to be used with the distortion if the render target size or
+// viewport changes after the fact. This can be used to adjust render size every frame, if desired.
+OVR_EXPORT void ovrHmd_GetRenderScaleAndOffset( ovrHmd hmd, ovrEyeDesc eyeDesc,
+ unsigned int distortionCaps,
+ ovrVector2f uvScaleOffsetOut[2] );
+
+
+// Thread-safe timing function for the main thread. Caller should increment frameIndex
+// with every frame and pass the index to the render thread for processing.
+OVR_EXPORT ovrFrameTiming ovrHmd_GetFrameTiming(ovrHmd hmd, unsigned int frameIndex);
+
+// Called at the beginning of the frame on the Render Thread.
+// Pass frameIndex == 0 if ovrHmd_GetFrameTiming isn't being used. Otherwise,
+// pass the same frame index as was used for GetFrameTiming on the main thread.
+OVR_EXPORT ovrFrameTiming ovrHmd_BeginFrameTiming(ovrHmd hmd, unsigned int frameIndex);
+
+// Marks the end of the game-rendered frame, tracking the necessary timing information. This
+// function must be called immediately after Present/SwapBuffers + GPU sync. GPU sync is important
+// before this call to reduce latency and ensure proper timing.
+OVR_EXPORT void ovrHmd_EndFrameTiming(ovrHmd hmd);
+
+// Initializes and resets frame time tracking. This is typically not necessary, but
+// is helpful if the game changes vsync state or video mode. vsync is assumed to be on if this
+// isn't called. Resets internal frame index to the specified number.
+OVR_EXPORT void ovrHmd_ResetFrameTiming(ovrHmd hmd, unsigned int frameIndex, bool vsync);
+
+
+// Predicts and returns the pose that should be used for rendering the specified eye.
+// Must be called between ovrHmd_BeginFrameTiming & ovrHmd_EndFrameTiming.
+OVR_EXPORT ovrPosef ovrHmd_GetEyePose(ovrHmd hmd, ovrEyeType eye);
+
+// Computes timewarp matrices used by the distortion mesh shader; these are used to adjust
+// for orientation change since the last call to ovrHmd_GetEyePose for this eye.
+// The ovrDistortionVertex::TimeWarpFactor is used to blend between the matrices,
+// usually representing two different sides of the screen.
+// Must be called on the same thread as ovrHmd_BeginFrameTiming.
+OVR_EXPORT void ovrHmd_GetEyeTimewarpMatrices(ovrHmd hmd, ovrEyeType eye,
+ ovrPosef renderPose, ovrMatrix4f twmOut[2]);
+
+
+
+//-------------------------------------------------------------------------------------
+// ***** Stateless math setup functions
+
+// Used to generate projection from ovrEyeDesc::Fov.
+OVR_EXPORT ovrMatrix4f ovrMatrix4f_Projection( ovrFovPort fov,
+ float znear, float zfar, ovrBool rightHanded );
+
+// Used for 2D rendering, Y is down
+// orthoScale = 1.0f / pixelsPerTanAngleAtCenter
+// orthoDistance = distance from camera, such as 0.8m
+OVR_EXPORT ovrMatrix4f ovrMatrix4f_OrthoSubProjection(ovrMatrix4f projection, ovrVector2f orthoScale,
+ float orthoDistance, float eyeViewAdjustX);
+
+// Returns global, absolute high-resolution time in seconds. This is the same
+// value as used in sensor messages.
+OVR_EXPORT double ovr_GetTimeInSeconds();
+
+// Waits until the specified absolute time.
+OVR_EXPORT double ovr_WaitTillTime(double absTime);
+
+
+
+// -----------------------------------------------------------------------------------
+// ***** Latency Test interface
+
+// Does latency test processing and returns 'TRUE' if the specified rgb color should
+// be used to clear the screen.
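+// Illustrative per-frame usage (ClearScreenToColor() and LogMessage() are hypothetical
+// application-side helpers):
+//
+//     unsigned char rgb[3];
+//     if (ovrHmd_ProcessLatencyTest(hmd, rgb))
+//         ClearScreenToColor(rgb[0], rgb[1], rgb[2]);
+//     const char* latencyResult = ovrHmd_GetLatencyTestResult(hmd);
+//     if (latencyResult)
+//         LogMessage(latencyResult);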
+OVR_EXPORT ovrBool ovrHmd_ProcessLatencyTest(ovrHmd hmd, unsigned char rgbColorOut[3]);
+
+// Returns a non-null string once with the latency test result, when it is available.
+// Buffer is valid until next call.
+OVR_EXPORT const char* ovrHmd_GetLatencyTestResult(ovrHmd hmd);
+
+// Returns latency for HMDs that support internal latency testing via the
+// pixel read-back method (-1 for invalid or N/A).
+OVR_EXPORT double ovrHmd_GetMeasuredLatencyTest2(ovrHmd hmd);
+
+
+// -----------------------------------------------------------------------------------
+// ***** Property Access
+
+// NOTICE: This is an experimental part of the API that is likely to go away or change.
+
+// These allow accessing different properties of the HMD and profile.
+// Some of the properties may go away with profile/HMD versions, so software should
+// use defaults and/or proper fallbacks.
+//
+
+// For now, access profile entries; this will change.
+#if !defined(OVR_KEY_USER)
+
+ #define OVR_KEY_USER "User"
+ #define OVR_KEY_NAME "Name"
+ #define OVR_KEY_GENDER "Gender"
+ #define OVR_KEY_PLAYER_HEIGHT "PlayerHeight"
+ #define OVR_KEY_EYE_HEIGHT "EyeHeight"
+ #define OVR_KEY_IPD "IPD"
+ #define OVR_KEY_NECK_TO_EYE_HORIZONTAL "NeckEyeHori"
+ #define OVR_KEY_NECK_TO_EYE_VERTICAL "NeckEyeVert"
+
+ #define OVR_DEFAULT_GENDER "Male"
+ #define OVR_DEFAULT_PLAYER_HEIGHT 1.778f
+ #define OVR_DEFAULT_EYE_HEIGHT 1.675f
+ #define OVR_DEFAULT_IPD 0.064f
+ #define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL 0.12f
+ #define OVR_DEFAULT_NECK_TO_EYE_VERTICAL 0.12f
+#endif
+
+
+// Get float property. Returns first element if property is a float array.
+// Returns defaultValue if property doesn't exist.
+OVR_EXPORT float ovrHmd_GetFloat(ovrHmd hmd, const char* propertyName, float defaultVal);
+
+// Modify float property; returns false if property doesn't exist or is read-only.
+OVR_EXPORT ovrBool ovrHmd_SetFloat(ovrHmd hmd, const char* propertyName, float value);
+
+
+// Get float[] property. Returns the number of elements filled in, 0 if property doesn't exist.
+// Maximum of arraySize elements will be written.
+OVR_EXPORT unsigned int ovrHmd_GetFloatArray(ovrHmd hmd, const char* propertyName,
+ float values[], unsigned int arraySize);
+
+// Modify float[] property; returns false if property doesn't exist or is read-only.
+OVR_EXPORT ovrBool ovrHmd_SetFloatArray(ovrHmd hmd, const char* propertyName,
+ float values[], unsigned int arraySize);
+
+// Get string property. Returns first element if property is a string array.
+// Returns defaultValue if property doesn't exist.
+// String memory is guaranteed to exist until next call to GetString or GetStringArray, or HMD is destroyed.
+OVR_EXPORT const char* ovrHmd_GetString(ovrHmd hmd, const char* propertyName,
+ const char* defaultVal);
+
+// Returns array size of a property, 0 if property doesn't exist.
+// Can be used to check existence of a property.
+OVR_EXPORT unsigned int ovrHmd_GetArraySize(ovrHmd hmd, const char* propertyName);
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+
+#endif // OVR_CAPI_h
diff --git a/LibOVR/Src/OVR_CAPI_D3D.h b/LibOVR/Src/OVR_CAPI_D3D.h
new file mode 100644
index 0000000..75c383a
--- /dev/null
+++ b/LibOVR/Src/OVR_CAPI_D3D.h
@@ -0,0 +1,156 @@
+/************************************************************************************
+
+Filename : OVR_CAPI_D3D.h
+Content : D3D specific structures used by the CAPI interface.
+Created : November 7, 2013
+Authors : Michael Antonov
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+ +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ +#ifndef OVR_CAPI_D3D_h +#define OVR_CAPI_D3D_h + +#include "OVR_CAPI.h" + +#ifndef OVR_D3D_VERSION +#error Please define OVR_D3D_VERSION to 9 or 10 or 11 before including OVR_CAPI_D3D.h +#endif + + +#if defined(OVR_D3D_VERSION) && (OVR_D3D_VERSION == 11) + +//----------------------------------------------------------------------------------- +// ***** D3D11 Specific + +#include <d3d11.h> + +// Used to configure slave D3D rendering (i.e. for devices created externally). +struct ovrD3D11ConfigData +{ + // General device settings. + ovrRenderAPIConfigHeader Header; + ID3D11Device* pDevice; + ID3D11DeviceContext* pDeviceContext; + ID3D11RenderTargetView* pBackBufferRT; + IDXGISwapChain* pSwapChain; +}; + +union ovrD3D11Config +{ + ovrRenderAPIConfig Config; + ovrD3D11ConfigData D3D11; +}; + +// Used to pass D3D11 eye texture data to ovrHmd_EndFrame. +struct ovrD3D11TextureData +{ + // General device settings. + ovrTextureHeader Header; + ID3D11Texture2D* pTexture; + ID3D11ShaderResourceView* pSRView; +}; + +union ovrD3D11Texture +{ + ovrTexture Texture; + ovrD3D11TextureData D3D11; +}; + + + +#elif defined(OVR_D3D_VERSION) && (OVR_D3D_VERSION == 10) + +//----------------------------------------------------------------------------------- +// ***** D3D10 Specific + +// Used to configure slave D3D rendering (i.e. for devices created externally). +struct ovrD3D10ConfigData +{ + // General device settings. + ovrRenderAPIConfigHeader Header; + ID3D10Device* pDevice; + void* Unused; + ID3D10RenderTargetView* pBackBufferRT; + IDXGISwapChain* pSwapChain; +}; + +union ovrD3D10Config +{ + ovrRenderAPIConfig Config; + ovrD3D10ConfigData D3D10; +}; + +// Used to pass D3D10 eye texture data to ovrHmd_EndFrame. +struct ovrD3D10TextureData +{ + // General device settings. + ovrTextureHeader Header; + ID3D10Texture2D* pTexture; + ID3D10ShaderResourceView* pSRView; +}; + +union ovrD3D10Texture +{ + ovrTexture Texture; + ovrD3D10TextureData D3D10; +}; + +#elif defined(OVR_D3D_VERSION) && (OVR_D3D_VERSION == 9) + +//----------------------------------------------------------------------------------- +// ***** D3D9 Specific + +// Used to configure D3D9 rendering +struct ovrD3D9ConfigData +{ + // General device settings. + ovrRenderAPIConfigHeader Header; + + IDirect3DDevice9 * pDevice; + ///ID3D10RenderTargetView* pBackBufferRT; + ///IDXGISwapChain* pSwapChain; +}; + +union ovrD3D9Config +{ + ovrRenderAPIConfig Config; + ovrD3D9ConfigData D3D9; +}; + +// Used to pass D3D9 eye texture data to ovrHmd_EndFrame. +struct ovrD3D9TextureData +{ + // General device settings. 
+ ovrTextureHeader Header; + IDirect3DTexture9 * pTexture; + ///ID3D10ShaderResourceView* pSRView; +}; + +union ovrD3D9Texture +{ + ovrTexture Texture; + ovrD3D9TextureData D3D9; +}; + + + +#endif + + +#endif // OVR_CAPI_h diff --git a/LibOVR/Src/OVR_CAPI_GL.h b/LibOVR/Src/OVR_CAPI_GL.h new file mode 100644 index 0000000..c042b5d --- /dev/null +++ b/LibOVR/Src/OVR_CAPI_GL.h @@ -0,0 +1,59 @@ +/************************************************************************************ + +Filename : OVR_CAPI_GL.h +Content : GL specific structures used by the CAPI interface. +Created : November 7, 2013 +Authors : Lee Cooper + +Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. + +Use of this software is subject to the terms of the Oculus Inc license +agreement provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +************************************************************************************/ +#ifndef OVR_CAPI_GL_h +#define OVR_CAPI_GL_h + +#include "OVR_CAPI.h" + +//----------------------------------------------------------------------------------- +// ***** GL Specific + +#if defined(OVR_OS_WIN32) +#include <GL/gl.h> +#include <GL/wglext.h> +#endif + + +// Used to configure slave GL rendering (i.e. for devices created externally). +typedef struct ovrGLConfigData_s +{ + // General device settings. + ovrRenderAPIConfigHeader Header; + HWND Window; + HGLRC WglContext; + HDC GdiDc; +} ovrGLConfigData; + +union ovrGLConfig +{ + ovrRenderAPIConfig Config; + ovrGLConfigData OGL; +}; + +// Used to pass GL eye texture data to ovrHmd_EndFrame. +typedef struct ovrGLTextureData_s +{ + // General device settings. + ovrTextureHeader Header; + GLuint TexId; +} ovrGLTextureData; + +typedef union ovrGLTexture_s +{ + ovrTexture Texture; + ovrGLTextureData OGL; +} ovrGLTexture; + +#endif // OVR_CAPI_GL_h diff --git a/LibOVR/Src/OVR_Common_HMDDevice.cpp b/LibOVR/Src/OVR_Common_HMDDevice.cpp new file mode 100644 index 0000000..b4ef177 --- /dev/null +++ b/LibOVR/Src/OVR_Common_HMDDevice.cpp @@ -0,0 +1,383 @@ +/************************************************************************************ + +Filename : OVR_Common_HMDDevice.cpp +Content : +Created : +Authors : + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+*************************************************************************************/
+
+// Should be #included from the relevant OVR_YourPlatformHere_HMDDevice.cpp
+
+#include "Kernel/OVR_Alg.h"
+
+//-------------------------------------------------------------------------------------
+// ***** HMDDeviceCreateDesc
+
+DeviceBase* HMDDeviceCreateDesc::NewDeviceInstance()
+{
+ return new HMDDevice(this);
+}
+
+void HMDDeviceCreateDesc::SetScreenParameters(int x, int y,
+ int hres, int vres,
+ float hsize, float vsize,
+ float vCenterFromTopInMeters, float lensSeparationInMeters)
+{
+ Desktop.X = x;
+ Desktop.Y = y;
+ ResolutionInPixels = Sizei(hres, vres);
+ ScreenSizeInMeters = Sizef(hsize, vsize);
+ VCenterFromTopInMeters = vCenterFromTopInMeters;
+ LensSeparationInMeters = lensSeparationInMeters;
+
+ Contents |= Contents_Screen;
+}
+
+
+void HMDDeviceCreateDesc::SetDistortion(const float* dks)
+{
+ for (int i = 0; i < 4; i++)
+ DistortionK[i] = dks[i];
+ // TODO: add DistortionEqn
+ Contents |= Contents_Distortion;
+}
+
+HmdTypeEnum HMDDeviceCreateDesc::GetHmdType() const
+{
+ // Determine the HMD model.
+ // The closest thing we have to a dependable model indicator is the
+ // screen characteristics. Additionally we can check the sensor
+ // (on attached devices) to further refine our guess.
+ HmdTypeEnum hmdType = HmdType_Unknown;
+
+ if ( ResolutionInPixels.w == 1280 )
+ {
+ if ( ScreenSizeInMeters.w > 0.1497f && ScreenSizeInMeters.w < 0.1498f )
+ hmdType = HmdType_DK1;
+ else
+ hmdType = HmdType_DKProto;
+ }
+ else if ( ResolutionInPixels.w == 1920 )
+ {
+ // DKHD prototypes, all 1920x1080
+ if ( ScreenSizeInMeters.w > 0.1209f && ScreenSizeInMeters.w < 0.1210f )
+ {
+ // Screen size 0.12096 x 0.06804
+ hmdType = HmdType_DKHDProto;
+ }
+ else if ( ScreenSizeInMeters.w > 0.1257f && ScreenSizeInMeters.w < 0.1258f )
+ {
+ // Screen size 0.125 x 0.071
+ // Could be a HmdType_DKHDProto566Mi, HmdType_CrystalCoveProto, or DK2
+ // - most likely the latter.
+ hmdType = HmdType_DK2; + + // If available, check the sensor to determine exactly which variant this is + if (pDevice) + { + Ptr<SensorDevice> sensor = *((HMDDevice*)pDevice)->GetSensor(); + + SensorInfo sinfo; + if (sensor && sensor->GetDeviceInfo(&sinfo)) + { + if (sinfo.ProductId == 1) + { + hmdType = HmdType_DKHDProto566Mi; + } + else + { // Crystal Cove uses 0.# firmware, DK2 uses 1.# + int firm_major = Alg::DecodeBCD((sinfo.Version >> 8) & 0x00ff); + int firm_minor = Alg::DecodeBCD(sinfo.Version & 0xff); + OVR_UNUSED(firm_minor); + if (firm_major == 0) + hmdType = HmdType_CrystalCoveProto; + else + hmdType = HmdType_DK2; + } + } + } + } + else if (ScreenSizeInMeters.w > 0.1295f && ScreenSizeInMeters.w < 0.1297f) + { + // Screen size 0.1296 x 0.0729 + hmdType = HmdType_DKHD2Proto; + } + } + + OVR_ASSERT( hmdType != HmdType_Unknown ); + return hmdType; +} + +bool HMDDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const +{ + if ((info->InfoClassType != Device_HMD) && + (info->InfoClassType != Device_None)) + return false; + + HmdTypeEnum hmdType = GetHmdType(); + char const* deviceName = "Oculus HMD"; + switch (hmdType) + { + case HmdType_DKProto: deviceName = "Oculus Rift Prototype"; break; + case HmdType_DK1: deviceName = "Oculus Rift DK1"; break; + case HmdType_DKHDProto: deviceName = "Oculus Rift DKHD"; break; + case HmdType_DKHD2Proto: deviceName = "Oculus Rift DKHD2"; break; + case HmdType_DKHDProto566Mi: deviceName = "Oculus Rift DKHD 566 Mi"; break; + case HmdType_CrystalCoveProto: deviceName = "Oculus Rift Crystal Cove"; break; + case HmdType_DK2: deviceName = "Oculus Rift DK2"; break; + } + + info->ProductName = deviceName; + info->Manufacturer = "Oculus VR"; + info->Type = Device_HMD; + info->Version = 0; + + // Display detection. + if (info->InfoClassType == Device_HMD) + { + HMDInfo* hmdInfo = static_cast<HMDInfo*>(info); + + hmdInfo->HmdType = hmdType; + hmdInfo->DesktopX = Desktop.X; + hmdInfo->DesktopY = Desktop.Y; + hmdInfo->ResolutionInPixels = ResolutionInPixels; + hmdInfo->ScreenSizeInMeters = ScreenSizeInMeters; // Includes ScreenGapSizeInMeters + hmdInfo->ScreenGapSizeInMeters = 0.0f; + hmdInfo->CenterFromTopInMeters = VCenterFromTopInMeters; + hmdInfo->LensSeparationInMeters = LensSeparationInMeters; + // TODO: any other information we get from the hardware itself should be added to this list + + switch ( hmdInfo->HmdType ) + { + case HmdType_DKProto: + // WARNING - estimated. + hmdInfo->Shutter.Type = HmdShutter_RollingTopToBottom; + hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 60.0f ); + hmdInfo->Shutter.VsyncToFirstScanline = 0.000052f; + hmdInfo->Shutter.FirstScanlineToLastScanline = 0.016580f; + hmdInfo->Shutter.PixelSettleTime = 0.015f; // estimated. + hmdInfo->Shutter.PixelPersistence = hmdInfo->Shutter.VsyncToNextVsync; // Full persistence + break; + case HmdType_DK1: + // Data from specs. + hmdInfo->Shutter.Type = HmdShutter_RollingTopToBottom; + hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 60.0f ); + hmdInfo->Shutter.VsyncToFirstScanline = 0.00018226f; + hmdInfo->Shutter.FirstScanlineToLastScanline = 0.01620089f; + hmdInfo->Shutter.PixelSettleTime = 0.017f; // estimated. + hmdInfo->Shutter.PixelPersistence = hmdInfo->Shutter.VsyncToNextVsync; // Full persistence + break; + case HmdType_DKHDProto: + // Data from specs. 
+ hmdInfo->Shutter.Type = HmdShutter_RollingRightToLeft;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 60.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.0000859f;
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.0164948f;
+ hmdInfo->Shutter.PixelSettleTime = 0.012f; // estimated.
+ hmdInfo->Shutter.PixelPersistence = hmdInfo->Shutter.VsyncToNextVsync; // Full persistence
+ break;
+ case HmdType_DKHD2Proto:
+ // Data from specs.
+ hmdInfo->Shutter.Type = HmdShutter_RollingRightToLeft;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 60.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.000052f;
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.016580f;
+ hmdInfo->Shutter.PixelSettleTime = 0.015f; // estimated.
+ hmdInfo->Shutter.PixelPersistence = hmdInfo->Shutter.VsyncToNextVsync; // Full persistence
+ break;
+ case HmdType_DKHDProto566Mi:
+#if 0
+ // Low-persistence global shutter
+ hmdInfo->Shutter.Type = HmdShutter_Global;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 76.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.0000273f + 0.0131033f; // Global shutter - first visible scan line is actually the last!
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.000f; // Global shutter - all visible at once.
+ hmdInfo->Shutter.PixelSettleTime = 0.0f; // <100us
+ hmdInfo->Shutter.PixelPersistence = 0.18f * hmdInfo->Shutter.VsyncToNextVsync; // Configurable - currently set to 18% of total frame.
+#else
+ // Low-persistence rolling shutter
+ hmdInfo->Shutter.Type = HmdShutter_RollingRightToLeft;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 76.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.0000273f;
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.0131033f;
+ hmdInfo->Shutter.PixelSettleTime = 0.0f; // <100us
+ hmdInfo->Shutter.PixelPersistence = 0.18f * hmdInfo->Shutter.VsyncToNextVsync; // Configurable - currently set to 18% of total frame.
+#endif
+ break;
+ case HmdType_CrystalCoveProto:
+ // Low-persistence rolling shutter
+ hmdInfo->Shutter.Type = HmdShutter_RollingRightToLeft;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 76.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.0000273f;
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.0131033f;
+ hmdInfo->Shutter.PixelSettleTime = 0.0f; // <100us
+ hmdInfo->Shutter.PixelPersistence = 0.18f * hmdInfo->Shutter.VsyncToNextVsync; // Configurable - currently set to 18% of total frame.
+ break;
+ case HmdType_DK2:
+ // Low-persistence rolling shutter
+ hmdInfo->Shutter.Type = HmdShutter_RollingRightToLeft;
+ hmdInfo->Shutter.VsyncToNextVsync = ( 1.0f / 76.0f );
+ hmdInfo->Shutter.VsyncToFirstScanline = 0.0000273f;
+ hmdInfo->Shutter.FirstScanlineToLastScanline = 0.0131033f;
+ hmdInfo->Shutter.PixelSettleTime = 0.0f; // <100us
+ hmdInfo->Shutter.PixelPersistence = 0.18f * hmdInfo->Shutter.VsyncToNextVsync; // Configurable - currently set to 18% of total frame.
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+
+ OVR_strcpy(hmdInfo->DisplayDeviceName, sizeof(hmdInfo->DisplayDeviceName),
+ DisplayDeviceName.ToCStr());
+#if defined(OVR_OS_WIN32)
+ // Nothing special for Win32.
+#elif defined(OVR_OS_MAC) + hmdInfo->DisplayId = DisplayId; +#elif defined(OVR_OS_LINUX) + hmdInfo->DisplayId = DisplayId; +#elif defined(OVR_OS_ANDROID) + hmdInfo->DisplayId = DisplayId; +#else +#error Unknown platform +#endif + + } + + return true; +} + + + + + +//------------------------------------------------------------------------------------- +// ***** HMDDevice + +HMDDevice::HMDDevice(HMDDeviceCreateDesc* createDesc) + : OVR::DeviceImpl<OVR::HMDDevice>(createDesc, 0) +{ +} +HMDDevice::~HMDDevice() +{ +} + +bool HMDDevice::Initialize(DeviceBase* parent) +{ + pParent = parent; + return true; +} +void HMDDevice::Shutdown() +{ + ProfileName.Clear(); + pCachedProfile.Clear(); + pParent.Clear(); +} + +Profile* HMDDevice::GetProfile() +{ + // Loads and returns a cached profile based on this device and current user + if (pCachedProfile == NULL) + { + ProfileManager* mgr = GetManager()->GetProfileManager(); + const char* profile_name = GetProfileName(); + if (profile_name && profile_name[0]) + pCachedProfile = *mgr->GetProfile(this, profile_name); + + if (pCachedProfile == NULL) + pCachedProfile = *mgr->GetDefaultProfile(this); + + } + return pCachedProfile.GetPtr(); +} + +const char* HMDDevice::GetProfileName() +{ + if (ProfileName.IsEmpty()) + { // If the profile name has not been initialized then + // retrieve the stored default user for this specific device + ProfileManager* mgr = GetManager()->GetProfileManager(); + const char* name = mgr->GetDefaultUser(this); + ProfileName = name; + } + + return ProfileName.ToCStr(); +} + +bool HMDDevice::SetProfileName(const char* name) +{ + if (ProfileName == name) + return true; // already set + + // Flush the old profile + pCachedProfile.Clear(); + if (!name) + { + ProfileName.Clear(); + return false; + } + + // Set the name and attempt to cache the profile + ProfileName = name; + if (GetProfile()) + { + return true; + } + else + { + ProfileName.Clear(); + return false; + } +} + +OVR::SensorDevice* HMDDevice::GetSensor() +{ + // Just return first sensor found since we have no way to match it yet. + + // Create DK2 sensor if it exists otherwise create first DK1 sensor. + SensorDevice* sensor = NULL; + + DeviceEnumerator<SensorDevice> enumerator = GetManager()->EnumerateDevices<SensorDevice>(); + + while(enumerator.GetType() != Device_None) + { + SensorInfo info; + enumerator.GetDeviceInfo(&info); + + if (info.ProductId == Device_Tracker2_ProductId) + { + sensor = enumerator.CreateDevice(); + break; + } + + enumerator.Next(); + } + + if (sensor == NULL) + { + sensor = GetManager()->EnumerateDevices<SensorDevice>().CreateDevice(); + } + + if (sensor) + { + sensor->SetCoordinateFrame(SensorDevice::Coord_HMD); + } + + return sensor; +} diff --git a/LibOVR/Src/OVR_Device.h b/LibOVR/Src/OVR_Device.h index d0a39d0..4ddc7a2 100644 --- a/LibOVR/Src/OVR_Device.h +++ b/LibOVR/Src/OVR_Device.h @@ -6,16 +6,16 @@ Content : Definition of HMD-related Device interfaces Created : September 21, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -37,6 +37,7 @@ limitations under the License. #include "Kernel/OVR_RefCount.h" #include "Kernel/OVR_String.h" + namespace OVR { // Declared externally @@ -79,7 +80,7 @@ public: virtual bool SupportsMessageType(MessageType) const { return true; } private: - UPInt Internal[4]; + UPInt Internal[8]; }; @@ -112,12 +113,14 @@ public: virtual DeviceBase* GetParent() const; virtual DeviceManager* GetManager() const; - virtual void SetMessageHandler(MessageHandler* handler); - virtual MessageHandler* GetMessageHandler() const; + virtual void AddMessageHandler(MessageHandler* handler); virtual DeviceType GetType() const; virtual bool GetDeviceInfo(DeviceInfo* info) const; + // Returns true if device is connected and usable + virtual bool IsConnected(); + // returns the MessageHandler's lock Lock* GetHandlerLock() const; protected: @@ -138,25 +141,23 @@ class DeviceInfo { public: DeviceInfo() : InfoClassType(Device_None), Type(Device_None), Version(0) - { ProductName[0] = Manufacturer[0] = 0; } - - enum { MaxNameLength = 32 }; + {} // Type of device for which DeviceInfo is intended. // This will be set to Device_HMD for HMDInfo structure, note that this may be // different form the actual device type since (Device_None) is valid. - const DeviceType InfoClassType; + const DeviceType InfoClassType; // Type of device this describes. This must be the same as InfoClassType when // InfoClassType != Device_None. - DeviceType Type; + DeviceType Type; // Name string describing the product: "Oculus Rift DK1", etc. - char ProductName[MaxNameLength]; - char Manufacturer[MaxNameLength]; - unsigned Version; + String ProductName; + String Manufacturer; + unsigned Version; protected: DeviceInfo(DeviceType type) : InfoClassType(type), Type(type), Version(0) - { ProductName[0] = Manufacturer[0] = 0; } + {} void operator = (const DeviceInfo&) { OVR_ASSERT(0); } // Assignment not allowed. }; @@ -318,35 +319,28 @@ protected: class HMDInfo : public DeviceInfo { public: - // Size of the entire screen, in pixels. - unsigned HResolution, VResolution; - // Physical dimensions of the active screen in meters. Can be used to calculate - // projection center while considering IPD. - float HScreenSize, VScreenSize; - // Physical offset from the top of the screen to the eye center, in meters. - // This will usually, but not necessarily be half of VScreenSize. - float VScreenCenter; - // Distance from the eye to screen surface, in meters. - // Useful for calculating FOV and projection. - float EyeToScreenDistance; - // Distance between physical lens centers useful for calculating distortion center. - float LensSeparationDistance; - // Configured distance between the user's eye centers, in meters. Defaults to 0.064. - float InterpupillaryDistance; - - // Radial distortion correction coefficients. 
- // The distortion assumes that the input texture coordinates will be scaled - // by the following equation: - // uvResult = uvInput * (K0 + K1 * uvLength^2 + K2 * uvLength^4) - // Where uvInput is the UV vector from the center of distortion in direction - // of the mapped pixel, uvLength is the magnitude of that vector, and uvResult - // the corresponding location after distortion. - float DistortionK[4]; - - float ChromaAbCorrection[4]; + // Characteristics of the HMD screen and enclosure + HmdTypeEnum HmdType; + Size<int> ResolutionInPixels; + Size<float> ScreenSizeInMeters; + float ScreenGapSizeInMeters; + float CenterFromTopInMeters; + float LensSeparationInMeters; + + // Timing & shutter data. All values in seconds. + struct ShutterInfo + { + HmdShutterTypeEnum Type; + float VsyncToNextVsync; // 1/framerate + float VsyncToFirstScanline; // for global shutter, vsync->shutter open. + float FirstScanlineToLastScanline; // for global shutter, will be zero. + float PixelSettleTime; // estimated. + float PixelPersistence; // Full persistence = 1/framerate. + } Shutter; // Desktop coordinate position of the screen (can be negative; may not be present on all platforms) - int DesktopX, DesktopY; + int DesktopX; + int DesktopY; // Windows: // "\\\\.\\DISPLAY3", etc. Can be used in EnumDisplaySettings/CreateDC. @@ -356,43 +350,44 @@ public: long DisplayId; + // Constructor initializes all values to 0s. + // To create a "virtualized" HMDInfo, use CreateDebugHMDInfo instead. HMDInfo() - : DeviceInfo(Device_HMD), - HResolution(0), VResolution(0), HScreenSize(0), VScreenSize(0), - VScreenCenter(0), EyeToScreenDistance(0), - LensSeparationDistance(0), InterpupillaryDistance(0), - DesktopX(0), DesktopY(0), DisplayId(0) + : DeviceInfo(Device_HMD), + HmdType(HmdType_None), + ResolutionInPixels(0), + ScreenSizeInMeters(0.0f), + ScreenGapSizeInMeters(0.0f), + CenterFromTopInMeters(0), + LensSeparationInMeters(0), + DisplayId(0) { + DesktopX = 0; + DesktopY = 0; DisplayDeviceName[0] = 0; - memset(DistortionK, 0, sizeof(DistortionK)); - DistortionK[0] = 1; - ChromaAbCorrection[0] = ChromaAbCorrection[2] = 1; - ChromaAbCorrection[1] = ChromaAbCorrection[3] = 0; + Shutter.Type = HmdShutter_LAST; + Shutter.VsyncToNextVsync = 0.0f; + Shutter.VsyncToFirstScanline = 0.0f; + Shutter.FirstScanlineToLastScanline = 0.0f; + Shutter.PixelSettleTime = 0.0f; + Shutter.PixelPersistence = 0.0f; } // Operator = copies local fields only (base class must be correct already) void operator = (const HMDInfo& src) { - HResolution = src.HResolution; - VResolution = src.VResolution; - HScreenSize = src.HScreenSize; - VScreenSize = src.VScreenSize; - VScreenCenter = src.VScreenCenter; - EyeToScreenDistance = src.EyeToScreenDistance; - LensSeparationDistance = src.LensSeparationDistance; - InterpupillaryDistance = src.InterpupillaryDistance; - DistortionK[0] = src.DistortionK[0]; - DistortionK[1] = src.DistortionK[1]; - DistortionK[2] = src.DistortionK[2]; - DistortionK[3] = src.DistortionK[3]; - ChromaAbCorrection[0] = src.ChromaAbCorrection[0]; - ChromaAbCorrection[1] = src.ChromaAbCorrection[1]; - ChromaAbCorrection[2] = src.ChromaAbCorrection[2]; - ChromaAbCorrection[3] = src.ChromaAbCorrection[3]; - DesktopX = src.DesktopX; - DesktopY = src.DesktopY; + HmdType = src.HmdType; + ResolutionInPixels = src.ResolutionInPixels; + ScreenSizeInMeters = src.ScreenSizeInMeters; + ScreenGapSizeInMeters = src.ScreenGapSizeInMeters; + CenterFromTopInMeters = src.CenterFromTopInMeters; + LensSeparationInMeters = src.LensSeparationInMeters; + 
DesktopX = src.DesktopX; + DesktopY = src.DesktopY; + Shutter = src.Shutter; memcpy(DisplayDeviceName, src.DisplayDeviceName, sizeof(DisplayDeviceName)); - DisplayId = src.DisplayId; + + DisplayId = src.DisplayId; } bool IsSameDisplay(const HMDInfo& o) const @@ -431,10 +426,10 @@ public: // Requests the currently used profile. This profile affects the // settings reported by HMDInfo. - virtual Profile* GetProfile() const = 0; + virtual Profile* GetProfile() = 0; // Obtains the currently used profile name. This is initialized to the default // profile name, if any; it can then be changed per-device by SetProfileName. - virtual const char* GetProfileName() const = 0; + virtual const char* GetProfileName() = 0; // Sets the profile user name, changing the data returned by GetProfileInfo. virtual bool SetProfileName(const char* name) = 0; @@ -483,7 +478,6 @@ class SensorInfo : public DeviceInfo public: SensorInfo() : DeviceInfo(Device_Sensor), VendorId(0), ProductId(0) { - SerialNumber[0] = 0; } // HID Vendor and ProductId of the device. @@ -492,12 +486,399 @@ public: // MaxRanges report maximum sensor range values supported by HW. SensorRange MaxRanges; // Sensor (and display) serial number. - char SerialNumber[20]; + String SerialNumber; private: void operator = (const SensorInfo&) { OVR_ASSERT(0); } // Assignment not allowed. }; +// Tracking settings (DK2). +struct TrackingReport +{ + TrackingReport() + : CommandId(0), Pattern(0), + Enable(0), Autoincrement(0), UseCarrier(0), + SyncInput(0), VsyncLock(0), CustomPattern(0), + ExposureLength(0), FrameInterval(0), + VsyncOffset(0), DutyCycle(0) + {} + + TrackingReport( UInt16 commandId, + UByte pattern, + bool enable, + bool autoincrement, + bool useCarrier, + bool syncInput, + bool vsyncLock, + bool customPattern, + UInt16 exposureLength, + UInt16 frameInterval, + UInt16 vsyncOffset, + UByte dutyCycle) + : CommandId(commandId), Pattern(pattern), + Enable(enable), Autoincrement(autoincrement), UseCarrier(useCarrier), + SyncInput(syncInput), VsyncLock(vsyncLock), CustomPattern(customPattern), + ExposureLength(exposureLength), FrameInterval(frameInterval), + VsyncOffset(vsyncOffset), DutyCycle(dutyCycle) + { } + + UInt16 CommandId; + UByte Pattern; // Tracking LED pattern index. + bool Enable; // Enables the tracking LED exposure and updating. + bool Autoincrement; // Autoincrement pattern after each exposure. + bool UseCarrier; // Modulate tracking LEDs at 85kHz. + bool SyncInput; // Trigger LED exposure from wired sync signal. + bool VsyncLock; // Trigger LED exposure from panel Vsync. + bool CustomPattern; // Use custom LED sequence. + UInt16 ExposureLength; // Tracking LED illumination (and exposure) length in microseconds. + UInt16 FrameInterval; // LED exposure interval in microseconds when in + // 'internal timer' mode (when SyncInput = VsyncLock = false). + UInt16 VsyncOffset; // Exposure offset in microseconds from vsync when in + // 'vsync lock' mode (when VsyncLock = true). + UByte DutyCycle; // Duty cycle of 85kHz modulation when in 'use carrier' mode + // (when UseCarrier = true). 128 = 50% duty cycle. +}; + +// Display settings (DK2). +struct DisplayReport +{ + enum ShutterTypeEnum + { + // These are not yet defined. + ShutterType_Default = 0, + }; + + enum CurrentLimitEnum + { + // These are not yet defined. 
+ CurrentLimit_Default = 0, + }; + + DisplayReport() + : CommandId(0), Brightness(0), + ShutterType(ShutterType_Default), CurrentLimit(CurrentLimit_Default), UseRolling(0), + ReverseRolling(0), HighBrightness(0), SelfRefresh(0), + ReadPixel(0), DirectPentile(0), + Persistence(0), LightingOffset(0), + PixelSettle(0), TotalRows(0) + {} + + DisplayReport( UInt16 commandId, + UByte brightness, + ShutterTypeEnum shutterType, + CurrentLimitEnum currentLimit, + bool useRolling, + bool reverseRolling, + bool highBrightness, + bool selfRefresh, + bool readPixel, + bool directPentile, + UInt16 persistence, + UInt16 lightingOffset, + UInt16 pixelSettle, + UInt16 totalRows) + : CommandId(commandId), Brightness(brightness), + ShutterType(shutterType), CurrentLimit(currentLimit), UseRolling(useRolling), + ReverseRolling(reverseRolling), HighBrightness(highBrightness), SelfRefresh(selfRefresh), + ReadPixel(readPixel), DirectPentile(directPentile), + Persistence(persistence), LightingOffset(lightingOffset), + PixelSettle(pixelSettle), TotalRows(totalRows) + { } + + UInt16 CommandId; + UByte Brightness; // See 'DK2 Firmware Specification' document for a description of + ShutterTypeEnum ShutterType; // display settings. + CurrentLimitEnum CurrentLimit; + bool UseRolling; + bool ReverseRolling; + bool HighBrightness; + bool SelfRefresh; + bool ReadPixel; + bool DirectPentile; + UInt16 Persistence; + UInt16 LightingOffset; + UInt16 PixelSettle; + UInt16 TotalRows; +}; + +// MagCalibration matrix (DK2). +struct MagCalibrationReport +{ + MagCalibrationReport() + : CommandId(0), Version(0), Calibration() + {} + + MagCalibrationReport( UInt16 commandId, + UByte version, + const Matrix4f& calibration) + : CommandId(commandId), Version(version), Calibration(calibration) + { } + + UInt16 CommandId; + UByte Version; // Version of the calibration procedure used to generate the calibration matrix. + Matrix4f Calibration; // Calibration matrix. Note only the first three rows are used by the feature report. +}; + +// PositionCalibration values (DK2). +// - Sensor interface versions before 5 do not support Normal and Rotation. +struct PositionCalibrationReport +{ + enum PositionTypeEnum + { + PositionType_LED = 0, + PositionType_IMU = 1 + }; + + PositionCalibrationReport() + : CommandId(0), Version(0), + Position(0), Normal(0), Rotation(0), + PositionIndex(0), NumPositions(0), PositionType(PositionType_LED) + {} + + PositionCalibrationReport(UInt16 commandId, + UByte version, + const Vector3f& position, + const Vector3f& normal, + float rotation, + UInt16 positionIndex, + UInt16 numPositions, + PositionTypeEnum positionType) + : CommandId(commandId), Version(version), + Position(position), Normal(normal), Rotation(rotation), + PositionIndex(positionIndex), NumPositions(numPositions), PositionType(positionType) + { + } + + UInt16 CommandId; + UByte Version; // The version of the calibration procedure used to generate the stored positions. + Vector3d Position; // Position of the LED or inertial tracker in meters. This is relative to the + // center of the emitter plane of the display at nominal focus. + Vector3d Normal; // Normal of the LED or inertial tracker. This is a signed integer in + // meters. The normal is relative to the position. + double Rotation; // The rotation about the normal. This is in radians. + UInt16 PositionIndex; // The current position being read or written to. Autoincrements on reads, gets set + // to the written value on writes. 
+ UInt16 NumPositions; // The read-only number of items with positions stored. The last position is that of + // the inertial tracker, all others are LED positions. + PositionTypeEnum PositionType; // The type of the item which has its position reported in the current report +}; + +// CustomPattern values (DK2). +struct CustomPatternReport +{ + CustomPatternReport() + : CommandId(0), SequenceLength(0), Sequence(0), + LEDIndex(0), NumLEDs(0) + {} + + CustomPatternReport(UInt16 commandId, + UByte sequenceLength, + UInt32 sequence, + UInt16 ledIndex, + UInt16 numLEDs) + : CommandId(commandId), SequenceLength(sequenceLength), Sequence(sequence), + LEDIndex(ledIndex), NumLEDs(numLEDs) + { } + + UInt16 CommandId; + UByte SequenceLength; // See 'DK2 Firmware Specification' document for a description of + UInt32 Sequence; // LED custom patterns. + UInt16 LEDIndex; + UInt16 NumLEDs; +}; + +// KeepAliveMux settings (DK2). +struct KeepAliveMuxReport +{ + KeepAliveMuxReport() + : CommandId(0), INReport(0), Interval(0) + {} + + KeepAliveMuxReport( UInt16 commandId, + UByte inReport, + UInt16 interval) + : CommandId(commandId), INReport(inReport), Interval(interval) + { } + + UInt16 CommandId; + UByte INReport; // Requested IN report type (1 = DK1, 11 = DK2). + UInt16 Interval; // Keep alive period in milliseconds. +}; + +// Manufacturing test result (DK2). +struct ManufacturingReport +{ + ManufacturingReport() + : CommandId(0), NumStages(0), Stage(0), + StageLocation(0), StageTime(0), Result(0), StageVersion(0) + {} + + ManufacturingReport( UInt16 commandId, + UByte numStages, + UByte stage, + UByte version, + UInt16 stageLocation, + UInt32 stageTime, + UInt32 result) + : CommandId(commandId), NumStages(numStages), Stage(stage), + StageLocation(stageLocation), StageTime(stageTime), Result(result), StageVersion(version) + { } + + UInt16 CommandId; + UByte NumStages; // See 'DK2 Firmware Specification' document for a description of + UByte Stage; // manufacturing test results. + UByte StageVersion; + UInt16 StageLocation; + UInt32 StageTime; + UInt32 Result; +}; + +// UUID (DK2). +struct UUIDReport +{ + static const int UUID_SIZE = 20; + + UUIDReport() + : CommandId(0) + { + memset(UUIDValue, 0, sizeof(UUIDValue)); + } + + UUIDReport( UInt16 commandId, + UByte uuid[UUID_SIZE]) + : CommandId(commandId) + { + for (int i=0; i<UUID_SIZE; i++) + { + UUIDValue[i] = uuid[i]; + } + } + + UInt16 CommandId; + UByte UUIDValue[UUID_SIZE]; // See 'DK2 Firmware Specification' document for + // a description of UUID. +}; + +// Lens Distortion (DK2). 
+struct LensDistortionReport +{ + LensDistortionReport() + : CommandId(0), + NumDistortions(0), + DistortionIndex(0), + Bitmask(0), + LensType(0), + Version(0), + EyeRelief(0), + MaxR(0), + MetersPerTanAngleAtCenter(0) + {} + + LensDistortionReport( UInt16 commandId, + UByte numDistortions, + UByte distortionIndex, + UByte bitmask, + UInt16 lensType, + UInt16 version, + UInt16 eyeRelief, + UInt16 kCoefficients[11], + UInt16 maxR, + UInt16 metersPerTanAngleAtCenter, + UInt16 chromaticAberration[4]) + : CommandId(commandId), + NumDistortions(numDistortions), + DistortionIndex(distortionIndex), + Bitmask(bitmask), + LensType(lensType), + Version(version), + EyeRelief(eyeRelief), + MaxR(maxR), + MetersPerTanAngleAtCenter(metersPerTanAngleAtCenter) + { + memcpy(KCoefficients, kCoefficients, sizeof(KCoefficients)); + memcpy(ChromaticAberration, chromaticAberration, sizeof(ChromaticAberration)); + } + + UInt16 CommandId; + UByte NumDistortions; + UByte DistortionIndex; + UByte Bitmask; + UInt16 LensType; + UInt16 Version; + UInt16 EyeRelief; + UInt16 KCoefficients[11]; + UInt16 MaxR; + UInt16 MetersPerTanAngleAtCenter; + UInt16 ChromaticAberration[4]; +}; + +// Temperature calibration result (DK2). +struct TemperatureReport +{ + TemperatureReport() + : CommandId(0), Version(0), + NumBins(0), Bin(0), NumSamples(0), Sample(0), + TargetTemperature(0), ActualTemperature(0), + Time(0), Offset(0) + {} + + TemperatureReport( UInt16 commandId, + UByte version, + UByte numBins, + UByte bin, + UByte numSamples, + UByte sample, + double targetTemperature, + double actualTemperature, + UInt32 time, + Vector3d offset) + : CommandId(commandId), Version(version), + NumBins(numBins), Bin(bin), NumSamples(numSamples), Sample(sample), + TargetTemperature(targetTemperature), ActualTemperature(actualTemperature), + Time(time), Offset(offset) + { } + + UInt16 CommandId; + UByte Version; // See 'DK2 Firmware Specification' document for a description of + UByte NumBins; // temperature calibration data. + UByte Bin; + UByte NumSamples; + UByte Sample; + double TargetTemperature; + double ActualTemperature; + UInt32 Time; // Better hope nobody tries to use this in 2038 + Vector3d Offset; +}; + +// Gyro autocalibration result (DK2). +struct GyroOffsetReport +{ + enum VersionEnum + { + // These are not yet defined. + Version_NoOffset = 0, + Version_ShortAvg = 1, + Version_LongAvg = 2 + }; + + GyroOffsetReport() + : CommandId(0), Version(Version_NoOffset), + Offset(0), Temperature(0) + {} + + GyroOffsetReport( UInt16 commandId, + VersionEnum version, + Vector3d offset, + double temperature) + : CommandId(commandId), Version(version), + Offset(offset), Temperature(temperature) + {} + + UInt16 CommandId; + VersionEnum Version; + Vector3d Offset; + double Temperature; +}; //------------------------------------------------------------------------------------- // ***** SensorDevice @@ -519,7 +900,9 @@ public: virtual DeviceType GetType() const { return Device_Sensor; } - + virtual UByte GetDeviceInterfaceVersion() = 0; + + // CoordinateFrame defines whether messages come in the coordinate frame // of the sensor device or HMD, which has a different internal sensor. // Sensors obtained form the HMD will automatically use HMD coordinates. @@ -542,7 +925,7 @@ public: // Returns currently set report rate, in Hz. If 0 - error occurred. // Note, this value may be different from the one provided for SetReportRate. The return // value will contain the actual rate. 
- virtual unsigned GetReportRate() const = 0; + virtual unsigned GetReportRate() const = 0; // Sets maximum range settings for the sensor described by SensorRange. // The function will fail if you try to pass values outside Maximum supported @@ -550,11 +933,52 @@ public: // Pass waitFlag == true to wait for command completion. For waitFlag == true, // returns true if the range was applied successfully (no HW error). // For waitFlag = false, return 'true' means that command was enqueued successfully. - virtual bool SetRange(const SensorRange& range, bool waitFlag = false) = 0; + virtual bool SetRange(const SensorRange& range, bool waitFlag = false) = 0; // Return the current sensor range settings for the device. These may not exactly // match the values applied through SetRange. - virtual void GetRange(SensorRange* range) const = 0; + virtual void GetRange(SensorRange* range) const = 0; + + // Return the factory calibration parameters for the IMU + virtual void GetFactoryCalibration(Vector3f* AccelOffset, Vector3f* GyroOffset, + Matrix4f* AccelMatrix, Matrix4f* GyroMatrix, + float* Temperature) = 0; + // Enable/disable onboard IMU calibration + // If set to false, the device will return raw values + virtual void SetOnboardCalibrationEnabled(bool enabled) = 0; + + // Get/set feature reports added to DK2. See 'DK2 Firmware Specification' document for details. + virtual bool SetTrackingReport(const TrackingReport&) { return false; } + virtual bool GetTrackingReport(TrackingReport*) { return false; } + + virtual bool SetDisplayReport(const DisplayReport&) { return false; } + virtual bool GetDisplayReport(DisplayReport*) { return false; } + + virtual bool SetMagCalibrationReport(const MagCalibrationReport&) { return false; } + virtual bool GetMagCalibrationReport(MagCalibrationReport*) { return false; } + + virtual bool SetPositionCalibrationReport(const PositionCalibrationReport&) { return false; } + virtual bool GetAllPositionCalibrationReports(Array<PositionCalibrationReport>*) { return false; } + + virtual bool SetCustomPatternReport(const CustomPatternReport&) { return false; } + virtual bool GetCustomPatternReport(CustomPatternReport*) { return false; } + + virtual bool SetKeepAliveMuxReport(const KeepAliveMuxReport&) { return false; } + virtual bool GetKeepAliveMuxReport(KeepAliveMuxReport*) { return false; } + + virtual bool SetManufacturingReport(const ManufacturingReport&) { return false; } + virtual bool GetManufacturingReport(ManufacturingReport*) { return false; } + + virtual bool SetUUIDReport(const UUIDReport&) { return false; } + virtual bool GetUUIDReport(UUIDReport*) { return false; } + + virtual bool SetTemperatureReport(const TemperatureReport&) { return false; } + virtual bool GetAllTemperatureReports(Array<Array<TemperatureReport> >*) { return false; } + + virtual bool GetGyroOffsetReport(GyroOffsetReport*) { return false; } + + virtual bool SetLensDistortionReport(const LensDistortionReport&) { return false; } + virtual bool GetLensDistortionReport(LensDistortionReport*) { return false; } }; //------------------------------------------------------------------------------------- @@ -627,4 +1051,7 @@ public: } // namespace OVR + + + #endif diff --git a/LibOVR/Src/OVR_DeviceConstants.h b/LibOVR/Src/OVR_DeviceConstants.h index d5c2418..6b40b7d 100644 --- a/LibOVR/Src/OVR_DeviceConstants.h +++ b/LibOVR/Src/OVR_DeviceConstants.h @@ -6,16 +6,16 @@ Content : Device constants Created : February 5, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. 
All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -42,9 +42,101 @@ enum DeviceType Device_Sensor = 3, Device_LatencyTester = 4, Device_BootLoader = 5, + Device_Camera = 6, + Device_Display = 7, Device_All = 0xFF // Set for enumeration only, to enumerate all device types. }; + + +//------------------------------------------------------------------------------------- +// Different lens distortion types supported by devices. +// +enum DistortionEqnType +{ + Distortion_No_Override = -1, + // These two are legacy and deprecated. + Distortion_Poly4 = 0, // scale = (K0 + K1*r^2 + K2*r^4 + K3*r^6) + Distortion_RecipPoly4 = 1, // scale = 1/(K0 + K1*r^2 + K2*r^4 + K3*r^6) + + // CatmullRom10 is the preferred distortion format. + Distortion_CatmullRom10 = 2, // scale = Catmull-Rom spline through points (1.0, K[1]...K[9]) + + Distortion_LAST // For ease of enumeration. +}; + + +//------------------------------------------------------------------------------------- +// HMD types. +// +enum HmdTypeEnum +{ + HmdType_None, + + HmdType_DKProto, // First duct-tape model, never sold. + HmdType_DK1, // DevKit1 - on sale to developers. + HmdType_DKHDProto, // DKHD - shown at various shows, never sold. + HmdType_DKHD2Proto, // DKHD2, 5.85-inch panel, never sold. + HmdType_DKHDProto566Mi, // DKHD, 5.66-inch panel, never sold. + HmdType_CrystalCoveProto, // Crystal Cove, 5.66-inch panel, shown at shows but never sold. + HmdType_DK2, + + // Reminder - this header file is public - codenames only! + + HmdType_Unknown, // Used for unnamed HW lab experiments. + + HmdType_LAST +}; + + +//------------------------------------------------------------------------------------- +// HMD shutter types. +// +enum HmdShutterTypeEnum +{ + HmdShutter_Global, + HmdShutter_RollingTopToBottom, + HmdShutter_RollingLeftToRight, + HmdShutter_RollingRightToLeft, + // TODO: + // color-sequential e.g. LCOS? + // alternate eyes? + // alternate columns? + // outside-in? + + HmdShutter_LAST +}; + + + +//------------------------------------------------------------------------------------- +// For headsets that use eye cups +// +enum EyeCupType +{ + // Public lenses + EyeCup_DK1A = 0, + EyeCup_DK1B = 1, + EyeCup_DK1C = 2, + + EyeCup_DK2A = 3, + + // Internal R&D codenames. + // Reminder - this header file is public - codenames only!
+ EyeCup_DKHD2A, + EyeCup_OrangeA, + EyeCup_RedA, + EyeCup_PinkA, + EyeCup_BlueA, + EyeCup_Delilah1A, + EyeCup_Delilah2A, + EyeCup_JamesA, + EyeCup_SunMandalaA, + + EyeCup_LAST +}; + + } // namespace OVR #endif diff --git a/LibOVR/Src/OVR_DeviceHandle.cpp b/LibOVR/Src/OVR_DeviceHandle.cpp index 4a98897..cf6f05f 100644 --- a/LibOVR/Src/OVR_DeviceHandle.cpp +++ b/LibOVR/Src/OVR_DeviceHandle.cpp @@ -5,16 +5,16 @@ Content : Implementation of device handle class Created : February 5, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_DeviceHandle.h b/LibOVR/Src/OVR_DeviceHandle.h index e811d23..dd3e92b 100644 --- a/LibOVR/Src/OVR_DeviceHandle.h +++ b/LibOVR/Src/OVR_DeviceHandle.h @@ -6,16 +6,16 @@ Content : Handle to a device that was enumerated Created : February 5, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_DeviceImpl.cpp b/LibOVR/Src/OVR_DeviceImpl.cpp index 140598f..5b77708 100644 --- a/LibOVR/Src/OVR_DeviceImpl.cpp +++ b/LibOVR/Src/OVR_DeviceImpl.cpp @@ -5,16 +5,16 @@ Content : Partial back-end independent implementation of Device interfaces Created : October 10, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -37,75 +37,6 @@ namespace OVR { //------------------------------------------------------------------------------------- -// ***** SharedLock - -// This is a general purpose globally shared Lock implementation that should probably be -// moved to Kernel. -// May in theory busy spin-wait if we hit contention on first lock creation, -// but this shouldn't matter in practice since Lock* should be cached. - - -enum { LockInitMarker = 0xFFFFFFFF }; - -Lock* SharedLock::GetLockAddRef() -{ - int oldUseCount; - - do { - oldUseCount = UseCount; - if (oldUseCount == LockInitMarker) - continue; - - if (oldUseCount == 0) - { - // Initialize marker - if (AtomicOps<int>::CompareAndSet_Sync(&UseCount, 0, LockInitMarker)) - { - Construct<Lock>(Buffer); - do { } - while (!AtomicOps<int>::CompareAndSet_Sync(&UseCount, LockInitMarker, 1)); - return toLock(); - } - continue; - } - - } while (!AtomicOps<int>::CompareAndSet_NoSync(&UseCount, oldUseCount, oldUseCount + 1)); - - return toLock(); -} - -void SharedLock::ReleaseLock(Lock* plock) -{ - OVR_UNUSED(plock); - OVR_ASSERT(plock == toLock()); - - int oldUseCount; - - do { - oldUseCount = UseCount; - OVR_ASSERT(oldUseCount != LockInitMarker); - - if (oldUseCount == 1) - { - // Initialize marker - if (AtomicOps<int>::CompareAndSet_Sync(&UseCount, 1, LockInitMarker)) - { - Destruct<Lock>(toLock()); - - do { } - while (!AtomicOps<int>::CompareAndSet_Sync(&UseCount, LockInitMarker, 0)); - - return; - } - continue; - } - - } while (!AtomicOps<int>::CompareAndSet_NoSync(&UseCount, oldUseCount, oldUseCount - 1)); -} - - - -//------------------------------------------------------------------------------------- // ***** MessageHandler // Threading notes: @@ -119,8 +50,13 @@ static SharedLock MessageHandlerSharedLock; class MessageHandlerImpl { public: + enum + { + MaxHandlerRefsCount = 4 + }; + MessageHandlerImpl() - : pLock(MessageHandlerSharedLock.GetLockAddRef()) + : pLock(MessageHandlerSharedLock.GetLockAddRef()), HandlerRefsCount(0) { } ~MessageHandlerImpl() @@ -136,14 +72,15 @@ public: // This lock is held while calling a handler and when we are applied/ // removed from a device. - Lock* pLock; - // List of device we are applied to. - List<MessageHandlerRef> UseList; + Lock* pLock; + // List of devices we are applied to. 
+ int HandlerRefsCount; + MessageHandlerRef* pHandlerRefs[MaxHandlerRefsCount]; }; MessageHandlerRef::MessageHandlerRef(DeviceBase* device) - : pLock(MessageHandlerSharedLock.GetLockAddRef()), pDevice(device), pHandler(0) + : pLock(MessageHandlerSharedLock.GetLockAddRef()), pDevice(device), HandlersCount(0) { } @@ -151,41 +88,87 @@ MessageHandlerRef::~MessageHandlerRef() { { Lock::Locker lockScope(pLock); - if (pHandler) - { - pHandler = 0; - RemoveNode(); - } + + while (HandlersCount > 0) + removeHandler(0); } MessageHandlerSharedLock.ReleaseLock(pLock); pLock = 0; } -void MessageHandlerRef::SetHandler(MessageHandler* handler) +void MessageHandlerRef::Call(const Message& msg) +{ + Lock::Locker lockScope(pLock); + + for (int i = 0; i < HandlersCount; i++) + pHandlers[i]->OnMessage(msg); +} + +void MessageHandlerRef::AddHandler(MessageHandler* handler) { OVR_ASSERT(!handler || MessageHandlerImpl::FromHandler(handler)->pLock == pLock); Lock::Locker lockScope(pLock); - SetHandler_NTS(handler); + AddHandler_NTS(handler); +} + +void MessageHandlerRef::AddHandler_NTS(MessageHandler* handler) +{ + OVR_ASSERT(handler != NULL); + + OVR_ASSERT(HandlersCount < MaxHandlersCount); + for (int i = 0; i < HandlersCount; i++) + if (pHandlers[i] == handler) + // handler already installed - do nothing + return; + pHandlers[HandlersCount] = handler; + HandlersCount++; + + MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(handler); + OVR_ASSERT(handlerImpl->HandlerRefsCount < MessageHandlerImpl::MaxHandlerRefsCount); + handlerImpl->pHandlerRefs[handlerImpl->HandlerRefsCount] = this; + handlerImpl->HandlerRefsCount++; + + // TBD: Call notifier on device? } -void MessageHandlerRef::SetHandler_NTS(MessageHandler* handler) -{ - if (pHandler != handler) +bool MessageHandlerRef::RemoveHandler(MessageHandler* handler) +{ + Lock::Locker lockScope(pLock); + + for (int i = 0; i < HandlersCount; i++) { - if (pHandler) - RemoveNode(); - pHandler = handler; + if (pHandlers[i] == handler) + return removeHandler(i); + } + return false; +} - if (handler) +bool MessageHandlerRef::removeHandler(int idx) +{ + OVR_ASSERT(idx < HandlersCount); + + MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(pHandlers[idx]); + for (int i = 0; i < handlerImpl->HandlerRefsCount; i++) + if (handlerImpl->pHandlerRefs[i] == this) { - MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(handler); - handlerImpl->UseList.PushBack(this); + handlerImpl->pHandlerRefs[i] = handlerImpl->pHandlerRefs[handlerImpl->HandlerRefsCount - 1]; + handlerImpl->HandlerRefsCount--; + + pHandlers[idx] = pHandlers[HandlersCount - 1]; + HandlersCount--; + + return true; } - // TBD: Call notifier on device? 
- } -} + // couldn't find a link in the opposite direction, assert in Debug + OVR_ASSERT(0); + + pHandlers[idx] = pHandlers[HandlersCount - 1]; + HandlersCount--; + + return true; +} MessageHandler::MessageHandler() { @@ -198,7 +181,7 @@ MessageHandler::~MessageHandler() MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(this); { Lock::Locker lockedScope(handlerImpl->pLock); - OVR_ASSERT_LOG(handlerImpl->UseList.IsEmpty(), + OVR_ASSERT_LOG(handlerImpl->HandlerRefsCount == 0, ("~MessageHandler %p - Handler still active; call RemoveHandlerFromDevices", this)); } @@ -209,19 +192,19 @@ bool MessageHandler::IsHandlerInstalled() const { const MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(this); Lock::Locker lockedScope(handlerImpl->pLock); - return handlerImpl->UseList.IsEmpty() != true; -} + return handlerImpl->HandlerRefsCount > 0; +} void MessageHandler::RemoveHandlerFromDevices() { MessageHandlerImpl* handlerImpl = MessageHandlerImpl::FromHandler(this); Lock::Locker lockedScope(handlerImpl->pLock); - while(!handlerImpl->UseList.IsEmpty()) + while (handlerImpl->HandlerRefsCount > 0) { - MessageHandlerRef* use = handlerImpl->UseList.GetFirst(); - use->SetHandler_NTS(0); + MessageHandlerRef* use = handlerImpl->pHandlerRefs[0]; + use->RemoveHandler(this); } } @@ -255,13 +238,9 @@ DeviceManager* DeviceBase::GetManager() const return getDeviceCommon()->pCreateDesc->GetManagerImpl(); } -void DeviceBase::SetMessageHandler(MessageHandler* handler) -{ - getDeviceCommon()->HandlerRef.SetHandler(handler); -} -MessageHandler* DeviceBase::GetMessageHandler() const +void DeviceBase::AddMessageHandler(MessageHandler* handler) { - return getDeviceCommon()->HandlerRef.GetHandler(); + getDeviceCommon()->HandlerRef.AddHandler(handler); } DeviceType DeviceBase::GetType() const @@ -276,6 +255,12 @@ bool DeviceBase::GetDeviceInfo(DeviceInfo* info) const //return false; } +// Returns true if device is connected and usable +bool DeviceBase::IsConnected() +{ + return getDeviceCommon()->ConnectedFlag; +} + // returns the MessageHandler's lock Lock* DeviceBase::GetHandlerLock() const { diff --git a/LibOVR/Src/OVR_DeviceImpl.h b/LibOVR/Src/OVR_DeviceImpl.h index 7f85c78..80b227b 100644 --- a/LibOVR/Src/OVR_DeviceImpl.h +++ b/LibOVR/Src/OVR_DeviceImpl.h @@ -5,16 +5,16 @@ Content : Partial back-end independent implementation of Device interfaces Created : October 10, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -43,61 +43,44 @@ class DeviceFactory; enum { - Oculus_VendorId = 0x2833 -}; - -//------------------------------------------------------------------------------------- -// Globally shared Lock implementation used for MessageHandlers. 
- -class SharedLock -{ -public: - SharedLock() : UseCount(0) {} - - Lock* GetLockAddRef(); - void ReleaseLock(Lock* plock); - -private: - Lock* toLock() { return (Lock*)Buffer; } - - // UseCount and max alignment. - volatile int UseCount; - UInt64 Buffer[(sizeof(Lock)+sizeof(UInt64)-1)/sizeof(UInt64)]; + Oculus_VendorId = 0x2833, + Device_Tracker_ProductId = 0x0001, + Device_Tracker2_ProductId = 0x0021, + Device_KTracker_ProductId = 0x0010, }; // Wrapper for MessageHandler that includes synchronization logic. -// References to MessageHandlers are organized in a list to allow for them to -// easily removed with MessageHandler::RemoveAllHandlers. -class MessageHandlerRef : public ListNode<MessageHandlerRef> -{ +class MessageHandlerRef +{ + enum + { + MaxHandlersCount = 4 + }; public: MessageHandlerRef(DeviceBase* device); ~MessageHandlerRef(); - void SetHandler(MessageHandler* hander); - + bool HasHandlers() const { return HandlersCount > 0; }; + void AddHandler(MessageHandler* handler); + // returns false if the handler is not found + bool RemoveHandler(MessageHandler* handler); // Not-thread-safe version - void SetHandler_NTS(MessageHandler* hander); + void AddHandler_NTS(MessageHandler* handler); - void Call(const Message& msg) - { - Lock::Locker lockScope(pLock); - if (pHandler) - pHandler->OnMessage(msg); - } + void Call(const Message& msg); Lock* GetLock() const { return pLock; } - - // GetHandler() is not thread safe if used out of order across threads; nothing can be done - // about that. - MessageHandler* GetHandler() const { return pHandler; } DeviceBase* GetDevice() const { return pDevice; } private: Lock* pLock; // Cached global handler lock. DeviceBase* pDevice; - MessageHandler* pHandler; + + int HandlersCount; + MessageHandler* pHandlers[MaxHandlersCount]; + + bool removeHandler(int idx); }; @@ -221,12 +204,15 @@ public: AtomicInt<UInt32> RefCount; Ptr<DeviceCreateDesc> pCreateDesc; Ptr<DeviceBase> pParent; + volatile bool ConnectedFlag; MessageHandlerRef HandlerRef; DeviceCommon(DeviceCreateDesc* createDesc, DeviceBase* device, DeviceBase* parent) - : RefCount(1), pCreateDesc(createDesc), pParent(parent), HandlerRef(device) + : RefCount(1), pCreateDesc(createDesc), pParent(parent), + ConnectedFlag(true), HandlerRef(device) { } + virtual ~DeviceCommon() {} // Device reference counting delegates to Manager thread to actually kill devices. void DeviceAddRef(); diff --git a/LibOVR/Src/OVR_DeviceMessages.h b/LibOVR/Src/OVR_DeviceMessages.h index 6d525b3..0fe0a3c 100644 --- a/LibOVR/Src/OVR_DeviceMessages.h +++ b/LibOVR/Src/OVR_DeviceMessages.h @@ -6,16 +6,16 @@ Content : Definition of messages generated by devices Created : February 5, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -35,10 +35,12 @@ limitations under the License. #include "Kernel/OVR_Array.h" #include "Kernel/OVR_Color.h" + namespace OVR { class DeviceBase; class DeviceHandle; +class String; #define OVR_MESSAGETYPE(devName, msgIndex) ((Device_##devName << 8) | msgIndex) @@ -55,12 +57,17 @@ enum MessageType Message_DeviceRemoved = OVR_MESSAGETYPE(Manager, 1), // Existing device has been plugged/unplugged. // Sensor Messages Message_BodyFrame = OVR_MESSAGETYPE(Sensor, 0), // Emitted by sensor at regular intervals. + Message_ExposureFrame = OVR_MESSAGETYPE(Sensor, 1), + Message_PixelRead = OVR_MESSAGETYPE(Sensor, 2), + // Latency Tester Messages Message_LatencyTestSamples = OVR_MESSAGETYPE(LatencyTester, 0), Message_LatencyTestColorDetected = OVR_MESSAGETYPE(LatencyTester, 1), Message_LatencyTestStarted = OVR_MESSAGETYPE(LatencyTester, 2), Message_LatencyTestButton = OVR_MESSAGETYPE(LatencyTester, 3), - + + Message_CameraFrame = OVR_MESSAGETYPE(Camera, 0), + Message_CameraAdded = OVR_MESSAGETYPE(Camera, 1), }; //------------------------------------------------------------------------------------- @@ -89,19 +96,37 @@ public: // - Yaw is rotation around Y, positive for turning left. // - Pitch is rotation around X, positive for pitching up. +//------------------------------------------------------------------------------------- +// ***** Sensor + class MessageBodyFrame : public Message { public: MessageBodyFrame(DeviceBase* dev) - : Message(Message_BodyFrame, dev), Temperature(0.0f), TimeDelta(0.0f) + : Message(Message_BodyFrame, dev), Temperature(0.0f), TimeDelta(0.0f), MagCalibrated(false) { } Vector3f Acceleration; // Acceleration in m/s^2. - Vector3f RotationRate; // Angular velocity in rad/s^2. + Vector3f RotationRate; // Angular velocity in rad/s. Vector3f MagneticField; // Magnetic field strength in Gauss. float Temperature; // Temperature reading on sensor surface, in degrees Celsius. float TimeDelta; // Time passed since last Body Frame, in seconds. + + bool MagCalibrated; // True if MagneticField is calibrated, false if raw + + // The absolute time, from the host computer's perspective, at which the message should be + // interpreted. This is based on the incoming timestamp and processed by a filter + // that syncs the clocks while attempting to keep the spacing between messages + // consistent with the device clock. + // + // Integration should use TimeDelta, but prediction into the future should derive + // the delta time from PredictToSeconds - AbsoluteTimeSeconds. + // + // This value will generally be <= the return from a call to ovr_GetTimeInSeconds(), + // but could be greater by under 1 ms due to system time update interrupt delays.
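+ // + // For example (hypothetical numbers): if AbsoluteTimeSeconds = 10.000 and the caller + // wants to predict to PredictToSeconds = 10.016, the prediction delta is + // 10.016 - 10.000 = 0.016 seconds, while integration of this message still advances + // the fusion state by TimeDelta.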
+ // + double AbsoluteTimeSeconds; }; // Sent when we receive a device status changes (e.g.: @@ -109,10 +134,36 @@ public: class MessageDeviceStatus : public Message { public: - MessageDeviceStatus(MessageType type, DeviceBase* dev, const DeviceHandle &hdev) - : Message(type, dev), Handle(hdev) { } + MessageDeviceStatus(MessageType type, DeviceBase* dev, const DeviceHandle &hdev) + : Message(type, dev), Handle(hdev) { } - DeviceHandle Handle; + DeviceHandle Handle; +}; + +class MessageExposureFrame : public Message +{ +public: + MessageExposureFrame(DeviceBase* dev) + : Message(Message_ExposureFrame, dev), + CameraPattern(0), CameraFrameCount(0), CameraTimeSeconds(0) { } + + UByte CameraPattern; + UInt32 CameraFrameCount; + double CameraTimeSeconds; +}; + +class MessagePixelRead : public Message +{ +public: + MessagePixelRead(DeviceBase* dev) + : Message(Message_PixelRead, dev), + PixelReadValue(0), SensorTimeSeconds(0), FrameTimeSeconds(0) { } + + UByte PixelReadValue; + UInt32 RawSensorTime; + UInt32 RawFrameTime; + double SensorTimeSeconds; + double FrameTimeSeconds; }; //------------------------------------------------------------------------------------- @@ -167,6 +218,50 @@ public: }; +//------------------------------------------------------------------------------------- +// ***** Camera + +// Sent by camera, frame. +class MessageCameraFrame : public Message +{ +public: + MessageCameraFrame(DeviceBase* dev) + : Message(Message_CameraFrame, dev) + { + LostFrames = 0; + } + + void SetInfo(UInt32 frameNumber, double timeSeconds, UInt32 width, UInt32 height, UInt32 format) + { + FrameNumber = frameNumber; + ArrivalTimeSeconds = timeSeconds; + Width = width; + Height = height; + Format = format; + } + + void SetData(const UByte* pdata, UInt32 sizeInBytes) + { + pFrameData = pdata; + FrameSizeInBytes = sizeInBytes; + } + + UInt32 FrameNumber; // an index of the frame + double ArrivalTimeSeconds; // frame time in seconds, as recorded by the host computer + const UByte* pFrameData; // a ptr to frame data. + UInt32 FrameSizeInBytes; // size of the data in the pFrameData. + UInt32 Width, Height; // width & height in pixels. + UInt32 Format; // format of pixel, see CameraDevice::PixelFormat enum + UInt32 LostFrames; // number of lost frames before this frame +}; + +// Sent when a new camera is connected +class MessageCameraAdded : public Message +{ +public: + MessageCameraAdded(DeviceBase* dev) + : Message(Message_CameraAdded, dev) { } +}; } // namespace OVR diff --git a/LibOVR/Src/OVR_HIDDevice.h b/LibOVR/Src/OVR_HIDDevice.h index 7fc6fee..657758b 100644 --- a/LibOVR/Src/OVR_HIDDevice.h +++ b/LibOVR/Src/OVR_HIDDevice.h @@ -5,16 +5,16 @@ Content : Cross platform HID device interface. Created : February 22, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -129,8 +129,8 @@ public: virtual void OnInputReport(UByte* pData, UInt32 length) { OVR_UNUSED2(pData, length); } - virtual UInt64 OnTicks(UInt64 ticksMks) - { OVR_UNUSED1(ticksMks); return Timer::MksPerSecond * 1000; ; } + virtual double OnTicks(double tickSeconds) + { OVR_UNUSED1(tickSeconds); return 1000.0 ; } enum HIDDeviceMessageType { diff --git a/LibOVR/Src/OVR_HIDDeviceBase.h b/LibOVR/Src/OVR_HIDDeviceBase.h index 9d20dfc..7dfd6b4 100644 --- a/LibOVR/Src/OVR_HIDDeviceBase.h +++ b/LibOVR/Src/OVR_HIDDeviceBase.h @@ -6,16 +6,16 @@ Content : Definition of HID device interface. Created : March 11, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_HIDDeviceImpl.h b/LibOVR/Src/OVR_HIDDeviceImpl.h index 598adba..1399da6 100644 --- a/LibOVR/Src/OVR_HIDDeviceImpl.h +++ b/LibOVR/Src/OVR_HIDDeviceImpl.h @@ -5,16 +5,16 @@ Content : Implementation of HIDDevice. Created : March 7, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -66,26 +66,21 @@ public: MessageType handlerMessageType; switch (messageType) { case HIDDeviceMessage_DeviceAdded: - handlerMessageType = Message_DeviceAdded; + handlerMessageType = Message_DeviceAdded; + DeviceImpl<B>::ConnectedFlag = true; break; case HIDDeviceMessage_DeviceRemoved: - handlerMessageType = Message_DeviceRemoved; + handlerMessageType = Message_DeviceRemoved; + DeviceImpl<B>::ConnectedFlag = false; break; default: OVR_ASSERT(0); return; } // Do device notification. 
- { - Lock::Locker scopeLock(this->HandlerRef.GetLock()); - - if (this->HandlerRef.GetHandler()) - { - MessageDeviceStatus status(handlerMessageType, this, OVR::DeviceHandle(this->pCreateDesc)); - this->HandlerRef.GetHandler()->OnMessage(status); - } - } + MessageDeviceStatus status(handlerMessageType, this, OVR::DeviceHandle(this->pCreateDesc)); + this->HandlerRef.Call(status); // Do device manager notification. DeviceManagerImpl* manager = this->GetManagerImpl(); @@ -128,9 +123,6 @@ public: { InternalDevice->SetHandler(NULL); - // Remove the handler, if any. - this->HandlerRef.SetHandler(0); - DeviceImpl<B>::pParent.Clear(); } @@ -144,37 +136,21 @@ public: return DeviceImpl<B>::pCreateDesc->GetManagerImpl()->GetHIDDeviceManager(); } - - struct WriteData - { - enum { BufferSize = 64 }; - UByte Buffer[64]; - UPInt Size; - - WriteData(UByte* data, UPInt size) : Size(size) - { - OVR_ASSERT(size <= BufferSize); - memcpy(Buffer, data, size); - } - }; - bool SetFeatureReport(UByte* data, UInt32 length) { - WriteData writeData(data, length); - // Push call with wait. bool result = false; ThreadCommandQueue* pQueue = this->GetManagerImpl()->GetThreadQueue(); - if (!pQueue->PushCallAndWaitResult(this, &HIDDeviceImpl::setFeatureReport, &result, writeData)) + if (!pQueue->PushCallAndWaitResult(this, &HIDDeviceImpl::setFeatureReport, &result, data, length)) return false; return result; } - bool setFeatureReport(const WriteData& data) + bool setFeatureReport(UByte* data, UInt32 length) { - return InternalDevice->SetFeatureReport((UByte*) data.Buffer, (UInt32) data.Size); + return InternalDevice->SetFeatureReport(data, length); } bool GetFeatureReport(UByte* data, UInt32 length) @@ -193,6 +169,17 @@ public: return InternalDevice->GetFeatureReport(data, length); } + UByte GetDeviceInterfaceVersion() + { + UInt16 versionNumber = getHIDDesc()->VersionNumber; + + // Our interface and hardware versions are represented as two BCD digits each. + // Interface version is in the last two digits. + UByte interfaceVersion = (UByte) ((versionNumber & 0x000F) >> 0) * 1 + + ((versionNumber & 0x00F0) >> 4) * 10; + return interfaceVersion; + } + protected: HIDDevice* GetInternalDevice() const { diff --git a/LibOVR/Src/OVR_JSON.cpp b/LibOVR/Src/OVR_JSON.cpp index 0625f6d..209a41f 100644 --- a/LibOVR/Src/OVR_JSON.cpp +++ b/LibOVR/Src/OVR_JSON.cpp @@ -30,16 +30,16 @@ Notes : THE SOFTWARE. -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -949,6 +949,17 @@ void JSON::ReplaceItem(unsigned int index, JSON* new_item) } */ +// Removes and frees the last child item +void JSON::RemoveLast() +{ + JSON* child = Children.GetLast(); + if (!Children.IsNull(child)) + { + child->RemoveNode(); + child->Release(); + } +} + // Helper function to simplify creation of a typed object JSON* JSON::createHelper(JSONItemType itemType, double dval, const char* strVal) { @@ -973,6 +984,31 @@ void JSON::AddArrayElement(JSON *item) Children.PushBack(item); } +// Inserts an element into a valid array position +void JSON::InsertArrayElement(int index, JSON *item) +{ + if (!item) + return; + + if (index == 0) + { + Children.PushFront(item); + return; + } + + JSON* iter = Children.GetFirst(); + int i=0; + while (iter && i<index) + { + iter = Children.GetNext(iter); + i++; + } + + if (iter) + iter->InsertNodeBefore(item); + else + Children.PushBack(item); +} // Returns the size of an array int JSON::GetArraySize() @@ -1011,6 +1047,23 @@ const char* JSON::GetArrayString(int index) } } +JSON* JSON::Copy() +{ + JSON* copy = new JSON(Type); + copy->Name = Name; + copy->Value = Value; + copy->dValue = dValue; + + JSON* child = Children.GetFirst(); + while (!Children.IsNull(child)) + { + copy->Children.PushBack(child->Copy()); + child = Children.GetNext(child); + } + + return copy; +} + //----------------------------------------------------------------------------- // Loads and parses the given JSON file pathname and returns a JSON object tree. // The returned object must be Released after use. @@ -1051,7 +1104,7 @@ bool JSON::Save(const char* path) if (text) { SPInt len = OVR_strlen(text); - OVR_ASSERT(len < (SPInt)(int)len); + OVR_ASSERT(len <= (SPInt)(int)len); int bytes = f.Write((UByte*)text, (int)len); f.Close(); diff --git a/LibOVR/Src/OVR_JSON.h b/LibOVR/Src/OVR_JSON.h index ece84be..7a2e939 100644 --- a/LibOVR/Src/OVR_JSON.h +++ b/LibOVR/Src/OVR_JSON.h @@ -7,16 +7,16 @@ Created : April 9, 2013 Author : Brant Lewis Notes : -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -48,7 +48,6 @@ enum JSONItemType JSON_Object = 6 }; - //----------------------------------------------------------------------------- // ***** JSON @@ -90,7 +89,6 @@ public: // Saves a JSON object to a file. bool Save(const char* path); - // *** Object Member Access // These provide access to child items of the list. 
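For illustration, a minimal usage sketch of the JSON helpers implemented in this change (InsertArrayElement, Copy, RemoveLast) alongside the existing member and array helpers. JSON::CreateObject() and JSON::CreateArray() are assumed to be declared in the portions of OVR_JSON.h not shown in this diff; the other calls appear in the surrounding hunks. This is a sketch, not part of the patch.

    #include "OVR_JSON.h"

    using namespace OVR;

    // Build a small document, exercise the new helpers, and persist it.
    static void JsonHelpersSketch()
    {
        JSON* root   = JSON::CreateObject();  // assumed factory (not part of this hunk)
        JSON* values = JSON::CreateArray();   // assumed factory (not part of this hunk)

        values->AddArrayNumber(1.0);                             // [1]
        values->AddArrayNumber(3.0);                             // [1, 3]
        values->InsertArrayElement(1, JSON::CreateNumber(2.0));  // [1, 2, 3]

        root->AddItem("values", values);
        root->AddStringItem("name", "example");

        JSON* snapshot = root->Copy();  // deep copy of the whole tree
        root->RemoveLast();             // removes and frees the last child ("name")

        root->Save("example.json");     // snapshot still holds the original contents

        snapshot->Release();            // JSON nodes are reference counted
        root->Release();
    }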
@@ -117,11 +115,13 @@ public: void AddStringItem(const char* name, const char* s) { AddItem(name, CreateString(s)); } // void ReplaceItem(unsigned index, JSON* new_item); // void DeleteItem(unsigned index); + void RemoveLast(); // *** Array Element Access // Add new elements to the end of array. void AddArrayElement(JSON *item); + void InsertArrayElement(int index, JSON* item); void AddArrayNumber(double n) { AddArrayElement(CreateNumber(n)); } void AddArrayString(const char* s) { AddArrayElement(CreateString(s)); } @@ -130,6 +130,7 @@ public: double GetArrayNumber(int index); const char* GetArrayString(int index); + JSON* Copy(); // Create a copy of this object protected: JSON(JSONItemType itemType = JSON_Object); diff --git a/LibOVR/Src/OVR_LatencyTestImpl.cpp b/LibOVR/Src/OVR_LatencyTestImpl.cpp index 209487c..9385a37 100644 --- a/LibOVR/Src/OVR_LatencyTestImpl.cpp +++ b/LibOVR/Src/OVR_LatencyTestImpl.cpp @@ -5,16 +5,16 @@ Content : Oculus Latency Tester device implementation. Created : March 7, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -25,9 +25,12 @@ limitations under the License. 
*************************************************************************************/ #include "OVR_LatencyTestImpl.h" +#include "Kernel/OVR_Alg.h" namespace OVR { +using namespace Alg; + //------------------------------------------------------------------------------------- // ***** Oculus Latency Tester specific packet data structures @@ -36,18 +39,6 @@ enum { LatencyTester_ProductId = 0x0101, }; -// Reported data is little-endian now -static UInt16 DecodeUInt16(const UByte* buffer) -{ - return (UInt16(buffer[1]) << 8) | UInt16(buffer[0]); -} - -/* Unreferenced -static SInt16 DecodeSInt16(const UByte* buffer) -{ - return (SInt16(buffer[1]) << 8) | SInt16(buffer[0]); -}*/ - static void UnpackSamples(const UByte* buffer, UByte* r, UByte* g, UByte* b) { *r = buffer[0]; @@ -355,8 +346,7 @@ struct LatencyTestStartTestImpl UInt16 commandID = 1; Buffer[0] = 8; - Buffer[1] = UByte(commandID & 0xFF); - Buffer[2] = UByte(commandID >> 8); + EncodeUInt16(Buffer+1, commandID); Buffer[3] = TargetColor.R; Buffer[4] = TargetColor.G; Buffer[5] = TargetColor.B; @@ -364,7 +354,7 @@ struct LatencyTestStartTestImpl void Unpack() { -// UInt16 commandID = Buffer[1] | (UInt16(Buffer[2]) << 8); +// UInt16 commandID = DecodeUInt16(Buffer+1); TargetColor.R = Buffer[3]; TargetColor.G = Buffer[4]; TargetColor.B = Buffer[5]; @@ -388,19 +378,13 @@ struct LatencyTestDisplayImpl { Buffer[0] = 9; Buffer[1] = Display.Mode; - Buffer[2] = UByte(Display.Value & 0xFF); - Buffer[3] = UByte((Display.Value >> 8) & 0xFF); - Buffer[4] = UByte((Display.Value >> 16) & 0xFF); - Buffer[5] = UByte((Display.Value >> 24) & 0xFF); + EncodeUInt32(Buffer+2, Display.Value); } void Unpack() { Display.Mode = Buffer[1]; - Display.Value = UInt32(Buffer[2]) | - (UInt32(Buffer[3]) << 8) | - (UInt32(Buffer[4]) << 16) | - (UInt32(Buffer[5]) << 24); + Display.Value = DecodeUInt32(Buffer+2); } }; @@ -471,17 +455,17 @@ bool LatencyTestDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const (info->InfoClassType != Device_None)) return false; - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, HIDDesc.Product.ToCStr()); - OVR_strcpy(info->Manufacturer, DeviceInfo::MaxNameLength, HIDDesc.Manufacturer.ToCStr()); - info->Type = Device_LatencyTester; + info->Type = Device_LatencyTester; + info->ProductName = HIDDesc.Product; + info->Manufacturer = HIDDesc.Manufacturer; + info->Version = HIDDesc.VersionNumber; if (info->InfoClassType == Device_LatencyTester) { SensorInfo* sinfo = (SensorInfo*)info; sinfo->VendorId = HIDDesc.VendorId; sinfo->ProductId = HIDDesc.ProductId; - sinfo->Version = HIDDesc.VersionNumber; - OVR_strcpy(sinfo->SerialNumber, sizeof(sinfo->SerialNumber),HIDDesc.SerialNumber.ToCStr()); + sinfo->SerialNumber = HIDDesc.SerialNumber; } return true; } @@ -712,7 +696,7 @@ void LatencyTestDeviceImpl::onLatencyTestSamplesMessage(LatencyTestSamplesMessag // Call OnMessage() within a lock to avoid conflicts with handlers. 
Lock::Locker scopeLock(HandlerRef.GetLock()); - if (HandlerRef.GetHandler()) + if (HandlerRef.HasHandlers()) { MessageLatencyTestSamples samples(this); for (UByte i = 0; i < s.SampleCount; i++) @@ -720,7 +704,7 @@ void LatencyTestDeviceImpl::onLatencyTestSamplesMessage(LatencyTestSamplesMessag samples.Samples.PushBack(Color(s.Samples[i].Value[0], s.Samples[i].Value[1], s.Samples[i].Value[2])); } - HandlerRef.GetHandler()->OnMessage(samples); + HandlerRef.Call(samples); } } @@ -734,14 +718,14 @@ void LatencyTestDeviceImpl::onLatencyTestColorDetectedMessage(LatencyTestColorDe // Call OnMessage() within a lock to avoid conflicts with handlers. Lock::Locker scopeLock(HandlerRef.GetLock()); - if (HandlerRef.GetHandler()) + if (HandlerRef.HasHandlers()) { MessageLatencyTestColorDetected detected(this); detected.Elapsed = s.Elapsed; detected.DetectedValue = Color(s.TriggerValue[0], s.TriggerValue[1], s.TriggerValue[2]); detected.TargetValue = Color(s.TargetValue[0], s.TargetValue[1], s.TargetValue[2]); - HandlerRef.GetHandler()->OnMessage(detected); + HandlerRef.Call(detected); } } @@ -755,12 +739,12 @@ void LatencyTestDeviceImpl::onLatencyTestStartedMessage(LatencyTestStartedMessag // Call OnMessage() within a lock to avoid conflicts with handlers. Lock::Locker scopeLock(HandlerRef.GetLock()); - if (HandlerRef.GetHandler()) + if (HandlerRef.HasHandlers()) { MessageLatencyTestStarted started(this); started.TargetValue = Color(ts.TargetValue[0], ts.TargetValue[1], ts.TargetValue[2]); - HandlerRef.GetHandler()->OnMessage(started); + HandlerRef.Call(started); } } @@ -774,11 +758,11 @@ void LatencyTestDeviceImpl::onLatencyTestButtonMessage(LatencyTestButtonMessage* // Call OnMessage() within a lock to avoid conflicts with handlers. Lock::Locker scopeLock(HandlerRef.GetLock()); - if (HandlerRef.GetHandler()) + if (HandlerRef.HasHandlers()) { MessageLatencyTestButton button(this); - HandlerRef.GetHandler()->OnMessage(button); + HandlerRef.Call(button); } } diff --git a/LibOVR/Src/OVR_LatencyTestImpl.h b/LibOVR/Src/OVR_LatencyTestImpl.h index bab0180..21ef331 100644 --- a/LibOVR/Src/OVR_LatencyTestImpl.h +++ b/LibOVR/Src/OVR_LatencyTestImpl.h @@ -5,16 +5,16 @@ Content : Latency Tester specific implementation. Created : March 7, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_Linux_DeviceManager.cpp b/LibOVR/Src/OVR_Linux_DeviceManager.cpp deleted file mode 100644 index 298534b..0000000 --- a/LibOVR/Src/OVR_Linux_DeviceManager.cpp +++ /dev/null @@ -1,330 +0,0 @@ -/************************************************************************************ - -Filename : OVR_Linux_DeviceManager.h -Content : Linux implementation of DeviceManager. 
-Created : -Authors : - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_Linux_DeviceManager.h" - -// Sensor & HMD Factories -#include "OVR_LatencyTestImpl.h" -#include "OVR_SensorImpl.h" -#include "OVR_Linux_HIDDevice.h" -#include "OVR_Linux_HMDDevice.h" - -#include "Kernel/OVR_Timer.h" -#include "Kernel/OVR_Std.h" -#include "Kernel/OVR_Log.h" - -namespace OVR { namespace Linux { - - -//------------------------------------------------------------------------------------- -// **** Linux::DeviceManager - -DeviceManager::DeviceManager() -{ -} - -DeviceManager::~DeviceManager() -{ -} - -bool DeviceManager::Initialize(DeviceBase*) -{ - if (!DeviceManagerImpl::Initialize(0)) - return false; - - pThread = *new DeviceManagerThread(); - if (!pThread || !pThread->Start()) - return false; - - // Wait for the thread to be fully up and running. - pThread->StartupEvent.Wait(); - - // Do this now that we know the thread's run loop. - HidDeviceManager = *HIDDeviceManager::CreateInternal(this); - - pCreateDesc->pDevice = this; - LogText("OVR::DeviceManager - initialized.\n"); - return true; -} - -void DeviceManager::Shutdown() -{ - LogText("OVR::DeviceManager - shutting down.\n"); - - // Set Manager shutdown marker variable; this prevents - // any existing DeviceHandle objects from accessing device. - pCreateDesc->pLock->pManager = 0; - - // Push for thread shutdown *WITH NO WAIT*. - // This will have the following effect: - // - Exit command will get enqueued, which will be executed later on the thread itself. - // - Beyond this point, this DeviceManager object may be deleted by our caller. - // - Other commands, such as CreateDevice, may execute before ExitCommand, but they will - // fail gracefully due to pLock->pManager == 0. Future commands can't be enqued - // after pManager is null. - // - Once ExitCommand executes, ThreadCommand::Run loop will exit and release the last - // reference to the thread object. 
- pThread->PushExitCommand(false); - pThread.Clear(); - - DeviceManagerImpl::Shutdown(); -} - -ThreadCommandQueue* DeviceManager::GetThreadQueue() -{ - return pThread; -} - -ThreadId DeviceManager::GetThreadId() const -{ - return pThread->GetThreadId(); -} - -bool DeviceManager::GetDeviceInfo(DeviceInfo* info) const -{ - if ((info->InfoClassType != Device_Manager) && - (info->InfoClassType != Device_None)) - return false; - - info->Type = Device_Manager; - info->Version = 0; - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, "DeviceManager"); - OVR_strcpy(info->Manufacturer,DeviceInfo::MaxNameLength, "Oculus VR, Inc."); - return true; -} - -DeviceEnumerator<> DeviceManager::EnumerateDevicesEx(const DeviceEnumerationArgs& args) -{ - // TBD: Can this be avoided in the future, once proper device notification is in place? - pThread->PushCall((DeviceManagerImpl*)this, - &DeviceManager::EnumerateAllFactoryDevices, true); - - return DeviceManagerImpl::EnumerateDevicesEx(args); -} - - -//------------------------------------------------------------------------------------- -// ***** DeviceManager Thread - -DeviceManagerThread::DeviceManagerThread() - : Thread(ThreadStackSize) -{ - int result = pipe(CommandFd); - OVR_ASSERT(!result); - - AddSelectFd(NULL, CommandFd[0]); -} - -DeviceManagerThread::~DeviceManagerThread() -{ - if (CommandFd[0]) - { - RemoveSelectFd(NULL, CommandFd[0]); - close(CommandFd[0]); - close(CommandFd[1]); - } -} - -bool DeviceManagerThread::AddSelectFd(Notifier* notify, int fd) -{ - struct pollfd pfd; - pfd.fd = fd; - pfd.events = POLLIN|POLLHUP|POLLERR; - pfd.revents = 0; - - FdNotifiers.PushBack(notify); - PollFds.PushBack(pfd); - - OVR_ASSERT(FdNotifiers.GetSize() == PollFds.GetSize()); - return true; -} - -bool DeviceManagerThread::RemoveSelectFd(Notifier* notify, int fd) -{ - // [0] is reserved for thread commands with notify of null, but we still - // can use this function to remove it. - for (UPInt i = 0; i < FdNotifiers.GetSize(); i++) - { - if ((FdNotifiers[i] == notify) && (PollFds[i].fd == fd)) - { - FdNotifiers.RemoveAt(i); - PollFds.RemoveAt(i); - return true; - } - } - return false; -} - - - -int DeviceManagerThread::Run() -{ - ThreadCommand::PopBuffer command; - - SetThreadName("OVR::DeviceManagerThread"); - LogText("OVR::DeviceManagerThread - running (ThreadId=%p).\n", GetThreadId()); - - // Signal to the parent thread that initialization has finished. - StartupEvent.SetEvent(); - - while(!IsExiting()) - { - // PopCommand will reset event on empty queue. - if (PopCommand(&command)) - { - command.Execute(); - } - else - { - bool commands = 0; - do - { - int waitMs = -1; - - // If devices have time-dependent logic registered, get the longest wait - // allowed based on current ticks. 
- if (!TicksNotifiers.IsEmpty()) - { - UInt64 ticksMks = Timer::GetTicks(); - int waitAllowed; - - for (UPInt j = 0; j < TicksNotifiers.GetSize(); j++) - { - waitAllowed = (int)(TicksNotifiers[j]->OnTicks(ticksMks) / Timer::MksPerMs); - if (waitAllowed < waitMs) - waitMs = waitAllowed; - } - } - - // wait until there is data available on one of the devices or the timeout expires - int n = poll(&PollFds[0], PollFds.GetSize(), waitMs); - - if (n > 0) - { - // Iterate backwards through the list so the ordering will not be - // affected if the called object gets removed during the callback - // Also, the HID data streams are located toward the back of the list - // and servicing them first will allow a disconnect to be handled - // and cleaned directly at the device first instead of the general HID monitor - for (int i=PollFds.GetSize()-1; i>=0; i--) - { - if (PollFds[i].revents & POLLERR) - { - OVR_DEBUG_LOG(("poll: error on [%d]: %d", i, PollFds[i].fd)); - } - else if (PollFds[i].revents & POLLIN) - { - if (FdNotifiers[i]) - FdNotifiers[i]->OnEvent(i, PollFds[i].fd); - else if (i == 0) // command - { - char dummy[128]; - read(PollFds[i].fd, dummy, 128); - commands = 1; - } - } - - if (PollFds[i].revents & POLLHUP) - PollFds[i].events = 0; - - if (PollFds[i].revents != 0) - { - n--; - if (n == 0) - break; - } - } - } - } while (PollFds.GetSize() > 0 && !commands); - } - } - - LogText("OVR::DeviceManagerThread - exiting (ThreadId=%p).\n", GetThreadId()); - return 0; -} - -bool DeviceManagerThread::AddTicksNotifier(Notifier* notify) -{ - TicksNotifiers.PushBack(notify); - return true; -} - -bool DeviceManagerThread::RemoveTicksNotifier(Notifier* notify) -{ - for (UPInt i = 0; i < TicksNotifiers.GetSize(); i++) - { - if (TicksNotifiers[i] == notify) - { - TicksNotifiers.RemoveAt(i); - return true; - } - } - return false; -} - -} // namespace Linux - - -//------------------------------------------------------------------------------------- -// ***** Creation - - -// Creates a new DeviceManager and initializes OVR. -DeviceManager* DeviceManager::Create() -{ - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "DeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<Linux::DeviceManager> manager = *new Linux::DeviceManager; - - if (manager) - { - if (manager->Initialize(0)) - { - manager->AddFactory(&LatencyTestDeviceFactory::Instance); - manager->AddFactory(&SensorDeviceFactory::Instance); - manager->AddFactory(&Linux::HMDDeviceFactory::Instance); - - manager->AddRef(); - } - else - { - manager.Clear(); - } - - } - - return manager.GetPtr(); -} - - -} // namespace OVR - diff --git a/LibOVR/Src/OVR_Linux_DeviceManager.h b/LibOVR/Src/OVR_Linux_DeviceManager.h deleted file mode 100644 index 101f871..0000000 --- a/LibOVR/Src/OVR_Linux_DeviceManager.h +++ /dev/null @@ -1,122 +0,0 @@ -/************************************************************************************ - -Filename : OVR_Linux_DeviceManager.h -Content : Linux-specific DeviceManager header. -Created : -Authors : - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_Linux_DeviceManager_h -#define OVR_Linux_DeviceManager_h - -#include "OVR_DeviceImpl.h" - -#include <unistd.h> -#include <sys/poll.h> - - -namespace OVR { namespace Linux { - -class DeviceManagerThread; - -//------------------------------------------------------------------------------------- -// ***** Linux DeviceManager - -class DeviceManager : public DeviceManagerImpl -{ -public: - DeviceManager(); - ~DeviceManager(); - - // Initialize/Shutdowncreate and shutdown manger thread. - virtual bool Initialize(DeviceBase* parent); - virtual void Shutdown(); - - virtual ThreadCommandQueue* GetThreadQueue(); - virtual ThreadId GetThreadId() const; - - virtual DeviceEnumerator<> EnumerateDevicesEx(const DeviceEnumerationArgs& args); - - virtual bool GetDeviceInfo(DeviceInfo* info) const; - - Ptr<DeviceManagerThread> pThread; -}; - -//------------------------------------------------------------------------------------- -// ***** Device Manager Background Thread - -class DeviceManagerThread : public Thread, public ThreadCommandQueue -{ - friend class DeviceManager; - enum { ThreadStackSize = 64 * 1024 }; -public: - DeviceManagerThread(); - ~DeviceManagerThread(); - - virtual int Run(); - - // ThreadCommandQueue notifications for CommandEvent handling. - virtual void OnPushNonEmpty_Locked() { write(CommandFd[1], this, 1); } - virtual void OnPopEmpty_Locked() { } - - class Notifier - { - public: - // Called when I/O is received - virtual void OnEvent(int i, int fd) = 0; - - // Called when timing ticks are updated. - // Returns the largest number of microseconds this function can - // wait till next call. - virtual UInt64 OnTicks(UInt64 ticksMks) - { - OVR_UNUSED1(ticksMks); - return Timer::MksPerSecond * 1000; - } - }; - - // Add I/O notifier - bool AddSelectFd(Notifier* notify, int fd); - bool RemoveSelectFd(Notifier* notify, int fd); - - // Add notifier that will be called at regular intervals. - bool AddTicksNotifier(Notifier* notify); - bool RemoveTicksNotifier(Notifier* notify); - -private: - - bool threadInitialized() { return CommandFd[0] != 0; } - - // pipe used to signal commands - int CommandFd[2]; - - Array<struct pollfd> PollFds; - Array<Notifier*> FdNotifiers; - - Event StartupEvent; - - // Ticks notifiers - used for time-dependent events such as keep-alive. - Array<Notifier*> TicksNotifiers; -}; - -}} // namespace Linux::OVR - -#endif // OVR_Linux_DeviceManager_h diff --git a/LibOVR/Src/OVR_Linux_HIDDevice.cpp b/LibOVR/Src/OVR_Linux_HIDDevice.cpp deleted file mode 100644 index ed4db0e..0000000 --- a/LibOVR/Src/OVR_Linux_HIDDevice.cpp +++ /dev/null @@ -1,815 +0,0 @@ -/************************************************************************************ -Filename : OVR_Linux_HIDDevice.cpp -Content : Linux HID device implementation. -Created : February 26, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. 
- -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_Linux_HIDDevice.h" - -#include <sys/ioctl.h> -#include <fcntl.h> -#include <errno.h> -#include <linux/hidraw.h> -#include "OVR_HIDDeviceImpl.h" - -namespace OVR { namespace Linux { - -static const UInt32 MAX_QUEUED_INPUT_REPORTS = 5; - -//------------------------------------------------------------------------------------- -// **** Linux::DeviceManager -//----------------------------------------------------------------------------- -HIDDeviceManager::HIDDeviceManager(DeviceManager* manager) : DevManager(manager) -{ - UdevInstance = NULL; - HIDMonitor = NULL; - HIDMonHandle = -1; -} - -//----------------------------------------------------------------------------- -HIDDeviceManager::~HIDDeviceManager() -{ -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::initializeManager() -{ - if (HIDMonitor) - { - return true; - } - - // Create a udev_monitor handle to watch for device changes (hot-plug detection) - HIDMonitor = udev_monitor_new_from_netlink(UdevInstance, "udev"); - if (HIDMonitor == NULL) - { - return false; - } - - udev_monitor_filter_add_match_subsystem_devtype(HIDMonitor, "hidraw", NULL); // filter for hidraw only - - int err = udev_monitor_enable_receiving(HIDMonitor); - if (err) - { - udev_monitor_unref(HIDMonitor); - HIDMonitor = NULL; - return false; - } - - // Get the file descriptor (fd) for the monitor. - HIDMonHandle = udev_monitor_get_fd(HIDMonitor); - if (HIDMonHandle < 0) - { - udev_monitor_unref(HIDMonitor); - HIDMonitor = NULL; - return false; - } - - // This file handle will be polled along-side with the device hid handles for changes - // Add the handle to the polling list - if (!DevManager->pThread->AddSelectFd(this, HIDMonHandle)) - { - close(HIDMonHandle); - HIDMonHandle = -1; - - udev_monitor_unref(HIDMonitor); - HIDMonitor = NULL; - return false; - } - - return true; -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::Initialize() -{ - // Get a udev library handle. 
This handle must stay active during the - // duration the lifetime of device monitoring handles - UdevInstance = udev_new(); - if (!UdevInstance) - return false; - - return initializeManager(); -} - -//----------------------------------------------------------------------------- -void HIDDeviceManager::Shutdown() -{ - OVR_ASSERT_LOG((UdevInstance), ("Should have called 'Initialize' before 'Shutdown'.")); - - if (HIDMonitor) - { - DevManager->pThread->RemoveSelectFd(this, HIDMonHandle); - close(HIDMonHandle); - HIDMonHandle = -1; - - udev_monitor_unref(HIDMonitor); - HIDMonitor = NULL; - } - - udev_unref(UdevInstance); // release the library - - LogText("OVR::Linux::HIDDeviceManager - shutting down.\n"); -} - -//------------------------------------------------------------------------------- -bool HIDDeviceManager::AddNotificationDevice(HIDDevice* device) -{ - NotificationDevices.PushBack(device); - return true; -} - -//------------------------------------------------------------------------------- -bool HIDDeviceManager::RemoveNotificationDevice(HIDDevice* device) -{ - for (UPInt i = 0; i < NotificationDevices.GetSize(); i++) - { - if (NotificationDevices[i] == device) - { - NotificationDevices.RemoveAt(i); - return true; - } - } - return false; -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::getIntProperty(udev_device* device, - const char* propertyName, - SInt32* pResult) -{ - const char* str = udev_device_get_sysattr_value(device, propertyName); - if (str) - { - *pResult = strtol(str, NULL, 16); - return true; - } - else - { - *pResult = 0; - return true; - } -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::initVendorProductVersion(udev_device* device, HIDDeviceDesc* pDevDesc) -{ - SInt32 result; - if (getIntProperty(device, "idVendor", &result)) - pDevDesc->VendorId = result; - else - return false; - - if (getIntProperty(device, "idProduct", &result)) - pDevDesc->ProductId = result; - else - return false; - - if (getIntProperty(device, "bcdDevice", &result)) - pDevDesc->VersionNumber = result; - else - return false; - - return true; -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::getStringProperty(udev_device* device, - const char* propertyName, - OVR::String* pResult) -{ - // Get the attribute in UTF8 - const char* str = udev_device_get_sysattr_value(device, propertyName); - if (str) - { // Copy the string into the return value - *pResult = String(str); - return true; - } - else - { - return false; - } -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::Enumerate(HIDEnumerateVisitor* enumVisitor) -{ - - if (!initializeManager()) - { - return false; - } - - // Get a list of hid devices - udev_enumerate* devices = udev_enumerate_new(UdevInstance); - udev_enumerate_add_match_subsystem(devices, "hidraw"); - udev_enumerate_scan_devices(devices); - - udev_list_entry* entry = udev_enumerate_get_list_entry(devices); - - // Search each device for the matching vid/pid - while (entry != NULL) - { - // Get the device file name - const char* sysfs_path = udev_list_entry_get_name(entry); - udev_device* hid; // The device's HID udev node. 
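
For orientation, the sysfs walk performed by this loop can be reproduced with plain libudev outside of the manager. A minimal, self-contained sketch (hypothetical helper name, error handling trimmed, link with -ludev):

// List every hidraw node together with the idVendor/idProduct of its USB parent,
// mirroring the enumeration above. Illustrative sketch only, not part of LibOVR.
#include <libudev.h>
#include <stdio.h>

static void listHidrawDevices()
{
    udev* lib = udev_new();                          // library context; must outlive the handles below
    if (!lib)
        return;

    udev_enumerate* en = udev_enumerate_new(lib);
    udev_enumerate_add_match_subsystem(en, "hidraw");
    udev_enumerate_scan_devices(en);

    for (udev_list_entry* e = udev_enumerate_get_list_entry(en); e; e = udev_list_entry_get_next(e))
    {
        udev_device* dev     = udev_device_new_from_syspath(lib, udev_list_entry_get_name(e));
        const char*  devnode = udev_device_get_devnode(dev);     // e.g. /dev/hidraw0

        // sysfs attributes such as idVendor live on the owning USB device, not the hidraw node.
        udev_device* usb = udev_device_get_parent_with_subsystem_devtype(dev, "usb", "usb_device");
        if (usb && devnode)
        {
            const char* vid = udev_device_get_sysattr_value(usb, "idVendor");
            const char* pid = udev_device_get_sysattr_value(usb, "idProduct");
            printf("%s  vid=%s pid=%s\n", devnode, vid ? vid : "?", pid ? pid : "?");
        }
        udev_device_unref(dev);   // the parent reference is owned by 'dev'
    }

    udev_enumerate_unref(en);
    udev_unref(lib);
}
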
- hid = udev_device_new_from_syspath(UdevInstance, sysfs_path); - const char* dev_path = udev_device_get_devnode(hid); - - // Get the USB device - hid = udev_device_get_parent_with_subsystem_devtype(hid, "usb", "usb_device"); - if (hid) - { - HIDDeviceDesc devDesc; - - // Check the VID/PID for a match - if (dev_path && - initVendorProductVersion(hid, &devDesc) && - enumVisitor->MatchVendorProduct(devDesc.VendorId, devDesc.ProductId)) - { - devDesc.Path = dev_path; - getFullDesc(hid, &devDesc); - - // Look for the device to check if it is already opened. - Ptr<DeviceCreateDesc> existingDevice = DevManager->FindHIDDevice(devDesc, true); - // if device exists and it is opened then most likely the device open() - // will fail; therefore, we just set Enumerated to 'true' and continue. - if (existingDevice && existingDevice->pDevice) - { - existingDevice->Enumerated = true; - } - else - { // open the device temporarily for startup communication - int device_handle = open(dev_path, O_RDWR); - if (device_handle >= 0) - { - // Construct minimal device that the visitor callback can get feature reports from - Linux::HIDDevice device(this, device_handle); - enumVisitor->Visit(device, devDesc); - - close(device_handle); // close the file handle - } - } - } - - udev_device_unref(hid); - entry = udev_list_entry_get_next(entry); - } - } - - // Free the enumerator and udev objects - udev_enumerate_unref(devices); - - return true; -} - -//----------------------------------------------------------------------------- -OVR::HIDDevice* HIDDeviceManager::Open(const String& path) -{ - Ptr<Linux::HIDDevice> device = *new Linux::HIDDevice(this); - - if (device->HIDInitialize(path)) - { - device->AddRef(); - return device; - } - - return NULL; -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::getFullDesc(udev_device* device, HIDDeviceDesc* desc) -{ - - if (!initVendorProductVersion(device, desc)) - { - return false; - } - - if (!getStringProperty(device, "serial", &(desc->SerialNumber))) - { - return false; - } - - getStringProperty(device, "manufacturer", &(desc->Manufacturer)); - getStringProperty(device, "product", &(desc->Product)); - - return true; -} - -//----------------------------------------------------------------------------- -bool HIDDeviceManager::GetDescriptorFromPath(const char* dev_path, HIDDeviceDesc* desc) -{ - if (!initializeManager()) - { - return false; - } - - // Search for the udev device from the given pathname so we can - // have a handle to query device properties - - udev_enumerate* devices = udev_enumerate_new(UdevInstance); - udev_enumerate_add_match_subsystem(devices, "hidraw"); - udev_enumerate_scan_devices(devices); - - udev_list_entry* entry = udev_enumerate_get_list_entry(devices); - - bool success = false; - // Search for the device with the matching path - while (entry != NULL) - { - // Get the device file name - const char* sysfs_path = udev_list_entry_get_name(entry); - udev_device* hid; // The device's HID udev node. 
- hid = udev_device_new_from_syspath(UdevInstance, sysfs_path); - const char* path = udev_device_get_devnode(hid); - - if (OVR_strcmp(dev_path, path) == 0) - { // Found the device so lets collect the device descriptor - - // Get the USB device - hid = udev_device_get_parent_with_subsystem_devtype(hid, "usb", "usb_device"); - if (hid) - { - desc->Path = dev_path; - success = getFullDesc(hid, desc); - } - - } - - udev_device_unref(hid); - entry = udev_list_entry_get_next(entry); - } - - // Free the enumerator - udev_enumerate_unref(devices); - - return success; -} - -//----------------------------------------------------------------------------- -void HIDDeviceManager::OnEvent(int i, int fd) -{ - // There is a device status change - udev_device* hid = udev_monitor_receive_device(HIDMonitor); - if (hid) - { - const char* dev_path = udev_device_get_devnode(hid); - const char* action = udev_device_get_action(hid); - - HIDDeviceDesc device_info; - device_info.Path = dev_path; - - MessageType notify_type; - if (OVR_strcmp(action, "add") == 0) - { - notify_type = Message_DeviceAdded; - - // Retrieve the device info. This can only be done on a connected - // device and is invalid for a disconnected device - - // Get the USB device - hid = udev_device_get_parent_with_subsystem_devtype(hid, "usb", "usb_device"); - if (!hid) - { - return; - } - - getFullDesc(hid, &device_info); - } - else if (OVR_strcmp(action, "remove") == 0) - { - notify_type = Message_DeviceRemoved; - } - else - { - return; - } - - bool error = false; - bool deviceFound = false; - for (UPInt i = 0; i < NotificationDevices.GetSize(); i++) - { - if (NotificationDevices[i] && - NotificationDevices[i]->OnDeviceNotification(notify_type, &device_info, &error)) - { - // The notification was for an existing device - deviceFound = true; - break; - } - } - - if (notify_type == Message_DeviceAdded && !deviceFound) - { - DevManager->DetectHIDDevice(device_info); - } - - udev_device_unref(hid); - } -} - -//============================================================================= -// Linux::HIDDevice -//============================================================================= -HIDDevice::HIDDevice(HIDDeviceManager* manager) - : HIDManager(manager), InMinimalMode(false) -{ - DeviceHandle = -1; -} - -//----------------------------------------------------------------------------- -// This is a minimal constructor used during enumeration for us to pass -// a HIDDevice to the visit function (so that it can query feature reports). 
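
The hotplug path drained by OnEvent() above reduces to a few udev monitor calls once the notification bookkeeping is stripped away; a rough, self-contained sketch (blocking variant, hypothetical helper, error handling omitted) before the constructors below:

// Watch for hidraw add/remove events, mirroring the monitor set up in
// initializeManager() and drained in OnEvent() above. Illustrative only.
#include <libudev.h>
#include <poll.h>
#include <stdio.h>

static void watchHidrawHotplug(udev* lib)
{
    udev_monitor* mon = udev_monitor_new_from_netlink(lib, "udev");
    udev_monitor_filter_add_match_subsystem_devtype(mon, "hidraw", NULL);
    udev_monitor_enable_receiving(mon);

    pollfd pfd = { udev_monitor_get_fd(mon), POLLIN, 0 };

    for (;;)
    {
        if (poll(&pfd, 1, -1) <= 0)                 // LibOVR instead polls this fd alongside the device fds
            break;

        udev_device* dev = udev_monitor_receive_device(mon);
        if (!dev)
            continue;

        const char* action  = udev_device_get_action(dev);   // "add" or "remove"
        const char* devnode = udev_device_get_devnode(dev);
        printf("%s %s\n", action ? action : "?", devnode ? devnode : "?");
        udev_device_unref(dev);
    }

    udev_monitor_unref(mon);
}
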
-HIDDevice::HIDDevice(HIDDeviceManager* manager, int device_handle) -: HIDManager(manager), DeviceHandle(device_handle), InMinimalMode(true) -{ -} - -//----------------------------------------------------------------------------- -HIDDevice::~HIDDevice() -{ - if (!InMinimalMode) - { - HIDShutdown(); - } -} - -//----------------------------------------------------------------------------- -bool HIDDevice::HIDInitialize(const String& path) -{ - const char* hid_path = path.ToCStr(); - if (!openDevice(hid_path)) - { - LogText("OVR::Linux::HIDDevice - Failed to open HIDDevice: %s", hid_path); - return false; - } - - HIDManager->DevManager->pThread->AddTicksNotifier(this); - HIDManager->AddNotificationDevice(this); - - LogText("OVR::Linux::HIDDevice - Opened '%s'\n" - " Manufacturer:'%s' Product:'%s' Serial#:'%s'\n", - DevDesc.Path.ToCStr(), - DevDesc.Manufacturer.ToCStr(), DevDesc.Product.ToCStr(), - DevDesc.SerialNumber.ToCStr()); - - return true; -} - -//----------------------------------------------------------------------------- -bool HIDDevice::initInfo() -{ - // Device must have been successfully opened. - OVR_ASSERT(DeviceHandle >= 0); - - int desc_size = 0; - hidraw_report_descriptor rpt_desc; - memset(&rpt_desc, 0, sizeof(rpt_desc)); - - // get report descriptor size - int r = ioctl(DeviceHandle, HIDIOCGRDESCSIZE, &desc_size); - if (r < 0) - { - OVR_ASSERT_LOG(false, ("Failed to get report descriptor size.")); - return false; - } - - // Get the report descriptor - rpt_desc.size = desc_size; - r = ioctl(DeviceHandle, HIDIOCGRDESC, &rpt_desc); - if (r < 0) - { - OVR_ASSERT_LOG(false, ("Failed to get report descriptor.")); - return false; - } - - /* - // Get report lengths. - SInt32 bufferLength; - bool getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxInputReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - InputReportBufferLength = (UInt16) bufferLength; - - getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxOutputReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - OutputReportBufferLength = (UInt16) bufferLength; - - getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxFeatureReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - FeatureReportBufferLength = (UInt16) bufferLength; - - - if (ReadBufferSize < InputReportBufferLength) - { - OVR_ASSERT_LOG(false, ("Input report buffer length is bigger than read buffer.")); - return false; - } - - // Get device desc. - if (!HIDManager->getFullDesc(Device, &DevDesc)) - { - OVR_ASSERT_LOG(false, ("Failed to get device desc while initializing device.")); - return false; - } - - return true; - */ - - // Get report lengths. -// TODO: hard-coded for now. 
Need to interpret these values from the report descriptor - InputReportBufferLength = 62; - OutputReportBufferLength = 0; - FeatureReportBufferLength = 69; - - if (ReadBufferSize < InputReportBufferLength) - { - OVR_ASSERT_LOG(false, ("Input report buffer length is bigger than read buffer.")); - return false; - } - - return true; -} - -//----------------------------------------------------------------------------- -bool HIDDevice::openDevice(const char* device_path) -{ - // First fill out the device descriptor - if (!HIDManager->GetDescriptorFromPath(device_path, &DevDesc)) - { - return false; - } - - // Now open the device - DeviceHandle = open(device_path, O_RDWR); - if (DeviceHandle < 0) - { - OVR_DEBUG_LOG(("Failed 'CreateHIDFile' while opening device, error = 0x%X.", errno)); - DeviceHandle = -1; - return false; - } - - // fill out some values from the feature report descriptor - if (!initInfo()) - { - OVR_ASSERT_LOG(false, ("Failed to get HIDDevice info.")); - - close(DeviceHandle); - DeviceHandle = -1; - return false; - } - - // Add the device to the polling list - if (!HIDManager->DevManager->pThread->AddSelectFd(this, DeviceHandle)) - { - OVR_ASSERT_LOG(false, ("Failed to initialize polling for HIDDevice.")); - - close(DeviceHandle); - DeviceHandle = -1; - return false; - } - - return true; -} - -//----------------------------------------------------------------------------- -void HIDDevice::HIDShutdown() -{ - - HIDManager->DevManager->pThread->RemoveTicksNotifier(this); - HIDManager->RemoveNotificationDevice(this); - - if (DeviceHandle >= 0) // Device may already have been closed if unplugged. - { - closeDevice(false); - } - - LogText("OVR::Linux::HIDDevice - HIDShutdown '%s'\n", DevDesc.Path.ToCStr()); -} - -//----------------------------------------------------------------------------- -void HIDDevice::closeDevice(bool wasUnplugged) -{ - OVR_ASSERT(DeviceHandle >= 0); - - - HIDManager->DevManager->pThread->RemoveSelectFd(this, DeviceHandle); - - close(DeviceHandle); // close the file handle - DeviceHandle = -1; - - LogText("OVR::Linux::HIDDevice - HID Device Closed '%s'\n", DevDesc.Path.ToCStr()); -} - -//----------------------------------------------------------------------------- -void HIDDevice::closeDeviceOnIOError() -{ - LogText("OVR::Linux::HIDDevice - Lost connection to '%s'\n", DevDesc.Path.ToCStr()); - closeDevice(false); -} - -//----------------------------------------------------------------------------- -bool HIDDevice::SetFeatureReport(UByte* data, UInt32 length) -{ - - if (DeviceHandle < 0) - return false; - - UByte reportID = data[0]; - - if (reportID == 0) - { - // Not using reports so remove from data packet. 
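
Both feature-report directions below ultimately reduce to a single hidraw ioctl each; per the kernel's hidraw interface the first byte of the buffer carries the report number (zero when the device does not use numbered reports). A minimal sketch with hypothetical helper names:

// Hedged sketch of raw feature-report I/O on a hidraw descriptor. The ioctls
// return the number of bytes transferred, or a negative value on error.
#include <sys/ioctl.h>
#include <linux/hidraw.h>

static bool getFeature(int fd, unsigned char reportId, unsigned char* buf, unsigned len)
{
    buf[0] = reportId;                                 // report number (0 if unnumbered)
    return ioctl(fd, HIDIOCGFEATURE(len), buf) >= 0;   // report data is written into buf
}

static bool setFeature(int fd, unsigned char* report, unsigned len)
{
    // 'report' starts with the report number, followed by the payload.
    return ioctl(fd, HIDIOCSFEATURE(len), report) >= 0;
}
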
- data++; - length--; - } - - int r = ioctl(DeviceHandle, HIDIOCSFEATURE(length), data); - return (r >= 0); -} - -//----------------------------------------------------------------------------- -bool HIDDevice::GetFeatureReport(UByte* data, UInt32 length) -{ - if (DeviceHandle < 0) - return false; - - int r = ioctl(DeviceHandle, HIDIOCGFEATURE(length), data); - return (r >= 0); -} - -//----------------------------------------------------------------------------- -UInt64 HIDDevice::OnTicks(UInt64 ticksMks) -{ - if (Handler) - { - return Handler->OnTicks(ticksMks); - } - - return DeviceManagerThread::Notifier::OnTicks(ticksMks); -} - -//----------------------------------------------------------------------------- -void HIDDevice::OnEvent(int i, int fd) -{ - // We have data to read from the device - int bytes = read(fd, ReadBuffer, ReadBufferSize); - if (bytes >= 0) - { -// TODO: I need to handle partial messages and package reconstruction - if (Handler) - { - Handler->OnInputReport(ReadBuffer, bytes); - } - } - else - { // Close the device on read error. - closeDeviceOnIOError(); - } -} - -//----------------------------------------------------------------------------- -bool HIDDevice::OnDeviceNotification(MessageType messageType, - HIDDeviceDesc* device_info, - bool* error) -{ - const char* device_path = device_info->Path.ToCStr(); - - if (messageType == Message_DeviceAdded && DeviceHandle < 0) - { - // Is this the correct device? - if (!(device_info->VendorId == DevDesc.VendorId - && device_info->ProductId == DevDesc.ProductId - && device_info->SerialNumber == DevDesc.SerialNumber)) - { - return false; - } - - // A closed device has been re-added. Try to reopen. - if (!openDevice(device_path)) - { - LogError("OVR::Linux::HIDDevice - Failed to reopen a device '%s' that was re-added.\n", - device_path); - *error = true; - return true; - } - - LogText("OVR::Linux::HIDDevice - Reopened device '%s'\n", device_path); - - if (Handler) - { - Handler->OnDeviceMessage(HIDHandler::HIDDeviceMessage_DeviceAdded); - } - } - else if (messageType == Message_DeviceRemoved) - { - // Is this the correct device? - // For disconnected device, the device description will be invalid so - // checking the path is the only way to match them - if (DevDesc.Path.CompareNoCase(device_path) != 0) - { - return false; - } - - if (DeviceHandle >= 0) - { - closeDevice(true); - } - - if (Handler) - { - Handler->OnDeviceMessage(HIDHandler::HIDDeviceMessage_DeviceRemoved); - } - } - else - { - OVR_ASSERT(0); - } - - *error = false; - return true; -} - -//----------------------------------------------------------------------------- -HIDDeviceManager* HIDDeviceManager::CreateInternal(Linux::DeviceManager* devManager) -{ - - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "HIDDeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<Linux::HIDDeviceManager> manager = *new Linux::HIDDeviceManager(devManager); - - if (manager) - { - if (manager->Initialize()) - { - manager->AddRef(); - } - else - { - manager.Clear(); - } - } - - return manager.GetPtr(); -} - -} // namespace Linux - -//------------------------------------------------------------------------------------- -// ***** Creation - -// Creates a new HIDDeviceManager and initializes OVR. 
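
As a side note on the input path, OnEvent() above turns every poll() wake-up on the hidraw descriptor into one read() of at most ReadBufferSize bytes. In isolation that loop looks roughly like this (illustrative sketch, not the SDK code):

// Standalone sketch of the input-report pump used by HIDDevice::OnEvent() above.
// Each POLLIN wake-up yields one read(); a failed read is treated as a disconnect.
#include <poll.h>
#include <unistd.h>
#include <stdio.h>

static void pumpInputReports(int hidrawFd)
{
    unsigned char buffer[96];                       // same size as ReadBufferSize above
    pollfd pfd = { hidrawFd, POLLIN, 0 };

    while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
    {
        ssize_t bytes = read(hidrawFd, buffer, sizeof(buffer));
        if (bytes <= 0)
            break;                                  // device unplugged or I/O error
        printf("input report: %d bytes, id=%u\n", (int)bytes, (unsigned)buffer[0]);
    }
}
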
-HIDDeviceManager* HIDDeviceManager::Create() -{ - OVR_ASSERT_LOG(false, ("Standalone mode not implemented yet.")); - - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "HIDDeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<Linux::HIDDeviceManager> manager = *new Linux::HIDDeviceManager(NULL); - - if (manager) - { - if (manager->Initialize()) - { - manager->AddRef(); - } - else - { - manager.Clear(); - } - } - - return manager.GetPtr(); -} - -} // namespace OVR diff --git a/LibOVR/Src/OVR_Linux_HIDDevice.h b/LibOVR/Src/OVR_Linux_HIDDevice.h deleted file mode 100644 index 0f4c7f0..0000000 --- a/LibOVR/Src/OVR_Linux_HIDDevice.h +++ /dev/null @@ -1,135 +0,0 @@ -/************************************************************************************ -Filename : OVR_Linux_HIDDevice.h -Content : Linux HID device implementation. -Created : June 13, 2013 -Authors : Brant Lewis - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_LINUX_HIDDevice_h -#define OVR_LINUX_HIDDevice_h - -#include "OVR_HIDDevice.h" -#include "OVR_Linux_DeviceManager.h" -#include <libudev.h> - -namespace OVR { namespace Linux { - -class HIDDeviceManager; - -//------------------------------------------------------------------------------------- -// ***** Linux HIDDevice - -class HIDDevice : public OVR::HIDDevice, public DeviceManagerThread::Notifier -{ -private: - friend class HIDDeviceManager; - -public: - HIDDevice(HIDDeviceManager* manager); - - // This is a minimal constructor used during enumeration for us to pass - // a HIDDevice to the visit function (so that it can query feature reports). 
- HIDDevice(HIDDeviceManager* manager, int device_handle); - - virtual ~HIDDevice(); - - bool HIDInitialize(const String& path); - void HIDShutdown(); - - virtual bool SetFeatureReport(UByte* data, UInt32 length); - virtual bool GetFeatureReport(UByte* data, UInt32 length); - - // DeviceManagerThread::Notifier - void OnEvent(int i, int fd); - UInt64 OnTicks(UInt64 ticksMks); - - bool OnDeviceNotification(MessageType messageType, - HIDDeviceDesc* device_info, - bool* error); - -private: - bool initInfo(); - bool openDevice(const char* dev_path); - void closeDevice(bool wasUnplugged); - void closeDeviceOnIOError(); - bool setupDevicePluggedInNotification(); - - bool InMinimalMode; - HIDDeviceManager* HIDManager; - int DeviceHandle; // file handle to the device - HIDDeviceDesc DevDesc; - - enum { ReadBufferSize = 96 }; - UByte ReadBuffer[ReadBufferSize]; - - UInt16 InputReportBufferLength; - UInt16 OutputReportBufferLength; - UInt16 FeatureReportBufferLength; -}; - - -//------------------------------------------------------------------------------------- -// ***** Linux HIDDeviceManager - -class HIDDeviceManager : public OVR::HIDDeviceManager, public DeviceManagerThread::Notifier -{ - friend class HIDDevice; - -public: - HIDDeviceManager(Linux::DeviceManager* Manager); - virtual ~HIDDeviceManager(); - - virtual bool Initialize(); - virtual void Shutdown(); - - virtual bool Enumerate(HIDEnumerateVisitor* enumVisitor); - virtual OVR::HIDDevice* Open(const String& path); - - static HIDDeviceManager* CreateInternal(DeviceManager* manager); - - void OnEvent(int i, int fd); - -private: - bool initializeManager(); - bool initVendorProductVersion(udev_device* device, HIDDeviceDesc* pDevDesc); - bool getPath(udev_device* device, String* pPath); - bool getIntProperty(udev_device* device, const char* key, int32_t* pResult); - bool getStringProperty(udev_device* device, - const char* propertyName, - OVR::String* pResult); - bool getFullDesc(udev_device* device, HIDDeviceDesc* desc); - bool GetDescriptorFromPath(const char* dev_path, HIDDeviceDesc* desc); - - bool AddNotificationDevice(HIDDevice* device); - bool RemoveNotificationDevice(HIDDevice* device); - - DeviceManager* DevManager; - - udev* UdevInstance; // a handle to the udev library instance - udev_monitor* HIDMonitor; - int HIDMonHandle; // the udev_monitor file handle - - Array<HIDDevice*> NotificationDevices; -}; - -}} // namespace OVR::Linux - -#endif // OVR_Linux_HIDDevice_h diff --git a/LibOVR/Src/OVR_Linux_HMDDevice.cpp b/LibOVR/Src/OVR_Linux_HMDDevice.cpp deleted file mode 100644 index 633e665..0000000 --- a/LibOVR/Src/OVR_Linux_HMDDevice.cpp +++ /dev/null @@ -1,408 +0,0 @@ -/************************************************************************************ - -Filename : OVR_Linux_HMDDevice.h -Content : Linux HMDDevice implementation -Created : June 17, 2013 -Authors : Brant Lewis - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_Linux_HMDDevice.h" - -#include "OVR_Linux_DeviceManager.h" - -#include "OVR_Profile.h" - -#include <X11/Xlib.h> -#include <X11/extensions/Xinerama.h> - -namespace OVR { namespace Linux { - -//------------------------------------------------------------------------------------- - -HMDDeviceCreateDesc::HMDDeviceCreateDesc(DeviceFactory* factory, const String& displayDeviceName, long dispId) - : DeviceCreateDesc(factory, Device_HMD), - DisplayDeviceName(displayDeviceName), - DesktopX(0), DesktopY(0), Contents(0), EyeToScreenDistance(0), - HResolution(0), VResolution(0), HScreenSize(0), VScreenSize(0), - DisplayId(dispId) -{ - DeviceId = DisplayDeviceName; - for (int i=0; i<4; i++) - DistortionK[i] = 0; -} - -HMDDeviceCreateDesc::HMDDeviceCreateDesc(const HMDDeviceCreateDesc& other) - : DeviceCreateDesc(other.pFactory, Device_HMD), - DeviceId(other.DeviceId), DisplayDeviceName(other.DisplayDeviceName), - DesktopX(other.DesktopX), DesktopY(other.DesktopY), Contents(other.Contents), - HResolution(other.HResolution), VResolution(other.VResolution), - HScreenSize(other.HScreenSize), VScreenSize(other.VScreenSize), - DisplayId(other.DisplayId), EyeToScreenDistance(other.EyeToScreenDistance) -{ - for (int i=0; i<4; i++) - DistortionK[i] = other.DistortionK[i]; -} - -HMDDeviceCreateDesc::MatchResult HMDDeviceCreateDesc::MatchDevice(const DeviceCreateDesc& other, - DeviceCreateDesc** pcandidate) const -{ - if ((other.Type != Device_HMD) || (other.pFactory != pFactory)) - return Match_None; - - // There are several reasons we can come in here: - // a) Matching this HMD Monitor created desc to OTHER HMD Monitor desc - // - Require exact device DeviceId/DeviceName match - // b) Matching SensorDisplayInfo created desc to OTHER HMD Monitor desc - // - This DeviceId is empty; becomes candidate - // c) Matching this HMD Monitor created desc to SensorDisplayInfo desc - // - This other.DeviceId is empty; becomes candidate - - const HMDDeviceCreateDesc& s2 = (const HMDDeviceCreateDesc&) other; - - if ((DeviceId == s2.DeviceId) && - (DisplayId == s2.DisplayId)) - { - // Non-null DeviceId may match while size is different if screen size was overwritten - // by SensorDisplayInfo in prior iteration. - if (!DeviceId.IsEmpty() || - ((HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) ) - { - *pcandidate = 0; - return Match_Found; - } - } - - - // DisplayInfo takes precedence, although we try to match it first. - if ((HResolution == s2.HResolution) && - (VResolution == s2.VResolution) && - (HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) - { - if (DeviceId.IsEmpty() && !s2.DeviceId.IsEmpty()) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - - *pcandidate = 0; - return Match_Found; - } - - // SensorDisplayInfo may override resolution settings, so store as candidate. 
- if (s2.DeviceId.IsEmpty()) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - // OTHER HMD Monitor desc may initialize DeviceName/Id - else if (DeviceId.IsEmpty()) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - - return Match_None; -} - - -bool HMDDeviceCreateDesc::UpdateMatchedCandidate(const DeviceCreateDesc& other, - bool* newDeviceFlag) -{ - // This candidate was the the "best fit" to apply sensor DisplayInfo to. - OVR_ASSERT(other.Type == Device_HMD); - - const HMDDeviceCreateDesc& s2 = (const HMDDeviceCreateDesc&) other; - - // Force screen size on resolution from SensorDisplayInfo. - // We do this because USB detection is more reliable as compared to HDMI EDID, - // which may be corrupted by splitter reporting wrong monitor - if (s2.DeviceId.IsEmpty()) - { - HScreenSize = s2.HScreenSize; - VScreenSize = s2.VScreenSize; - Contents |= Contents_Screen; - - if (s2.Contents & HMDDeviceCreateDesc::Contents_Distortion) - { - memcpy(DistortionK, s2.DistortionK, sizeof(float)*4); - Contents |= Contents_Distortion; - } - DeviceId = s2.DeviceId; - DisplayId = s2.DisplayId; - DisplayDeviceName = s2.DisplayDeviceName; - if (newDeviceFlag) *newDeviceFlag = true; - } - else if (DeviceId.IsEmpty()) - { - DeviceId = s2.DeviceId; - DisplayId = s2.DisplayId; - DisplayDeviceName = s2.DisplayDeviceName; - - // ScreenSize and Resolution are NOT assigned here, since they may have - // come from a sensor DisplayInfo (which has precedence over HDMI). - - if (newDeviceFlag) *newDeviceFlag = true; - } - else - { - if (newDeviceFlag) *newDeviceFlag = false; - } - - return true; -} - -bool HMDDeviceCreateDesc::MatchDevice(const String& path) -{ - return DeviceId.CompareNoCase(path) == 0; -} - -//------------------------------------------------------------------------------------- -// ***** HMDDeviceFactory - -HMDDeviceFactory HMDDeviceFactory::Instance; - -void HMDDeviceFactory::EnumerateDevices(EnumerateVisitor& visitor) -{ - // For now we'll assume the Rift DK1 is attached in extended monitor mode. Ultimately we need to - // use XFree86 to enumerate X11 screens in case the Rift is attached as a separate screen. We also - // need to be able to read the EDID manufacturer product code to be able to differentiate between - // Rift models. - - bool foundHMD = false; - - Display* display = XOpenDisplay(NULL); - if (display && XineramaIsActive(display)) - { - int numberOfScreens; - XineramaScreenInfo* screens = XineramaQueryScreens(display, &numberOfScreens); - - for (int i = 0; i < numberOfScreens; i++) - { - XineramaScreenInfo screenInfo = screens[i]; - - if (screenInfo.width == 1280 && screenInfo.height == 800) - { - String deviceName = "OVR0001"; - - HMDDeviceCreateDesc hmdCreateDesc(this, deviceName, i); - hmdCreateDesc.SetScreenParameters(screenInfo.x_org, screenInfo.y_org, 1280, 800, 0.14976f, 0.0936f); - - OVR_DEBUG_LOG_TEXT(("DeviceManager - HMD Found %s - %d\n", - deviceName.ToCStr(), i)); - - // Notify caller about detected device. This will call EnumerateAddDevice - // if the this is the first time device was detected. - visitor.Visit(hmdCreateDesc); - foundHMD = true; - break; - } - } - - XFree(screens); - } - - - // Real HMD device is not found; however, we still may have a 'fake' HMD - // device created via SensorDeviceImpl::EnumerateHMDFromSensorDisplayInfo. - // Need to find it and set 'Enumerated' to true to avoid Removal notification. 
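
The screen probe above is just Xlib plus the Xinerama extension; reduced to a helper it might look like the following (sketch only, link with -lX11 -lXinerama; the 1280x800 match is the DK1 heuristic used above). The fallback for the case where no real screen was found continues below.

// Stripped-down version of the Xinerama probe in EnumerateDevices() above:
// report the desktop origin of the first attached 1280x800 screen.
#include <X11/Xlib.h>
#include <X11/extensions/Xinerama.h>

static bool findRiftScreen(int* xOrg, int* yOrg)
{
    Display* display = XOpenDisplay(NULL);
    if (!display)
        return false;

    bool found = false;
    if (XineramaIsActive(display))
    {
        int screenCount = 0;
        XineramaScreenInfo* screens = XineramaQueryScreens(display, &screenCount);

        for (int i = 0; i < screenCount && !found; i++)
        {
            if (screens[i].width == 1280 && screens[i].height == 800)
            {
                *xOrg = screens[i].x_org;
                *yOrg = screens[i].y_org;
                found = true;
            }
        }
        XFree(screens);
    }

    XCloseDisplay(display);
    return found;
}
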
- if (!foundHMD) - { - Ptr<DeviceCreateDesc> hmdDevDesc = getManager()->FindDevice("", Device_HMD); - if (hmdDevDesc) - hmdDevDesc->Enumerated = true; - } -} - -DeviceBase* HMDDeviceCreateDesc::NewDeviceInstance() -{ - return new HMDDevice(this); -} - -bool HMDDeviceCreateDesc::Is7Inch() const -{ - return (strstr(DeviceId.ToCStr(), "OVR0001") != 0) || (Contents & Contents_7Inch); -} - -Profile* HMDDeviceCreateDesc::GetProfileAddRef() const -{ - // Create device may override profile name, so get it from there is possible. - ProfileManager* profileManager = GetManagerImpl()->GetProfileManager(); - ProfileType profileType = GetProfileType(); - const char * profileName = pDevice ? - ((HMDDevice*)pDevice)->GetProfileName() : - profileManager->GetDefaultProfileName(profileType); - - return profileName ? - profileManager->LoadProfile(profileType, profileName) : - profileManager->GetDeviceDefaultProfile(profileType); -} - - -bool HMDDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const -{ - if ((info->InfoClassType != Device_HMD) && - (info->InfoClassType != Device_None)) - return false; - - bool is7Inch = Is7Inch(); - - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, - is7Inch ? "Oculus Rift DK1" : - ((HResolution >= 1920) ? "Oculus Rift DK HD" : "Oculus Rift DK1-Prototype") ); - OVR_strcpy(info->Manufacturer, DeviceInfo::MaxNameLength, "Oculus VR"); - info->Type = Device_HMD; - info->Version = 0; - - // Display detection. - if (info->InfoClassType == Device_HMD) - { - HMDInfo* hmdInfo = static_cast<HMDInfo*>(info); - - hmdInfo->DesktopX = DesktopX; - hmdInfo->DesktopY = DesktopY; - hmdInfo->HResolution = HResolution; - hmdInfo->VResolution = VResolution; - hmdInfo->HScreenSize = HScreenSize; - hmdInfo->VScreenSize = VScreenSize; - hmdInfo->VScreenCenter = VScreenSize * 0.5f; - hmdInfo->InterpupillaryDistance = 0.064f; // Default IPD; should be configurable. - hmdInfo->LensSeparationDistance = 0.0635f; - - // Obtain IPD from profile. - Ptr<Profile> profile = *GetProfileAddRef(); - - if (profile) - { - hmdInfo->InterpupillaryDistance = profile->GetIPD(); - // TBD: Switch on EyeCup type. - } - - if (Contents & Contents_Distortion) - { - memcpy(hmdInfo->DistortionK, DistortionK, sizeof(float)*4); - hmdInfo->EyeToScreenDistance = EyeToScreenDistance; - } - else - { - if (is7Inch) - { - // 7" screen. - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.22f; - hmdInfo->DistortionK[2] = 0.24f; - hmdInfo->EyeToScreenDistance = 0.041f; - } - else - { - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.18f; - hmdInfo->DistortionK[2] = 0.115f; - - if (HResolution == 1920) - hmdInfo->EyeToScreenDistance = 0.040f; - else - hmdInfo->EyeToScreenDistance = 0.0387f; - } - } - - hmdInfo->ChromaAbCorrection[0] = 0.996f; - hmdInfo->ChromaAbCorrection[1] = -0.004f; - hmdInfo->ChromaAbCorrection[2] = 1.014f; - hmdInfo->ChromaAbCorrection[3] = 0.0f; - - OVR_strcpy(hmdInfo->DisplayDeviceName, sizeof(hmdInfo->DisplayDeviceName), - DisplayDeviceName.ToCStr()); - hmdInfo->DisplayId = DisplayId; - } - - return true; -} - -//------------------------------------------------------------------------------------- -// ***** HMDDevice - -HMDDevice::HMDDevice(HMDDeviceCreateDesc* createDesc) - : OVR::DeviceImpl<OVR::HMDDevice>(createDesc, 0) -{ -} -HMDDevice::~HMDDevice() -{ -} - -bool HMDDevice::Initialize(DeviceBase* parent) -{ - pParent = parent; - - // Initialize user profile to default for device. 
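
Aside: the DistortionK[] values filled in by GetDeviceInfo() above are consumed elsewhere in LibOVR (not in this file) as an even-powered radial polynomial; roughly, a point at radius r from the lens center is rescaled as sketched here. This is for illustration only, it is not the code being removed in this diff.

// Sketch of the radial distortion function the DistortionK coefficients feed,
// as used by LibOVR's stereo utilities (approximate; illustration only).
static float DistortionFn(const float K[4], float r)
{
    float rsq = r * r;
    return r * (K[0] + rsq * (K[1] + rsq * (K[2] + rsq * K[3])));
}

// With the 7-inch defaults above, K = {1.0f, 0.22f, 0.24f, 0.0f}:
//   DistortionFn(K, 0.5f) = 0.5f * (1.0f + 0.22f*0.25f + 0.24f*0.0625f) ~= 0.535f
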
- ProfileManager* profileManager = GetManager()->GetProfileManager(); - ProfileName = profileManager->GetDefaultProfileName(getDesc()->GetProfileType()); - - return true; -} -void HMDDevice::Shutdown() -{ - ProfileName.Clear(); - pCachedProfile.Clear(); - pParent.Clear(); -} - -Profile* HMDDevice::GetProfile() const -{ - if (!pCachedProfile) - pCachedProfile = *getDesc()->GetProfileAddRef(); - return pCachedProfile.GetPtr(); -} - -const char* HMDDevice::GetProfileName() const -{ - return ProfileName.ToCStr(); -} - -bool HMDDevice::SetProfileName(const char* name) -{ - pCachedProfile.Clear(); - if (!name) - { - ProfileName.Clear(); - return 0; - } - if (GetManager()->GetProfileManager()->HasProfile(getDesc()->GetProfileType(), name)) - { - ProfileName = name; - return true; - } - return false; -} - -OVR::SensorDevice* HMDDevice::GetSensor() -{ - // Just return first sensor found since we have no way to match it yet. - OVR::SensorDevice* sensor = GetManager()->EnumerateDevices<SensorDevice>().CreateDevice(); - if (sensor) - sensor->SetCoordinateFrame(SensorDevice::Coord_HMD); - return sensor; -} - -}} // namespace OVR::Linux - - diff --git a/LibOVR/Src/OVR_Linux_HMDDevice.h b/LibOVR/Src/OVR_Linux_HMDDevice.h deleted file mode 100644 index b5c4bf1..0000000 --- a/LibOVR/Src/OVR_Linux_HMDDevice.h +++ /dev/null @@ -1,170 +0,0 @@ -/************************************************************************************ - -Filename : OVR_Linux_HMDDevice.h -Content : Linux HMDDevice implementation -Created : June 17, 2013 -Authors : Brant Lewis - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_Linux_HMDDevice_h -#define OVR_Linux_HMDDevice_h - -#include "OVR_Linux_DeviceManager.h" -#include "OVR_Profile.h" - -namespace OVR { namespace Linux { - -class HMDDevice; - -//------------------------------------------------------------------------------------- - -// HMDDeviceFactory enumerates attached Oculus HMD devices. -// -// This is currently done by matching monitor device strings. - -class HMDDeviceFactory : public DeviceFactory -{ -public: - static HMDDeviceFactory Instance; - - // Enumerates devices, creating and destroying relevant objects in manager. 
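
From the application's point of view, factories like this one were reached through the pre-0.3 device enumeration API; typical usage looked roughly like the sketch below (reconstructed from 0.2.x-era samples, so treat names and call order as approximate). The factory interface itself continues after the sketch.

// Approximate application-side usage of the device enumeration this factory
// participates in (pre-0.3 SDK style; illustrative only).
#include "OVR.h"

void enumerateHmdExample()
{
    OVR::System::Init();
    {
        OVR::Ptr<OVR::DeviceManager> manager = *OVR::DeviceManager::Create();
        OVR::Ptr<OVR::HMDDevice>     hmd =
            *manager->EnumerateDevices<OVR::HMDDevice>().CreateDevice();

        OVR::HMDInfo hmdInfo;
        if (hmd && hmd->GetDeviceInfo(&hmdInfo))
        {
            // hmdInfo.DisplayDeviceName / DisplayId were filled in by a factory
            // such as the one declared here.
        }
    }   // Ptr<> wrappers drop their references before shutdown.
    OVR::System::Destroy();
}
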
- virtual void EnumerateDevices(EnumerateVisitor& visitor); - -protected: - DeviceManager* getManager() const { return (DeviceManager*) pManager; } -}; - - -class HMDDeviceCreateDesc : public DeviceCreateDesc -{ - friend class HMDDevice; - -protected: - enum - { - Contents_Screen = 1, - Contents_Distortion = 2, - Contents_7Inch = 4, - }; - String DeviceId; - String DisplayDeviceName; - int DesktopX, DesktopY; - unsigned Contents; - unsigned HResolution, VResolution; - float HScreenSize, VScreenSize; - long DisplayId; - float DistortionK[4]; - float EyeToScreenDistance; - -public: - HMDDeviceCreateDesc(DeviceFactory* factory, const String& displayDeviceName, long dispId); - HMDDeviceCreateDesc(const HMDDeviceCreateDesc& other); - - virtual DeviceCreateDesc* Clone() const - { - return new HMDDeviceCreateDesc(*this); - } - - virtual DeviceBase* NewDeviceInstance(); - - virtual MatchResult MatchDevice(const DeviceCreateDesc& other, - DeviceCreateDesc**) const; - - // Matches device by path. - virtual bool MatchDevice(const String& path); - - virtual bool UpdateMatchedCandidate(const DeviceCreateDesc&, bool* newDeviceFlag = NULL); - - virtual bool GetDeviceInfo(DeviceInfo* info) const; - - // Requests the currently used default profile. This profile affects the - // settings reported by HMDInfo. - Profile* GetProfileAddRef() const; - - ProfileType GetProfileType() const - { - return (HResolution >= 1920) ? Profile_RiftDKHD : Profile_RiftDK1; - } - - - void SetScreenParameters(int x, int y, unsigned hres, unsigned vres, float hsize, float vsize) - { - DesktopX = x; - DesktopY = y; - HResolution = hres; - VResolution = vres; - HScreenSize = hsize; - VScreenSize = vsize; - Contents |= Contents_Screen; - } - void SetDistortion(float eye2screen, const float* dks) - { - EyeToScreenDistance = eye2screen; - - for (int i = 0; i < 4; i++) - DistortionK[i] = dks[i]; - Contents |= Contents_Distortion; - } - - void Set7Inch() { Contents |= Contents_7Inch; } - - bool Is7Inch() const; -}; - - -//------------------------------------------------------------------------------------- - -// HMDDevice represents an Oculus HMD device unit. An instance of this class -// is typically created from the DeviceManager. -// After HMD device is created, we its sensor data can be obtained by -// first creating a Sensor object and then wrappig it in SensorFusion. - -class HMDDevice : public DeviceImpl<OVR::HMDDevice> -{ -public: - HMDDevice(HMDDeviceCreateDesc* createDesc); - ~HMDDevice(); - - virtual bool Initialize(DeviceBase* parent); - virtual void Shutdown(); - - // Requests the currently used default profile. This profile affects the - // settings reported by HMDInfo. - virtual Profile* GetProfile() const; - virtual const char* GetProfileName() const; - virtual bool SetProfileName(const char* name); - - // Query associated sensor. - virtual OVR::SensorDevice* GetSensor(); - -protected: - HMDDeviceCreateDesc* getDesc() const { return (HMDDeviceCreateDesc*)pCreateDesc.GetPtr(); } - - // User name for the profile used with this device. 
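
For completeness, the profile hooks declared above are what an application (and GetDeviceInfo() in the .cpp) use to pull per-user settings such as IPD; a small hypothetical sketch, assuming a valid device pointer and the LibOVR headers already included above. The remaining data members follow.

// Hypothetical sketch of reading per-user settings through the profile
// interface declared above (illustrative only).
void readProfileExample(OVR::HMDDevice* hmd)
{
    OVR::Profile* profile = hmd->GetProfile();     // default profile for this device, cached
    if (profile)
    {
        float       ipd  = profile->GetIPD();      // interpupillary distance in meters
        const char* name = hmd->GetProfileName();
        OVR_UNUSED(ipd);
        OVR_UNUSED(name);
    }
}
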
- String ProfileName; - mutable Ptr<Profile> pCachedProfile; -}; - - -}} // namespace OVR::Linux - -#endif // OVR_Linux_HMDDevice_h - diff --git a/LibOVR/Src/OVR_Linux_SensorDevice.cpp b/LibOVR/Src/OVR_Linux_SensorDevice.cpp deleted file mode 100644 index 376e4d4..0000000 --- a/LibOVR/Src/OVR_Linux_SensorDevice.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/************************************************************************************ - -Filename : OVR_Linux_SensorDevice.cpp -Content : Linux SensorDevice implementation -Created : June 13, 2013 -Authors : Brant Lewis - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_Linux_HMDDevice.h" -#include "OVR_SensorImpl.h" -#include "OVR_DeviceImpl.h" - -namespace OVR { namespace OSX { - -} // namespace OSX - -//------------------------------------------------------------------------------------- -void SensorDeviceImpl::EnumerateHMDFromSensorDisplayInfo( const SensorDisplayInfoImpl& displayInfo, - DeviceFactory::EnumerateVisitor& visitor) -{ - - Linux::HMDDeviceCreateDesc hmdCreateDesc(&Linux::HMDDeviceFactory::Instance, "", 0); - - hmdCreateDesc.SetScreenParameters( 0, 0, - displayInfo.HResolution, displayInfo.VResolution, - displayInfo.HScreenSize, displayInfo.VScreenSize); - - if ((displayInfo.DistortionType & SensorDisplayInfoImpl::Mask_BaseFmt) & SensorDisplayInfoImpl::Base_Distortion) - hmdCreateDesc.SetDistortion(displayInfo.EyeToScreenDistance[0], displayInfo.DistortionK); - if (displayInfo.HScreenSize > 0.14f) - hmdCreateDesc.Set7Inch(); - - visitor.Visit(hmdCreateDesc); -} - -} // namespace OVR - - diff --git a/LibOVR/Src/OVR_OSX_DeviceManager.cpp b/LibOVR/Src/OVR_OSX_DeviceManager.cpp deleted file mode 100644 index 1e1f6d6..0000000 --- a/LibOVR/Src/OVR_OSX_DeviceManager.cpp +++ /dev/null @@ -1,360 +0,0 @@ -/************************************************************************************ - -Filename : OVR_OSX_DeviceManager.cpp -Content : OSX specific DeviceManager implementation. -Created : March 14, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_OSX_DeviceManager.h" - -// Sensor & HMD Factories -#include "OVR_LatencyTestImpl.h" -#include "OVR_SensorImpl.h" -#include "OVR_OSX_HMDDevice.h" -#include "OVR_OSX_HIDDevice.h" - -#include "Kernel/OVR_Timer.h" -#include "Kernel/OVR_Std.h" -#include "Kernel/OVR_Log.h" - -#include <IOKit/hid/IOHIDManager.h> -#include <IOKit/hid/IOHIDKeys.h> - - -namespace OVR { namespace OSX { - -//------------------------------------------------------------------------------------- -// **** OSX::DeviceManager - -DeviceManager::DeviceManager() -{ -} - -DeviceManager::~DeviceManager() -{ - OVR_DEBUG_LOG(("OSX::DeviceManager::~DeviceManager was called")); -} - -bool DeviceManager::Initialize(DeviceBase*) -{ - if (!DeviceManagerImpl::Initialize(0)) - return false; - - // Start the background thread. - pThread = *new DeviceManagerThread(); - if (!pThread || !pThread->Start()) - return false; - - // Wait for the thread to be fully up and running. - pThread->StartupEvent.Wait(); - - // Do this now that we know the thread's run loop. - HidDeviceManager = *HIDDeviceManager::CreateInternal(this); - - CGDisplayRegisterReconfigurationCallback(displayReconfigurationCallBack, this); - - pCreateDesc->pDevice = this; - LogText("OVR::DeviceManager - initialized.\n"); - - return true; -} - -void DeviceManager::Shutdown() -{ - LogText("OVR::DeviceManager - shutting down.\n"); - - CGDisplayRemoveReconfigurationCallback(displayReconfigurationCallBack, this); - - // Set Manager shutdown marker variable; this prevents - // any existing DeviceHandle objects from accessing device. - pCreateDesc->pLock->pManager = 0; - - // Push for thread shutdown *WITH NO WAIT*. - // This will have the following effect: - // - Exit command will get enqueued, which will be executed later on the thread itself. - // - Beyond this point, this DeviceManager object may be deleted by our caller. - // - Other commands, such as CreateDevice, may execute before ExitCommand, but they will - // fail gracefully due to pLock->pManager == 0. Future commands can't be enqued - // after pManager is null. - // - Once ExitCommand executes, ThreadCommand::Run loop will exit and release the last - // reference to the thread object. - pThread->Shutdown(); - pThread.Clear(); - - DeviceManagerImpl::Shutdown(); -} - -ThreadCommandQueue* DeviceManager::GetThreadQueue() -{ - return pThread; -} - -ThreadId DeviceManager::GetThreadId() const -{ - return pThread->GetThreadId(); -} - -bool DeviceManager::GetDeviceInfo(DeviceInfo* info) const -{ - if ((info->InfoClassType != Device_Manager) && - (info->InfoClassType != Device_None)) - return false; - - info->Type = Device_Manager; - info->Version = 0; - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, "DeviceManager"); - OVR_strcpy(info->Manufacturer,DeviceInfo::MaxNameLength, "Oculus VR, Inc."); - return true; -} - -DeviceEnumerator<> DeviceManager::EnumerateDevicesEx(const DeviceEnumerationArgs& args) -{ - // TBD: Can this be avoided in the future, once proper device notification is in place? 
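
The display hot-plug half of this detection, registered in Initialize() above, rests on a CoreGraphics reconfiguration callback. Reduced to its essentials it looks like the following sketch (illustrative, outside the manager); the enumeration call then continues below.

// Minimal sketch of the display reconfiguration notifications that
// DeviceManager::Initialize() above registers for. Illustrative only.
#include <CoreGraphics/CGDisplayConfiguration.h>
#include <stdio.h>

static void onDisplayChange(CGDirectDisplayID display,
                            CGDisplayChangeSummaryFlags flags,
                            void* userInfo)
{
    (void)userInfo;                                  // unused in this sketch
    if (flags & kCGDisplayAddFlag)
        printf("Display added, id = %u\n", (unsigned)display);
    else if (flags & kCGDisplayRemoveFlag)
        printf("Display removed, id = %u\n", (unsigned)display);
    // LibOVR re-runs EnumerateDevices<HMDDevice>() here to pick up the change.
}

static void installDisplayWatcher(void* context)
{
    CGDisplayRegisterReconfigurationCallback(onDisplayChange, context);
    // Paired with CGDisplayRemoveReconfigurationCallback(onDisplayChange, context) at shutdown.
}
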
- pThread->PushCall((DeviceManagerImpl*)this, - &DeviceManager::EnumerateAllFactoryDevices, true); - - return DeviceManagerImpl::EnumerateDevicesEx(args); -} - -void DeviceManager::displayReconfigurationCallBack (CGDirectDisplayID display, - CGDisplayChangeSummaryFlags flags, - void *userInfo) -{ - DeviceManager* manager = reinterpret_cast<DeviceManager*>(userInfo); - OVR_UNUSED(manager); - - if (flags & kCGDisplayAddFlag) - { - LogText("Display Added, id = %d\n", int(display)); - manager->EnumerateDevices<HMDDevice>(); - } - else if (flags & kCGDisplayRemoveFlag) - { - LogText("Display Removed, id = %d\n", int(display)); - manager->EnumerateDevices<HMDDevice>(); - } -} - -//------------------------------------------------------------------------------------- -// ***** DeviceManager Thread - -DeviceManagerThread::DeviceManagerThread() - : Thread(ThreadStackSize) -{ -} - -DeviceManagerThread::~DeviceManagerThread() -{ -} - -int DeviceManagerThread::Run() -{ - - SetThreadName("OVR::DeviceManagerThread"); - LogText("OVR::DeviceManagerThread - running (ThreadId=0x%p).\n", GetThreadId()); - - // Store out the run loop ref. - RunLoop = CFRunLoopGetCurrent(); - - // Create a 'source' to enable us to signal the run loop to process the command queue. - CFRunLoopSourceContext sourceContext; - memset(&sourceContext, 0, sizeof(sourceContext)); - sourceContext.version = 0; - sourceContext.info = this; - sourceContext.perform = &staticCommandQueueSourceCallback; - - CommandQueueSource = CFRunLoopSourceCreate(kCFAllocatorDefault, 0 , &sourceContext); - - CFRunLoopAddSource(RunLoop, CommandQueueSource, kCFRunLoopDefaultMode); - - - // Signal to the parent thread that initialization has finished. - StartupEvent.SetEvent(); - - - ThreadCommand::PopBuffer command; - - while(!IsExiting()) - { - // PopCommand will reset event on empty queue. - if (PopCommand(&command)) - { - command.Execute(); - } - else - { - SInt32 exitReason = 0; - do { - - UInt32 waitMs = INT_MAX; - - // If devices have time-dependent logic registered, get the longest wait - // allowed based on current ticks. - if (!TicksNotifiers.IsEmpty()) - { - UInt64 ticksMks = Timer::GetTicks(); - UInt32 waitAllowed; - - for (UPInt j = 0; j < TicksNotifiers.GetSize(); j++) - { - waitAllowed = (UInt32)(TicksNotifiers[j]->OnTicks(ticksMks) / Timer::MksPerMs); - if (waitAllowed < waitMs) - waitMs = waitAllowed; - } - } - - // Enter blocking run loop. We may continue until we timeout in which - // case it's time to service the ticks. Or if commands arrive in the command - // queue then the source callback will call 'CFRunLoopStop' causing this - // to return. - CFTimeInterval blockInterval = 0.001 * (double) waitMs; - exitReason = CFRunLoopRunInMode(kCFRunLoopDefaultMode, blockInterval, false); - - if (exitReason == kCFRunLoopRunFinished) - { - // Maybe this will occur during shutdown? - break; - } - else if (exitReason == kCFRunLoopRunStopped ) - { - // Commands need processing or we're being shutdown. - break; - } - else if (exitReason == kCFRunLoopRunTimedOut) - { - // Timed out so that we can service our ticks callbacks. - continue; - } - else if (exitReason == kCFRunLoopRunHandledSource) - { - // Should never occur since the last param when we call - // 'CFRunLoopRunInMode' is false. 
- OVR_ASSERT(false); - break; - } - else - { - OVR_ASSERT_LOG(false, ("CFRunLoopRunInMode returned unexpected code")); - break; - } - } - while(true); - } - } - - - CFRunLoopRemoveSource(RunLoop, CommandQueueSource, kCFRunLoopDefaultMode); - CFRelease(CommandQueueSource); - - LogText("OVR::DeviceManagerThread - exiting (ThreadId=0x%p).\n", GetThreadId()); - - return 0; -} - -void DeviceManagerThread::staticCommandQueueSourceCallback(void* pContext) -{ - DeviceManagerThread* pThread = (DeviceManagerThread*) pContext; - pThread->commandQueueSourceCallback(); -} - -void DeviceManagerThread::commandQueueSourceCallback() -{ - CFRunLoopStop(RunLoop); -} - -bool DeviceManagerThread::AddTicksNotifier(Notifier* notify) -{ - TicksNotifiers.PushBack(notify); - return true; -} - -bool DeviceManagerThread::RemoveTicksNotifier(Notifier* notify) -{ - for (UPInt i = 0; i < TicksNotifiers.GetSize(); i++) - { - if (TicksNotifiers[i] == notify) - { - TicksNotifiers.RemoveAt(i); - return true; - } - } - return false; -} - -void DeviceManagerThread::Shutdown() -{ - // Push for thread shutdown *WITH NO WAIT*. - // This will have the following effect: - // - Exit command will get enqueued, which will be executed later on the thread itself. - // - Beyond this point, this DeviceManager object may be deleted by our caller. - // - Other commands, such as CreateDevice, may execute before ExitCommand, but they will - // fail gracefully due to pLock->pManager == 0. Future commands can't be enqued - // after pManager is null. - // - Once ExitCommand executes, ThreadCommand::Run loop will exit and release the last - // reference to the thread object. - PushExitCommand(false); - - // make sure CFRunLoopRunInMode is woken up - CFRunLoopSourceSignal(CommandQueueSource); - CFRunLoopWakeUp(RunLoop); -} - -} // namespace OSX - - -//------------------------------------------------------------------------------------- -// ***** Creation - -// Creates a new DeviceManager and initializes OVR. -DeviceManager* DeviceManager::Create() -{ - - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "DeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<OSX::DeviceManager> manager = *new OSX::DeviceManager; - - if (manager) - { - if (manager->Initialize(0)) - { - manager->AddFactory(&LatencyTestDeviceFactory::Instance); - manager->AddFactory(&SensorDeviceFactory::Instance); - manager->AddFactory(&OSX::HMDDeviceFactory::Instance); - - manager->AddRef(); - } - else - { - manager.Clear(); - } - } - - return manager.GetPtr(); -} - -} // namespace OVR diff --git a/LibOVR/Src/OVR_OSX_DeviceManager.h b/LibOVR/Src/OVR_OSX_DeviceManager.h deleted file mode 100644 index ff6a577..0000000 --- a/LibOVR/Src/OVR_OSX_DeviceManager.h +++ /dev/null @@ -1,130 +0,0 @@ -/************************************************************************************ - -Filename : OVR_OSX_DeviceManager.h -Content : OSX specific DeviceManager header. -Created : March 14, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_OSX_DeviceManager_h -#define OVR_OSX_DeviceManager_h - -#include "OVR_DeviceImpl.h" - -#include "Kernel/OVR_Timer.h" - -#include <IOKit/hid/IOHIDManager.h> -#include <CoreGraphics/CGDirectDisplay.h> -#include <CoreGraphics/CGDisplayConfiguration.h> - - -namespace OVR { namespace OSX { - -class DeviceManagerThread; - -//------------------------------------------------------------------------------------- -// ***** OSX DeviceManager - -class DeviceManager : public DeviceManagerImpl -{ -public: - DeviceManager(); - ~DeviceManager(); - - // Initialize/Shutdown manager thread. - virtual bool Initialize(DeviceBase* parent); - virtual void Shutdown(); - - virtual ThreadCommandQueue* GetThreadQueue(); - virtual ThreadId GetThreadId() const; - - virtual DeviceEnumerator<> EnumerateDevicesEx(const DeviceEnumerationArgs& args); - - virtual bool GetDeviceInfo(DeviceInfo* info) const; - -protected: - static void displayReconfigurationCallBack (CGDirectDisplayID display, - CGDisplayChangeSummaryFlags flags, - void *userInfo); - -public: // data - Ptr<DeviceManagerThread> pThread; -}; - -//------------------------------------------------------------------------------------- -// ***** Device Manager Background Thread - -class DeviceManagerThread : public Thread, public ThreadCommandQueue -{ - friend class DeviceManager; - enum { ThreadStackSize = 32 * 1024 }; -public: - DeviceManagerThread(); - ~DeviceManagerThread(); - - virtual int Run(); - - // ThreadCommandQueue notifications for CommandEvent handling. - virtual void OnPushNonEmpty_Locked() - { - CFRunLoopSourceSignal(CommandQueueSource); - CFRunLoopWakeUp(RunLoop); - } - - virtual void OnPopEmpty_Locked() {} - - - // Notifier used for different updates (EVENT or regular timing or messages). - class Notifier - { - public: - - // Called when timing ticks are updated. // Returns the largest number of microseconds - // this function can wait till next call. - virtual UInt64 OnTicks(UInt64 ticksMks) - { OVR_UNUSED1(ticksMks); return Timer::MksPerSecond * 1000; } - }; - - // Add notifier that will be called at regular intervals. - bool AddTicksNotifier(Notifier* notify); - bool RemoveTicksNotifier(Notifier* notify); - - CFRunLoopRef GetRunLoop() - { return RunLoop; } - - void Shutdown(); -private: - CFRunLoopRef RunLoop; - - CFRunLoopSourceRef CommandQueueSource; - - static void staticCommandQueueSourceCallback(void* pContext); - void commandQueueSourceCallback(); - - Event StartupEvent; - - // Ticks notifiers. Used for time-dependent events such as keep-alive. - Array<Notifier*> TicksNotifiers; -}; - -}} // namespace OSX::OVR - -#endif // OVR_OSX_DeviceManager_h diff --git a/LibOVR/Src/OVR_OSX_HIDDevice.cpp b/LibOVR/Src/OVR_OSX_HIDDevice.cpp deleted file mode 100644 index 38b1b3e..0000000 --- a/LibOVR/Src/OVR_OSX_HIDDevice.cpp +++ /dev/null @@ -1,923 +0,0 @@ -/************************************************************************************ -Filename : OVR_OSX_HIDDevice.cpp -Content : OSX HID device implementation. 
-Created : February 26, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_OSX_HIDDevice.h" - -#include <IOKit/usb/IOUSBLib.h> - -namespace OVR { namespace OSX { - -static const UInt32 MAX_QUEUED_INPUT_REPORTS = 5; - -//------------------------------------------------------------------------------------- -// **** OSX::DeviceManager - -HIDDeviceManager::HIDDeviceManager(DeviceManager* manager) - : DevManager(manager) -{ - HIDManager = NULL; -} - -HIDDeviceManager::~HIDDeviceManager() -{ -} - -CFRunLoopRef HIDDeviceManager::getRunLoop() -{ - if (DevManager != NULL) - { - return DevManager->pThread->GetRunLoop(); - } - - return CFRunLoopGetCurrent(); -} - -bool HIDDeviceManager::initializeManager() -{ - if (HIDManager != NULL) - { - return true; - } - - HIDManager = IOHIDManagerCreate(kCFAllocatorDefault, kIOHIDOptionsTypeNone); - - if (!HIDManager) - { - return false; - } - - // Create a Matching Dictionary - CFMutableDictionaryRef matchDict = - CFDictionaryCreateMutable(kCFAllocatorDefault, - 2, - &kCFTypeDictionaryKeyCallBacks, - &kCFTypeDictionaryValueCallBacks); - - // Specify a device manufacturer in the Matching Dictionary - UInt32 vendorId = Oculus_VendorId; - CFNumberRef vendorIdRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &vendorId); - CFDictionarySetValue(matchDict, - CFSTR(kIOHIDVendorIDKey), - vendorIdRef); - // Register the Matching Dictionary to the HID Manager - IOHIDManagerSetDeviceMatching(HIDManager, matchDict); - CFRelease(vendorIdRef); - CFRelease(matchDict); - - // Register a callback for USB device detection with the HID Manager - IOHIDManagerRegisterDeviceMatchingCallback(HIDManager, &staticDeviceMatchingCallback, this); - - IOHIDManagerScheduleWithRunLoop(HIDManager, getRunLoop(), kCFRunLoopDefaultMode); - - return true; -} - -bool HIDDeviceManager::Initialize() -{ - return initializeManager(); -} - -void HIDDeviceManager::Shutdown() -{ - OVR_ASSERT_LOG(HIDManager, ("Should have called 'Initialize' before 'Shutdown'.")); - CFRelease(HIDManager); - - LogText("OVR::OSX::HIDDeviceManager - shutting down.\n"); -} - -bool HIDDeviceManager::getIntProperty(IOHIDDeviceRef device, CFStringRef propertyName, SInt32* pResult) -{ - - CFTypeRef ref = IOHIDDeviceGetProperty(device, propertyName); - - if (!ref) - { - return false; - } - - if (CFGetTypeID(ref) != CFNumberGetTypeID()) - { - return false; - } - - CFNumberGetValue((CFNumberRef) ref, kCFNumberSInt32Type, pResult); - - return true; -} - -bool HIDDeviceManager::initVendorProductVersion(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc) -{ - - if (!getVendorId(device, &(pDevDesc->VendorId))) - { - return false; - } - - if (!getProductId(device, &(pDevDesc->ProductId))) - { 
- return false; - } - - SInt32 result; - if (!getIntProperty(device, CFSTR(kIOHIDVersionNumberKey), &result)) - { - return false; - } - pDevDesc->VersionNumber = result; - - return true; -} - -bool HIDDeviceManager::initUsage(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc) -{ - - SInt32 result; - - if (!getIntProperty(device, CFSTR(kIOHIDPrimaryUsagePageKey), &result)) - { - return false; - } - - pDevDesc->UsagePage = result; - - - if (!getIntProperty(device, CFSTR(kIOHIDPrimaryUsageKey), &result)) - { - return false; - } - - pDevDesc->Usage = result; - - return true; -} - -bool HIDDeviceManager::initSerialNumber(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc) -{ - return getSerialNumberString(device, &(pDevDesc->SerialNumber)); -} - -bool HIDDeviceManager::initStrings(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc) -{ - - // Regardless of whether they fail we'll try and get the remaining. - getStringProperty(device, CFSTR(kIOHIDManufacturerKey), &(pDevDesc->Manufacturer)); - getStringProperty(device, CFSTR(kIOHIDProductKey), &(pDevDesc->Product)); - - return true; -} - -bool HIDDeviceManager::getStringProperty(IOHIDDeviceRef device, - CFStringRef propertyName, - String* pResult) -{ - - CFStringRef str = (CFStringRef) IOHIDDeviceGetProperty(device, propertyName); - - if (!str) - { - return false; - } - - CFIndex length = CFStringGetLength(str); - CFRange range = CFRangeMake(0, length); - - // Test the conversion first to get required buffer size. - CFIndex bufferLength; - CFIndex numberOfChars = CFStringGetBytes(str, - range, - kCFStringEncodingUTF8, - (char) '?', - FALSE, - NULL, - 0, - &bufferLength); - - if (numberOfChars == 0) - { - return false; - } - - // Now allocate buffer. - char* buffer = new char[bufferLength+1]; - - numberOfChars = CFStringGetBytes(str, - range, - kCFStringEncodingUTF8, - (char) '?', - FALSE, - (UInt8*) buffer, - bufferLength, - NULL); - OVR_ASSERT_LOG(numberOfChars != 0, ("CFStringGetBytes failed.")); - - buffer[bufferLength] = '\0'; - *pResult = String(buffer); - - return true; -} - -bool HIDDeviceManager::getVendorId(IOHIDDeviceRef device, UInt16* pResult) -{ - SInt32 result; - - if (!getIntProperty(device, CFSTR(kIOHIDVendorIDKey), &result)) - { - return false; - } - - *pResult = result; - - return true; -} - -bool HIDDeviceManager::getProductId(IOHIDDeviceRef device, UInt16* pResult) -{ - SInt32 result; - - if (!getIntProperty(device, CFSTR(kIOHIDProductIDKey), &result)) - { - return false; - } - - *pResult = result; - - return true; -} - -bool HIDDeviceManager::getLocationId(IOHIDDeviceRef device, SInt32* pResult) -{ - SInt32 result; - - if (!getIntProperty(device, CFSTR(kIOHIDLocationIDKey), &result)) - { - return false; - } - - *pResult = result; - - return true; -} - -bool HIDDeviceManager::getSerialNumberString(IOHIDDeviceRef device, String* pResult) -{ - - if (!getStringProperty(device, CFSTR(kIOHIDSerialNumberKey), pResult)) - { - return false; - } - - return true; -} - -bool HIDDeviceManager::getPath(IOHIDDeviceRef device, String* pPath) -{ - - String transport; - if (!getStringProperty(device, CFSTR(kIOHIDTransportKey), &transport)) - { - return false; - } - - UInt16 vendorId; - if (!getVendorId(device, &vendorId)) - { - return false; - } - - UInt16 productId; - if (!getProductId(device, &productId)) - { - return false; - } - - String serialNumber; - if (!getSerialNumberString(device, &serialNumber)) - { - return false; - } - - - StringBuffer buffer; - buffer.AppendFormat("%s:vid=%04hx:pid=%04hx:ser=%s", - transport.ToCStr(), - vendorId, - 
productId, - serialNumber.ToCStr()); - - *pPath = String(buffer); - - return true; -} - -bool HIDDeviceManager::Enumerate(HIDEnumerateVisitor* enumVisitor) -{ - if (!initializeManager()) - { - return false; - } - - - CFSetRef deviceSet = IOHIDManagerCopyDevices(HIDManager); - if (!deviceSet) - return false; - - CFIndex deviceCount = CFSetGetCount(deviceSet); - - // Allocate a block of memory and read the set into it. - IOHIDDeviceRef* devices = (IOHIDDeviceRef*) OVR_ALLOC(sizeof(IOHIDDeviceRef) * deviceCount); - CFSetGetValues(deviceSet, (const void **) devices); - - - // Iterate over devices. - for (CFIndex deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++) - { - IOHIDDeviceRef hidDev = devices[deviceIndex]; - - if (!hidDev) - { - continue; - } - - HIDDeviceDesc devDesc; - - if (getPath(hidDev, &(devDesc.Path)) && - initVendorProductVersion(hidDev, &devDesc) && - enumVisitor->MatchVendorProduct(devDesc.VendorId, devDesc.ProductId) && - initUsage(hidDev, &devDesc)) - { - initStrings(hidDev, &devDesc); - initSerialNumber(hidDev, &devDesc); - - // Look for the device to check if it is already opened. - Ptr<DeviceCreateDesc> existingDevice = DevManager->FindHIDDevice(devDesc, true); - // if device exists and it is opened then most likely the CreateHIDFile - // will fail; therefore, we just set Enumerated to 'true' and continue. - if (existingDevice && existingDevice->pDevice) - { - existingDevice->Enumerated = true; - continue; - } - - // open the device temporarily for startup communication - if (IOHIDDeviceOpen(hidDev, kIOHIDOptionsTypeSeizeDevice) == kIOReturnSuccess) - { - // Construct minimal device that the visitor callback can get feature reports from. - OSX::HIDDevice device(this, hidDev); - - enumVisitor->Visit(device, devDesc); - - IOHIDDeviceClose(hidDev, kIOHIDOptionsTypeSeizeDevice); - } - } - } - - OVR_FREE(devices); - CFRelease(deviceSet); - - return true; -} - -OVR::HIDDevice* HIDDeviceManager::Open(const String& path) -{ - - Ptr<OSX::HIDDevice> device = *new OSX::HIDDevice(this); - - if (!device->HIDInitialize(path)) - { - return NULL; - } - - device->AddRef(); - - return device; -} - -bool HIDDeviceManager::getFullDesc(IOHIDDeviceRef device, HIDDeviceDesc* desc) -{ - - if (!initVendorProductVersion(device, desc)) - { - return false; - } - - if (!initUsage(device, desc)) - { - return false; - } - - if (!initSerialNumber(device, desc)) - { - return false; - } - - initStrings(device, desc); - - return true; -} - -// New USB device specified in the matching dictionary has been added (callback function) -void HIDDeviceManager::staticDeviceMatchingCallback(void *inContext, - IOReturn inResult, - void *inSender, - IOHIDDeviceRef inIOHIDDeviceRef) -{ - HIDDeviceManager* hidMgr = static_cast<HIDDeviceManager*>(inContext); - HIDDeviceDesc hidDevDesc; - hidMgr->getPath(inIOHIDDeviceRef, &hidDevDesc.Path); - hidMgr->getFullDesc(inIOHIDDeviceRef, &hidDevDesc); - - hidMgr->DevManager->DetectHIDDevice(hidDevDesc); -} - -//------------------------------------------------------------------------------------- -// **** OSX::HIDDevice - -HIDDevice::HIDDevice(HIDDeviceManager* manager) - : HIDManager(manager), InMinimalMode(false) -{ - Device = NULL; - RepluggedNotificationPort = 0; -} - -// This is a minimal constructor used during enumeration for us to pass -// a HIDDevice to the visit function (so that it can query feature reports). 
-HIDDevice::HIDDevice(HIDDeviceManager* manager, IOHIDDeviceRef device) -: HIDManager(manager), Device(device), InMinimalMode(true) -{ - RepluggedNotificationPort = 0; -} - -HIDDevice::~HIDDevice() -{ - if (!InMinimalMode) - { - HIDShutdown(); - } -} - -bool HIDDevice::HIDInitialize(const String& path) -{ - - DevDesc.Path = path; - - if (!openDevice()) - { - LogText("OVR::OSX::HIDDevice - Failed to open HIDDevice: %s", path.ToCStr()); - return false; - } - - // Setup notification for when a device is unplugged and plugged back in. - if (!setupDevicePluggedInNotification()) - { - LogText("OVR::OSX::HIDDevice - Failed to setup notification for when device plugged back in."); - closeDevice(false); - return false; - } - - HIDManager->DevManager->pThread->AddTicksNotifier(this); - - - LogText("OVR::OSX::HIDDevice - Opened '%s'\n" - " Manufacturer:'%s' Product:'%s' Serial#:'%s'\n", - DevDesc.Path.ToCStr(), - DevDesc.Manufacturer.ToCStr(), DevDesc.Product.ToCStr(), - DevDesc.SerialNumber.ToCStr()); - - return true; -} - -bool HIDDevice::initInfo() -{ - // Device must have been successfully opened. - OVR_ASSERT(Device); - - - // Get report lengths. - SInt32 bufferLength; - bool getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxInputReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - InputReportBufferLength = (UInt16) bufferLength; - - getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxOutputReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - OutputReportBufferLength = (UInt16) bufferLength; - - getResult = HIDManager->getIntProperty(Device, CFSTR(kIOHIDMaxFeatureReportSizeKey), &bufferLength); - OVR_ASSERT(getResult); - FeatureReportBufferLength = (UInt16) bufferLength; - - - if (ReadBufferSize < InputReportBufferLength) - { - OVR_ASSERT_LOG(false, ("Input report buffer length is bigger than read buffer.")); - return false; - } - - // Get device desc. - if (!HIDManager->getFullDesc(Device, &DevDesc)) - { - OVR_ASSERT_LOG(false, ("Failed to get device desc while initializing device.")); - return false; - } - - return true; -} - -void HIDDevice::staticDeviceAddedCallback(void* pContext, io_iterator_t iterator) -{ - HIDDevice* pDevice = (HIDDevice*) pContext; - pDevice->deviceAddedCallback(iterator); -} - -void HIDDevice::deviceAddedCallback(io_iterator_t iterator) -{ - - if (Device == NULL) - { - if (openDevice()) - { - LogText("OVR::OSX::HIDDevice - Reopened device : %s", DevDesc.Path.ToCStr()); - - Ptr<DeviceCreateDesc> existingHIDDev = HIDManager->DevManager->FindHIDDevice(DevDesc, true); - if (existingHIDDev && existingHIDDev->pDevice) - { - HIDManager->DevManager->CallOnDeviceAdded(existingHIDDev); - } - } - } - - // Reset callback. - while (IOIteratorNext(iterator)) - ; -} - -bool HIDDevice::openDevice() -{ - - // Have to iterate through devices again to generate paths. - CFSetRef deviceSet = IOHIDManagerCopyDevices(HIDManager->HIDManager); - CFIndex deviceCount = CFSetGetCount(deviceSet); - - // Allocate a block of memory and read the set into it. - IOHIDDeviceRef* devices = (IOHIDDeviceRef*) OVR_ALLOC(sizeof(IOHIDDeviceRef) * deviceCount); - CFSetGetValues(deviceSet, (const void **) devices); - - - // Iterate over devices. 
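// (Note: openDevice() re-locates the IOHIDDeviceRef by matching each
// candidate's "transport:vid=....:pid=....:ser=..." path, as built by
// HIDDeviceManager::getPath() above, against the DevDesc.Path that was
// recorded when the device was first opened.)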
- IOHIDDeviceRef device = NULL; - - for (CFIndex deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++) - { - IOHIDDeviceRef tmpDevice = devices[deviceIndex]; - - if (!tmpDevice) - { - continue; - } - - String path; - if (!HIDManager->getPath(tmpDevice, &path)) - { - continue; - } - - if (path == DevDesc.Path) - { - device = tmpDevice; - break; - } - } - - - OVR_FREE(devices); - - if (!device) - { - CFRelease(deviceSet); - return false; - } - - // Attempt to open device. - if (IOHIDDeviceOpen(device, kIOHIDOptionsTypeSeizeDevice) - != kIOReturnSuccess) - { - CFRelease(deviceSet); - return false; - } - - // Retain the device before we release the set. - CFRetain(device); - CFRelease(deviceSet); - - - Device = device; - - - if (!initInfo()) - { - IOHIDDeviceClose(Device, kIOHIDOptionsTypeSeizeDevice); - CFRelease(Device); - Device = NULL; - return false; - } - - - // Setup the Run Loop and callbacks. - IOHIDDeviceScheduleWithRunLoop(Device, - HIDManager->getRunLoop(), - kCFRunLoopDefaultMode); - - IOHIDDeviceRegisterInputReportCallback(Device, - ReadBuffer, - ReadBufferSize, - staticHIDReportCallback, - this); - - IOHIDDeviceRegisterRemovalCallback(Device, - staticDeviceRemovedCallback, - this); - - return true; -} - -void HIDDevice::HIDShutdown() -{ - - HIDManager->DevManager->pThread->RemoveTicksNotifier(this); - - if (Device != NULL) // Device may already have been closed if unplugged. - { - closeDevice(false); - } - - IOObjectRelease(RepluggedNotification); - if (RepluggedNotificationPort) - IONotificationPortDestroy(RepluggedNotificationPort); - - LogText("OVR::OSX::HIDDevice - HIDShutdown '%s'\n", DevDesc.Path.ToCStr()); -} - -bool HIDDevice::setupDevicePluggedInNotification() -{ - - // Setup notification when devices are plugged in. - RepluggedNotificationPort = IONotificationPortCreate(kIOMasterPortDefault); - - CFRunLoopSourceRef notificationRunLoopSource = - IONotificationPortGetRunLoopSource(RepluggedNotificationPort); - - CFRunLoopAddSource(HIDManager->getRunLoop(), - notificationRunLoopSource, - kCFRunLoopDefaultMode); - - CFMutableDictionaryRef matchingDict = IOServiceMatching(kIOUSBDeviceClassName); - - // Have to specify vendorId and productId. Doesn't seem to accept additional - // things like serial number. - SInt32 vendorId = DevDesc.VendorId; - CFNumberRef numberRef = CFNumberCreate(kCFAllocatorDefault, - kCFNumberSInt32Type, - &vendorId); - CFDictionarySetValue(matchingDict, CFSTR(kUSBVendorID), numberRef); - CFRelease(numberRef); - - SInt32 deviceProductId = DevDesc.ProductId; - numberRef = CFNumberCreate(kCFAllocatorDefault, - kCFNumberSInt32Type, - &deviceProductId); - CFDictionarySetValue(matchingDict, CFSTR(kUSBProductID), numberRef); - CFRelease(numberRef); - - kern_return_t result = - IOServiceAddMatchingNotification(RepluggedNotificationPort, - kIOMatchedNotification, - matchingDict, - staticDeviceAddedCallback, - this, - &RepluggedNotification); - - if (result != KERN_SUCCESS) - { - CFRelease(RepluggedNotificationPort); - RepluggedNotificationPort = 0; - return false; - } - - // Iterate through to arm. - while (IOIteratorNext(RepluggedNotification)) - { - } - - return true; -} - -void HIDDevice::closeDevice(bool wasUnplugged) -{ - OVR_ASSERT(Device != NULL); - - if (!wasUnplugged) - { - // Clear the registered callbacks. 
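// (Re-registering the report and removal callbacks with a NULL handler is how
// this code detaches them before the device is unscheduled from the run loop
// and closed; when wasUnplugged is true those IOKit teardown calls are skipped
// and only the CFRelease further down runs.)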
- IOHIDDeviceRegisterInputReportCallback(Device, - ReadBuffer, - InputReportBufferLength, - NULL, - this); - - IOHIDDeviceRegisterRemovalCallback(Device, NULL, this); - - IOHIDDeviceUnscheduleFromRunLoop(Device, - HIDManager->getRunLoop(), - kCFRunLoopDefaultMode); - IOHIDDeviceClose(Device, kIOHIDOptionsTypeNone); - } - - CFRelease(Device); - Device = NULL; - - LogText("OVR::OSX::HIDDevice - HID Device Closed '%s'\n", DevDesc.Path.ToCStr()); -} - -void HIDDevice::staticHIDReportCallback(void* pContext, - IOReturn result, - void* pSender, - IOHIDReportType reportType, - uint32_t reportId, - uint8_t* pReport, - CFIndex reportLength) -{ - HIDDevice* pDevice = (HIDDevice*) pContext; - return pDevice->hidReportCallback(pReport, (UInt32)reportLength); -} - -void HIDDevice::hidReportCallback(UByte* pData, UInt32 length) -{ - - // We got data. - if (Handler) - { - Handler->OnInputReport(pData, length); - } -} - -void HIDDevice::staticDeviceRemovedCallback(void* pContext, IOReturn result, void* pSender) -{ - HIDDevice* pDevice = (HIDDevice*) pContext; - pDevice->deviceRemovedCallback(); -} - -void HIDDevice::deviceRemovedCallback() -{ - Ptr<HIDDevice> _this(this); // prevent from release - - Ptr<DeviceCreateDesc> existingHIDDev = HIDManager->DevManager->FindHIDDevice(DevDesc, true); - if (existingHIDDev && existingHIDDev->pDevice) - { - HIDManager->DevManager->CallOnDeviceRemoved(existingHIDDev); - } - closeDevice(true); -} - -CFStringRef HIDDevice::generateRunLoopModeString(IOHIDDeviceRef device) -{ - const UInt32 safeBuffSize = 256; - char nameBuff[safeBuffSize]; - OVR_sprintf(nameBuff, safeBuffSize, "%016lX", device); - - return CFStringCreateWithCString(NULL, nameBuff, kCFStringEncodingASCII); -} - -bool HIDDevice::SetFeatureReport(UByte* data, UInt32 length) -{ - - if (!Device) - return false; - - UByte reportID = data[0]; - - if (reportID == 0) - { - // Not using reports so remove from data packet. - data++; - length--; - } - - IOReturn result = IOHIDDeviceSetReport( Device, - kIOHIDReportTypeFeature, - reportID, - data, - length); - - return (result == kIOReturnSuccess); -} - -bool HIDDevice::GetFeatureReport(UByte* data, UInt32 length) -{ - if (!Device) - return false; - - CFIndex bufferLength = length; - - // Report id is in first byte of the buffer. - IOReturn result = IOHIDDeviceGetReport(Device, kIOHIDReportTypeFeature, data[0], data, &bufferLength); - - return (result == kIOReturnSuccess); -} - -UInt64 HIDDevice::OnTicks(UInt64 ticksMks) -{ - - if (Handler) - { - return Handler->OnTicks(ticksMks); - } - - return DeviceManagerThread::Notifier::OnTicks(ticksMks); -} - -HIDDeviceManager* HIDDeviceManager::CreateInternal(OSX::DeviceManager* devManager) -{ - - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "HIDDeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<OSX::HIDDeviceManager> manager = *new OSX::HIDDeviceManager(devManager); - - if (manager) - { - if (manager->Initialize()) - { - manager->AddRef(); - } - else - { - manager.Clear(); - } - } - - return manager.GetPtr(); -} - -} // namespace OSX - -//------------------------------------------------------------------------------------- -// ***** Creation - -// Creates a new HIDDeviceManager and initializes OVR. 
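// (CreateInternal() above binds the HID manager to the OSX::DeviceManager's
// run-loop thread via getRunLoop(); the standalone Create() that follows still
// asserts "Standalone mode not implemented yet". Purely as an illustration, a
// hypothetical caller that already holds a manager would talk to a device
// through the generic OVR::HIDDevice interface along these lines:
//
//     Ptr<OVR::HIDDevice> dev = *hidManager->Open(path); // adopts the already AddRef'd pointer
//     if (dev)
//         dev->SetFeatureReport(report, reportLen);      // report[0] is the report id, or 0 if unused
//
// hidManager, path, report and reportLen are placeholders, not names from this
// file.)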
-HIDDeviceManager* HIDDeviceManager::Create() -{ - OVR_ASSERT_LOG(false, ("Standalone mode not implemented yet.")); - - if (!System::IsInitialized()) - { - // Use custom message, since Log is not yet installed. - OVR_DEBUG_STATEMENT(Log::GetDefaultLog()-> - LogMessage(Log_Debug, "HIDDeviceManager::Create failed - OVR::System not initialized"); ); - return 0; - } - - Ptr<OSX::HIDDeviceManager> manager = *new OSX::HIDDeviceManager(NULL); - - if (manager) - { - if (manager->Initialize()) - { - manager->AddRef(); - } - else - { - manager.Clear(); - } - } - - return manager.GetPtr(); -} - -} // namespace OVR diff --git a/LibOVR/Src/OVR_OSX_HIDDevice.h b/LibOVR/Src/OVR_OSX_HIDDevice.h deleted file mode 100644 index e2fac31..0000000 --- a/LibOVR/Src/OVR_OSX_HIDDevice.h +++ /dev/null @@ -1,160 +0,0 @@ -/************************************************************************************ -Filename : OVR_OSX_HIDDevice.h -Content : OSX HID device implementation. -Created : February 26, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_OSX_HIDDevice_h -#define OVR_OSX_HIDDevice_h - -#include "OVR_HIDDevice.h" - -#include "OVR_OSX_DeviceManager.h" - -#include <IOKit/IOKitLib.h> - -namespace OVR { namespace OSX { - -class HIDDeviceManager; - -//------------------------------------------------------------------------------------- -// ***** OSX HIDDevice - -class HIDDevice : public OVR::HIDDevice, public DeviceManagerThread::Notifier -{ -private: - friend class HIDDeviceManager; - -public: - HIDDevice(HIDDeviceManager* manager); - - // This is a minimal constructor used during enumeration for us to pass - // a HIDDevice to the visit function (so that it can query feature reports). 
- HIDDevice(HIDDeviceManager* manager, IOHIDDeviceRef device); - - virtual ~HIDDevice(); - - bool HIDInitialize(const String& path); - void HIDShutdown(); - - virtual bool SetFeatureReport(UByte* data, UInt32 length); - virtual bool GetFeatureReport(UByte* data, UInt32 length); - - bool Write(UByte* data, UInt32 length); - - bool Read(UByte* pData, UInt32 length, UInt32 timeoutMilliS); - bool ReadBlocking(UByte* pData, UInt32 length); - - - // DeviceManagerThread::Notifier - UInt64 OnTicks(UInt64 ticksMks); - -private: - bool initInfo(); - bool openDevice(); - void closeDevice(bool wasUnplugged); - bool setupDevicePluggedInNotification(); - CFStringRef generateRunLoopModeString(IOHIDDeviceRef device); - - static void staticHIDReportCallback(void* pContext, - IOReturn result, - void* pSender, - IOHIDReportType reportType, - uint32_t reportId, - uint8_t* pReport, - CFIndex reportLength); - void hidReportCallback(UByte* pData, UInt32 length); - - static void staticDeviceRemovedCallback(void* pContext, - IOReturn result, - void* pSender); - void deviceRemovedCallback(); - - static void staticDeviceAddedCallback(void* pContext, - io_iterator_t iterator); - void deviceAddedCallback(io_iterator_t iterator); - - bool InMinimalMode; - HIDDeviceManager* HIDManager; - IOHIDDeviceRef Device; - HIDDeviceDesc DevDesc; - - enum { ReadBufferSize = 96 }; - UByte ReadBuffer[ReadBufferSize]; - - UInt16 InputReportBufferLength; - UInt16 OutputReportBufferLength; - UInt16 FeatureReportBufferLength; - - IONotificationPortRef RepluggedNotificationPort; - io_iterator_t RepluggedNotification; -}; - - -//------------------------------------------------------------------------------------- -// ***** OSX HIDDeviceManager - -class HIDDeviceManager : public OVR::HIDDeviceManager -{ - friend class HIDDevice; - -public: - HIDDeviceManager(OSX::DeviceManager* Manager); - virtual ~HIDDeviceManager(); - - virtual bool Initialize(); - virtual void Shutdown(); - - virtual bool Enumerate(HIDEnumerateVisitor* enumVisitor); - virtual OVR::HIDDevice* Open(const String& path); - - static HIDDeviceManager* CreateInternal(DeviceManager* manager); - -private: - CFRunLoopRef getRunLoop(); - bool initializeManager(); - bool initVendorProductVersion(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc); - bool initUsage(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc); - bool initStrings(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc); - bool initSerialNumber(IOHIDDeviceRef device, HIDDeviceDesc* pDevDesc); - bool getVendorId(IOHIDDeviceRef device, UInt16* pResult); - bool getProductId(IOHIDDeviceRef device, UInt16* pResult); - bool getLocationId(IOHIDDeviceRef device, SInt32* pResult); - bool getSerialNumberString(IOHIDDeviceRef device, String* pResult); - bool getPath(IOHIDDeviceRef device, String* pPath); - bool getIntProperty(IOHIDDeviceRef device, CFStringRef key, int32_t* pResult); - bool getStringProperty(IOHIDDeviceRef device, CFStringRef propertyName, String* pResult); - bool getFullDesc(IOHIDDeviceRef device, HIDDeviceDesc* desc); - - static void staticDeviceMatchingCallback(void *inContext, - IOReturn inResult, - void *inSender, - IOHIDDeviceRef inIOHIDDeviceRef); - - DeviceManager* DevManager; - - IOHIDManagerRef HIDManager; -}; - -}} // namespace OVR::OSX - -#endif // OVR_OSX_HIDDevice_h diff --git a/LibOVR/Src/OVR_OSX_HMDDevice.cpp b/LibOVR/Src/OVR_OSX_HMDDevice.cpp deleted file mode 100644 index 214a2e7..0000000 --- a/LibOVR/Src/OVR_OSX_HMDDevice.cpp +++ /dev/null @@ -1,417 +0,0 @@ 
-/************************************************************************************ - -Filename : OVR_OSX_HMDDevice.cpp -Content : OSX Interface to HMD - detects HMD display -Created : September 21, 2012 -Authors : Michael Antonov - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_OSX_HMDDevice.h" -#include <CoreGraphics/CGDirectDisplay.h> -#include <CoreGraphics/CGDisplayConfiguration.h> -#include <CoreFoundation/CoreFoundation.h> -#include <CoreFoundation/CFString.h> -#include <IOKit/graphics/IOGraphicsLib.h> - -namespace OVR { namespace OSX { - -//------------------------------------------------------------------------------------- - -HMDDeviceCreateDesc::HMDDeviceCreateDesc(DeviceFactory* factory, - UInt32 vend, UInt32 prod, const String& displayDeviceName, long dispId) - : DeviceCreateDesc(factory, Device_HMD), - DisplayDeviceName(displayDeviceName), - DesktopX(0), DesktopY(0), Contents(0), EyeToScreenDistance(0), - HResolution(0), VResolution(0), HScreenSize(0), VScreenSize(0), - DisplayId(dispId) -{ - /* //?????????? 
- char idstring[9]; - idstring[0] = 'A'-1+((vend>>10) & 31); - idstring[1] = 'A'-1+((vend>>5) & 31); - idstring[2] = 'A'-1+((vend>>0) & 31); - snprintf(idstring+3, 5, "%04d", prod); - DeviceId = idstring;*/ - DeviceId = DisplayDeviceName; - - for (int i=0; i<4; i++) - DistortionK[i] = 0; -} - -HMDDeviceCreateDesc::HMDDeviceCreateDesc(const HMDDeviceCreateDesc& other) - : DeviceCreateDesc(other.pFactory, Device_HMD), - DeviceId(other.DeviceId), DisplayDeviceName(other.DisplayDeviceName), - DesktopX(other.DesktopX), DesktopY(other.DesktopY), Contents(other.Contents), - HResolution(other.HResolution), VResolution(other.VResolution), - HScreenSize(other.HScreenSize), VScreenSize(other.VScreenSize), - DisplayId(other.DisplayId), EyeToScreenDistance(other.EyeToScreenDistance) -{ - for (int i=0; i<4; i++) - DistortionK[i] = other.DistortionK[i]; -} - -HMDDeviceCreateDesc::MatchResult HMDDeviceCreateDesc::MatchDevice(const DeviceCreateDesc& other, - DeviceCreateDesc** pcandidate) const -{ - if ((other.Type != Device_HMD) || (other.pFactory != pFactory)) - return Match_None; - - // There are several reasons we can come in here: - // a) Matching this HMD Monitor created desc to OTHER HMD Monitor desc - // - Require exact device DeviceId/DeviceName match - // b) Matching SensorDisplayInfo created desc to OTHER HMD Monitor desc - // - This DeviceId is empty; becomes candidate - // c) Matching this HMD Monitor created desc to SensorDisplayInfo desc - // - This other.DeviceId is empty; becomes candidate - - const HMDDeviceCreateDesc& s2 = (const HMDDeviceCreateDesc&) other; - - if ((DeviceId == s2.DeviceId) && - (DisplayId == s2.DisplayId)) - { - // Non-null DeviceId may match while size is different if screen size was overwritten - // by SensorDisplayInfo in prior iteration. - if (!DeviceId.IsEmpty() || - ((HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) ) - { - *pcandidate = 0; - return Match_Found; - } - } - - - // DisplayInfo takes precedence, although we try to match it first. - if ((HResolution == s2.HResolution) && - (VResolution == s2.VResolution) && - (HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) - { - if (DeviceId.IsEmpty() && !s2.DeviceId.IsEmpty()) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - - *pcandidate = 0; - return Match_Found; - } - - // SensorDisplayInfo may override resolution settings, so store as candidiate. - if (s2.DeviceId.IsEmpty() && s2.DisplayId == 0) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - // OTHER HMD Monitor desc may initialize DeviceName/Id - else if (DeviceId.IsEmpty() && DisplayId == 0) - { - *pcandidate = const_cast<DeviceCreateDesc*>((const DeviceCreateDesc*)this); - return Match_Candidate; - } - - return Match_None; -} - - -bool HMDDeviceCreateDesc::UpdateMatchedCandidate(const DeviceCreateDesc& other, bool* newDeviceFlag) -{ - // This candidate was the the "best fit" to apply sensor DisplayInfo to. - OVR_ASSERT(other.Type == Device_HMD); - - const HMDDeviceCreateDesc& s2 = (const HMDDeviceCreateDesc&) other; - - // Force screen size on resolution from SensorDisplayInfo. - // We do this because USB detection is more reliable as compared to HDMI EDID, - // which may be corrupted by splitter reporting wrong monitor - if (s2.DeviceId.IsEmpty() && s2.DisplayId == 0) - { - // disconnected HMD: replace old descriptor by the 'fake' one. 
- HScreenSize = s2.HScreenSize; - VScreenSize = s2.VScreenSize; - Contents |= Contents_Screen; - - if (s2.Contents & HMDDeviceCreateDesc::Contents_Distortion) - { - memcpy(DistortionK, s2.DistortionK, sizeof(float)*4); - Contents |= Contents_Distortion; - } - DeviceId = s2.DeviceId; - DisplayId = s2.DisplayId; - DisplayDeviceName = s2.DisplayDeviceName; - if (newDeviceFlag) *newDeviceFlag = true; - } - else if (DeviceId.IsEmpty()) - { - // This branch is executed when 'fake' HMD descriptor is being replaced by - // the real one. - DeviceId = s2.DeviceId; - DisplayId = s2.DisplayId; - DisplayDeviceName = s2.DisplayDeviceName; - if (newDeviceFlag) *newDeviceFlag = true; - } - else - { - if (newDeviceFlag) *newDeviceFlag = false; - } - - return true; -} - - -//------------------------------------------------------------------------------------- - - -//------------------------------------------------------------------------------------- -// ***** HMDDeviceFactory - -HMDDeviceFactory HMDDeviceFactory::Instance; - -void HMDDeviceFactory::EnumerateDevices(EnumerateVisitor& visitor) -{ - CGDirectDisplayID Displays[32]; - uint32_t NDisplays = 0; - CGGetOnlineDisplayList(32, Displays, &NDisplays); - - for (int i = 0; i < NDisplays; i++) - { - io_service_t port = CGDisplayIOServicePort(Displays[i]); - CFDictionaryRef DispInfo = IODisplayCreateInfoDictionary(port, kIODisplayMatchingInfo); - - uint32_t vendor = CGDisplayVendorNumber(Displays[i]); - uint32_t product = CGDisplayModelNumber(Displays[i]); - unsigned mwidth = (unsigned)CGDisplayPixelsWide(Displays[i]); - unsigned mheight = (unsigned)CGDisplayPixelsHigh(Displays[i]); - CGRect desktop = CGDisplayBounds(Displays[i]); - - if (vendor == 16082 && ( (product == 1)||(product == 2) ) ) // 7" or HD - { - char idstring[9]; - idstring[0] = 'A'-1+((vendor>>10) & 31); - idstring[1] = 'A'-1+((vendor>>5) & 31); - idstring[2] = 'A'-1+((vendor>>0) & 31); - snprintf(idstring+3, 5, "%04d", product); - - HMDDeviceCreateDesc hmdCreateDesc(this, vendor, product, idstring, Displays[i]); - - if (product == 2) - { - hmdCreateDesc.SetScreenParameters(desktop.origin.x, desktop.origin.y, - mwidth, mheight, 0.12096f, 0.06804f); - } - else - { - if (hmdCreateDesc.Is7Inch()) - { - // Physical dimension of SLA screen. - hmdCreateDesc.SetScreenParameters(desktop.origin.x, desktop.origin.y, - mwidth, mheight, 0.14976f, 0.0936f); - } - else - { - hmdCreateDesc.SetScreenParameters(desktop.origin.x, desktop.origin.y, - mwidth, mheight, 0.12096f, 0.0756f); - } - } - - OVR_DEBUG_LOG_TEXT(("DeviceManager - HMD Found %x:%x\n", vendor, product)); - - // Notify caller about detected device. This will call EnumerateAddDevice - // if the this is the first time device was detected. - visitor.Visit(hmdCreateDesc); - } - CFRelease(DispInfo); - } -} - -DeviceBase* HMDDeviceCreateDesc::NewDeviceInstance() -{ - return new HMDDevice(this); -} - -bool HMDDeviceCreateDesc::Is7Inch() const -{ - return (strstr(DeviceId.ToCStr(), "OVR0001") != 0) || (Contents & Contents_7Inch); -} - -Profile* HMDDeviceCreateDesc::GetProfileAddRef() const -{ - // Create device may override profile name, so get it from there is possible. - ProfileManager* profileManager = GetManagerImpl()->GetProfileManager(); - ProfileType profileType = GetProfileType(); - const char * profileName = pDevice ? - ((HMDDevice*)pDevice)->GetProfileName() : - profileManager->GetDefaultProfileName(profileType); - - return profileName ? 
- profileManager->LoadProfile(profileType, profileName) : - profileManager->GetDeviceDefaultProfile(profileType); -} - -bool HMDDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const -{ - if ((info->InfoClassType != Device_HMD) && - (info->InfoClassType != Device_None)) - return false; - - bool is7Inch = Is7Inch(); - - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, - is7Inch ? "Oculus Rift DK1" : - ((HResolution >= 1920) ? "Oculus Rift DK HD" : "Oculus Rift DK1-Prototype") ); - OVR_strcpy(info->Manufacturer, DeviceInfo::MaxNameLength, "Oculus VR"); - info->Type = Device_HMD; - info->Version = 0; - - // Display detection. - if (info->InfoClassType == Device_HMD) - { - HMDInfo* hmdInfo = static_cast<HMDInfo*>(info); - - hmdInfo->DesktopX = DesktopX; - hmdInfo->DesktopY = DesktopY; - hmdInfo->HResolution = HResolution; - hmdInfo->VResolution = VResolution; - hmdInfo->HScreenSize = HScreenSize; - hmdInfo->VScreenSize = VScreenSize; - hmdInfo->VScreenCenter = VScreenSize * 0.5f; - hmdInfo->InterpupillaryDistance = 0.064f; // Default IPD; should be configurable. - hmdInfo->LensSeparationDistance = 0.0635f; - - // Obtain IPD from profile. - Ptr<Profile> profile = *GetProfileAddRef(); - - if (profile) - { - hmdInfo->InterpupillaryDistance = profile->GetIPD(); - // TBD: Switch on EyeCup type. - } - - if (Contents & Contents_Distortion) - { - memcpy(hmdInfo->DistortionK, DistortionK, sizeof(float)*4); - hmdInfo->EyeToScreenDistance = EyeToScreenDistance; - } - else - { - if (is7Inch) - { - // 7" screen. - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.22f; - hmdInfo->DistortionK[2] = 0.24f; - hmdInfo->EyeToScreenDistance = 0.041f; - } - else - { - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.18f; - hmdInfo->DistortionK[2] = 0.115f; - - if (HResolution == 1920) - hmdInfo->EyeToScreenDistance = 0.040f; - else - hmdInfo->EyeToScreenDistance = 0.0387f; - } - } - - hmdInfo->ChromaAbCorrection[0] = 0.996f; - hmdInfo->ChromaAbCorrection[1] = -0.004f; - hmdInfo->ChromaAbCorrection[2] = 1.014f; - hmdInfo->ChromaAbCorrection[3] = 0.0f; - - OVR_strcpy(hmdInfo->DisplayDeviceName, sizeof(hmdInfo->DisplayDeviceName), - DisplayDeviceName.ToCStr()); - hmdInfo->DisplayId = DisplayId; - } - - return true; -} - -//------------------------------------------------------------------------------------- -// ***** HMDDevice - -HMDDevice::HMDDevice(HMDDeviceCreateDesc* createDesc) - : OVR::DeviceImpl<OVR::HMDDevice>(createDesc, 0) -{ -} -HMDDevice::~HMDDevice() -{ -} - -bool HMDDevice::Initialize(DeviceBase* parent) -{ - pParent = parent; - - // Initialize user profile to default for device. 
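// (getDesc()->GetProfileType() resolves to Profile_RiftDKHD for panels with
// HResolution >= 1920 and to Profile_RiftDK1 otherwise, per GetProfileType()
// in OVR_OSX_HMDDevice.h, so the default profile name below is looked up per
// device class.)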
- ProfileManager* profileManager = GetManager()->GetProfileManager(); - ProfileName = profileManager->GetDefaultProfileName(getDesc()->GetProfileType()); - - return true; -} -void HMDDevice::Shutdown() -{ - ProfileName.Clear(); - pCachedProfile.Clear(); - pParent.Clear(); -} - -Profile* HMDDevice::GetProfile() const -{ - if (!pCachedProfile) - pCachedProfile = *getDesc()->GetProfileAddRef(); - return pCachedProfile.GetPtr(); -} - -const char* HMDDevice::GetProfileName() const -{ - return ProfileName.ToCStr(); -} - -bool HMDDevice::SetProfileName(const char* name) -{ - pCachedProfile.Clear(); - if (!name) - { - ProfileName.Clear(); - return 0; - } - if (GetManager()->GetProfileManager()->HasProfile(getDesc()->GetProfileType(), name)) - { - ProfileName = name; - return true; - } - return false; -} - -OVR::SensorDevice* HMDDevice::GetSensor() -{ - // Just return first sensor found since we have no way to match it yet. - OVR::SensorDevice* sensor = GetManager()->EnumerateDevices<SensorDevice>().CreateDevice(); - if (sensor) - sensor->SetCoordinateFrame(SensorDevice::Coord_HMD); - return sensor; -} - - -}} // namespace OVR::OSX - - diff --git a/LibOVR/Src/OVR_OSX_HMDDevice.h b/LibOVR/Src/OVR_OSX_HMDDevice.h deleted file mode 100644 index d92aa1f..0000000 --- a/LibOVR/Src/OVR_OSX_HMDDevice.h +++ /dev/null @@ -1,174 +0,0 @@ -/************************************************************************************ - -Filename : OVR_OSX_HMDDevice.h -Content : OSX HMDDevice implementation -Created : September 21, 2012 -Authors : Michael Antonov - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#ifndef OVR_OSX_HMDDevice_h -#define OVR_OSX_HMDDevice_h - -#include "OVR_DeviceImpl.h" -#include <Kernel/OVR_String.h> -#include "OVR_Profile.h" - -namespace OVR { namespace OSX { - -class HMDDevice; - - -//------------------------------------------------------------------------------------- - -// HMDDeviceFactory enumerates attached Oculus HMD devices. -// -// This is currently done by matching monitor device strings. - -class HMDDeviceFactory : public DeviceFactory -{ -public: - static HMDDeviceFactory Instance; - - // Enumerates devices, creating and destroying relevant objects in manager. 
- virtual void EnumerateDevices(EnumerateVisitor& visitor); - -protected: - DeviceManager* getManager() const { return (DeviceManager*) pManager; } -}; - - -class HMDDeviceCreateDesc : public DeviceCreateDesc -{ - friend class HMDDevice; - -protected: - enum - { - Contents_Screen = 1, - Contents_Distortion = 2, - Contents_7Inch = 4, - }; - -public: - - HMDDeviceCreateDesc(DeviceFactory* factory, - UInt32 vendor, UInt32 product, const String& displayDeviceName, long dispId); - HMDDeviceCreateDesc(const HMDDeviceCreateDesc& other); - - virtual DeviceCreateDesc* Clone() const - { - return new HMDDeviceCreateDesc(*this); - } - - virtual DeviceBase* NewDeviceInstance(); - - virtual MatchResult MatchDevice(const DeviceCreateDesc& other, - DeviceCreateDesc**) const; - - virtual bool UpdateMatchedCandidate(const DeviceCreateDesc&, bool* newDeviceFlag = NULL); - - virtual bool GetDeviceInfo(DeviceInfo* info) const; - - // Requests the currently used default profile. This profile affects the - // settings reported by HMDInfo. - Profile* GetProfileAddRef() const; - - ProfileType GetProfileType() const - { - return (HResolution >= 1920) ? Profile_RiftDKHD : Profile_RiftDK1; - } - - void SetScreenParameters(int x, int y, unsigned hres, unsigned vres, float hsize, float vsize) - { - DesktopX = x; - DesktopY = y; - HResolution = hres; - VResolution = vres; - HScreenSize = hsize; - VScreenSize = vsize; - Contents |= Contents_Screen; - } - - void SetDistortion(float eye2screen, const float* dks) - { - EyeToScreenDistance = eye2screen; - - for (int i = 0; i < 4; i++) - DistortionK[i] = dks[i]; - Contents |= Contents_Distortion; - } - - void Set7Inch() { Contents |= Contents_7Inch; } - - bool Is7Inch() const; - -protected: - String DeviceId; - String DisplayDeviceName; - int DesktopX, DesktopY; - unsigned Contents; - unsigned HResolution, VResolution; - float HScreenSize, VScreenSize; - long DisplayId; - float DistortionK[4]; - float EyeToScreenDistance; -}; - - -//------------------------------------------------------------------------------------- - -// HMDDevice represents an Oculus HMD device unit. An instance of this class -// is typically created from the DeviceManager. -// After HMD device is created, we its sensor data can be obtained by -// first creating a Sensor object and then wrappig it in SensorFusion. - -class HMDDevice : public DeviceImpl<OVR::HMDDevice> -{ -public: - HMDDevice(HMDDeviceCreateDesc* createDesc); - ~HMDDevice(); - - virtual bool Initialize(DeviceBase* parent); - virtual void Shutdown(); - - - // Requests the currently used default profile. This profile affects the - // settings reported by HMDInfo. - virtual Profile* GetProfile() const; - virtual const char* GetProfileName() const; - virtual bool SetProfileName(const char* name); - - // Query associated sensor. - virtual OVR::SensorDevice* GetSensor(); - -protected: - HMDDeviceCreateDesc* getDesc() const { return (HMDDeviceCreateDesc*)pCreateDesc.GetPtr(); } - - // User name for the profile used with this device. 
- String ProfileName; - mutable Ptr<Profile> pCachedProfile; -}; - - -}} // namespace OVR::OSX - -#endif // OVR_OSX_HMDDevice_h - diff --git a/LibOVR/Src/OVR_OSX_SensorDevice.cpp b/LibOVR/Src/OVR_OSX_SensorDevice.cpp deleted file mode 100644 index bee648d..0000000 --- a/LibOVR/Src/OVR_OSX_SensorDevice.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/************************************************************************************ - -Filename : OVR_OSX_SensorDevice.cpp -Content : OSX SensorDevice implementation -Created : March 14, 2013 -Authors : Lee Cooper - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, -which is provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. - -You may obtain a copy of the License at - -http://www.oculusvr.com/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, the Oculus VR SDK -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*************************************************************************************/ - -#include "OVR_OSX_HMDDevice.h" -#include "OVR_SensorImpl.h" -#include "OVR_DeviceImpl.h" - -namespace OVR { namespace OSX { - -} // namespace OSX - -//------------------------------------------------------------------------------------- -void SensorDeviceImpl::EnumerateHMDFromSensorDisplayInfo( const SensorDisplayInfoImpl& displayInfo, - DeviceFactory::EnumerateVisitor& visitor) -{ - - OSX::HMDDeviceCreateDesc hmdCreateDesc(&OSX::HMDDeviceFactory::Instance, 1, 1, "", 0); - - hmdCreateDesc.SetScreenParameters( 0, 0, - displayInfo.HResolution, displayInfo.VResolution, - displayInfo.HScreenSize, displayInfo.VScreenSize); - - if ((displayInfo.DistortionType & SensorDisplayInfoImpl::Mask_BaseFmt) & SensorDisplayInfoImpl::Base_Distortion) - hmdCreateDesc.SetDistortion(displayInfo.EyeToScreenDistance[0], displayInfo.DistortionK); - if (displayInfo.HScreenSize > 0.14f) - hmdCreateDesc.Set7Inch(); - - visitor.Visit(hmdCreateDesc); -} - -} // namespace OVR - - diff --git a/LibOVR/Src/OVR_Profile.cpp b/LibOVR/Src/OVR_Profile.cpp index fdac5d7..1fa8eb4 100644 --- a/LibOVR/Src/OVR_Profile.cpp +++ b/LibOVR/Src/OVR_Profile.cpp @@ -12,16 +12,16 @@ Notes : can be accomplished in game via the Profile API or by the official Oculus Configuration Utility. -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -32,6 +32,7 @@ limitations under the License. ************************************************************************************/ #include "OVR_Profile.h" +#include "OVR_Device.h" #include "OVR_JSON.h" #include "Kernel/OVR_Types.h" #include "Kernel/OVR_SysFile.h" @@ -51,8 +52,10 @@ limitations under the License. #endif -#define PROFILE_VERSION 1.0 -#define MAX_PROFILE_MAJOR_VERSION 1 + +#define PROFILE_VERSION 2.0 +#define MAX_PROFILE_MAJOR_VERSION 2 +#define MAX_DEVICE_PROFILE_MAJOR_VERSION 1 namespace OVR { @@ -127,28 +130,143 @@ String GetBaseOVRPath(bool create_dir) return path; } -String GetProfilePath(bool create_dir) +String ProfileManager::GetProfilePath(bool create_dir) { String path = GetBaseOVRPath(create_dir); - path += "/Profiles.json"; + path += "/ProfileDB.json"; return path; } +bool ProfileManager::GetDeviceTags(const DeviceBase* device, String& product, String& serial) +{ + product = ""; + serial = ""; + + if (device && device->GetType() == Device_HMD) + { + HMDDevice* hmd = (HMDDevice*)device; + + Ptr<SensorDevice> sensor = *(hmd->GetSensor()); + if (sensor) + { + SensorInfo sinfo; + sensor->GetDeviceInfo(&sinfo); + serial = sinfo.SerialNumber; // get the serial number + + // Derive the product tag from the HMD product name + HMDInfo hmdinfo; + hmd->GetDeviceInfo(&hmdinfo); + + const char* product_name = NULL; + + // If the HMD is unrecognized then use the name stamped into the + // sensor firmware + if (hmdinfo.HmdType == HmdType_None || hmdinfo.Type == HmdType_Unknown) + product_name = sinfo.ProductName.ToCStr(); + else + product_name = hmdinfo.ProductName.ToCStr(); + + // First strip off "Oculus" + const char* oculus = strstr(product_name, "Oculus "); + if (oculus) + product_name = oculus + OVR_strlen("Oculus "); + // And remove spaces from the name + for (const char* s=product_name; *s != 0; s++) + { + if (*s != ' ') + product.AppendChar(*s); + } + } + } + + return (!product.IsEmpty() && !serial.IsEmpty()); +} + +static JSON* FindTaggedData(JSON* data, const char** tag_names, const char** qtags, int num_qtags) +{ + if (data == NULL || !(data->Name == "TaggedData") || data->Type != JSON_Array) + return NULL; + + JSON* tagged_item = data->GetFirstItem(); + while (tagged_item) + { + JSON* tags = tagged_item->GetItemByName("tags"); + if (tags->Type == JSON_Array && num_qtags == tags->GetArraySize()) + { // Check for a full tag match on each item + int num_matches = 0; + + for (int k=0; k<num_qtags; k++) + { + JSON* tag = tags->GetFirstItem(); + while (tag) + { + JSON* tagval = tag->GetFirstItem(); + if (tagval && tagval->Name == tag_names[k]) + { + if (tagval->Value == qtags[k]) + num_matches++; + break; + } + tag = tags->GetNextItem(tag); + } + } + + // if all tags were matched then copy the values into this Profile + if (num_matches == num_qtags) + { + JSON* vals = tagged_item->GetItemByName("vals"); + return vals; + } + } + + tagged_item = data->GetNextItem(tagged_item); + } + + return NULL; +} + +static void FilterTaggedData(JSON* data, const char* tag_name, const char* qtag, Array<JSON*>& items) +{ + if (data == NULL || !(data->Name == "TaggedData") || data->Type != JSON_Array) + return; + + JSON* tagged_item = data->GetFirstItem(); + while (tagged_item) + { + JSON* tags = 
tagged_item->GetItemByName("tags"); + if (tags->Type == JSON_Array) + { // Check for a tag match on the requested tag + + JSON* tag = tags->GetFirstItem(); + while (tag) + { + JSON* tagval = tag->GetFirstItem(); + if (tagval && tagval->Name == tag_name) + { + if (tagval->Value == qtag) + { // Add this item to the output list + items.PushBack(tagged_item); + } + break; + } + tag = tags->GetNextItem(tag); + } + } + + tagged_item = data->GetNextItem(tagged_item); + } +} + //----------------------------------------------------------------------------- // ***** ProfileManager ProfileManager::ProfileManager() { Changed = false; - CacheDevice = Profile_Unknown; } ProfileManager::~ProfileManager() { - // If the profiles have been altered then write out the profile file - if (Changed) - SaveCache(); - ClearCache(); } @@ -157,49 +275,50 @@ ProfileManager* ProfileManager::Create() return new ProfileManager(); } -Profile* ProfileManager::CreateProfileObject(const char* user, - ProfileType device, - const char** device_name) +// Clear the local profile cache +void ProfileManager::ClearCache() { Lock::Locker lockScope(&ProfileLock); - - Profile* profile = NULL; - switch (device) + //ProfileCache.Clear(); + if (ProfileCache) { - case Profile_GenericHMD: - *device_name = NULL; - profile = new HMDProfile(Profile_GenericHMD, user); - break; - case Profile_RiftDK1: - *device_name = "RiftDK1"; - profile = new RiftDK1Profile(user); - break; - case Profile_RiftDKHD: - *device_name = "RiftDKHD"; - profile = new RiftDKHDProfile(user); - break; - case Profile_Unknown: - break; + //ProfileCache->Release(); + ProfileCache = NULL; } - - return profile; + Changed = false; } - -// Clear the local profile cache -void ProfileManager::ClearCache() +// Returns a profile with all system default values +Profile* ProfileManager::GetDefaultProfile(const DeviceBase* device) { - Lock::Locker lockScope(&ProfileLock); + // In the absence of any data, set some reasonable profile defaults. + // However, this is not future proof and developers should still + // provide reasonable default values for queried fields. + Profile* profile = CreateProfile(); + profile->SetValue(OVR_KEY_USER, "default"); + profile->SetValue(OVR_KEY_NAME, "Default"); + profile->SetValue(OVR_KEY_GENDER, OVR_DEFAULT_GENDER); + profile->SetFloatValue(OVR_KEY_PLAYER_HEIGHT, OVR_DEFAULT_PLAYER_HEIGHT); + profile->SetFloatValue(OVR_KEY_EYE_HEIGHT, 1.675f); + profile->SetFloatValue(OVR_KEY_IPD, OVR_DEFAULT_IPD); + float dist[2] = {OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL, OVR_DEFAULT_NECK_TO_EYE_VERTICAL}; + profile->SetFloatValues(OVR_KEY_NECK_TO_EYE_DISTANCE, dist, 2); + //profile->SetFloatValue(OVR_KEY_NECK_TO_EYE_VERTICAL, 0.12f); + + // TODO: Provide device specific defaults + OVR_UNUSED(device); + + // DK1 default + //profile->SetValue("EyeCup", "A"); - ProfileCache.Clear(); - CacheDevice = Profile_Unknown; + return profile; } // Poplulates the local profile cache. This occurs on the first access of the profile // data. All profile operations are performed against the local cache until the // ProfileManager is released or goes out of scope at which time the cache is serialized // to disk. 
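// For reference, LoadCache(create == true) seeds an empty database with the
// same shape that LoadV1Profiles() produces when converting a legacy
// Profiles.json, roughly:
//   { "Oculus Profile Version": 2.0, "Users": [], "TaggedData": [] }
// "Users" then holds one { "User", "Name", ... } record per CreateUser() call,
// and "TaggedData" holds { "tags": [...], "vals": {...} } entries of the form
// matched by FindTaggedData() and FilterTaggedData() above.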
-void ProfileManager::LoadCache(ProfileType device) +void ProfileManager::LoadCache(bool create) { Lock::Locker lockScope(&ProfileLock); @@ -208,27 +327,75 @@ void ProfileManager::LoadCache(ProfileType device) String path = GetProfilePath(false); Ptr<JSON> root = *JSON::Load(path); - if (!root || root->GetItemCount() < 3) - return; + if (root == NULL) + { + path = GetBaseOVRPath(false) + "/Profiles.json"; // look for legacy profile + root = *JSON::Load(path); + + if (root == NULL) + { + if (create) + { // Generate a skeleton profile database + root = *JSON::CreateObject(); + root->AddNumberItem("Oculus Profile Version", 2.0); + root->AddItem("Users", JSON::CreateArray()); + root->AddItem("TaggedData", JSON::CreateArray()); + ProfileCache = root; + } + + return; + } - // First read the file type and version to make sure this is a valid file - JSON* item0 = root->GetFirstItem(); - JSON* item1 = root->GetNextItem(item0); - JSON* item2 = root->GetNextItem(item1); + // Verify the legacy version + JSON* version_item = root->GetFirstItem(); + if (version_item->Name == "Oculus Profile Version") + { + int major = atoi(version_item->Value.ToCStr()); + if (major != 1) + return; // don't use the file on unsupported major version number + } + else + { + return; // invalid file + } - if (item0->Name == "Oculus Profile Version") - { - int major = atoi(item0->Value.ToCStr()); - if (major > MAX_PROFILE_MAJOR_VERSION) - return; // don't parse the file on unsupported major version number + // Convert the legacy format to the new database format + LoadV1Profiles(root); } else { - return; + // Verify the file format and version + JSON* version_item = root->GetFirstItem(); + if (version_item->Name == "Oculus Profile Version") + { + int major = atoi(version_item->Value.ToCStr()); + if (major != 2) + return; // don't use the file on unsupported major version number + } + else + { + return; // invalid file + } + + ProfileCache = root; // store the database contents for traversal } +} + +void ProfileManager::LoadV1Profiles(JSON* v1) +{ + JSON* item0 = v1->GetFirstItem(); + JSON* item1 = v1->GetNextItem(item0); + JSON* item2 = v1->GetNextItem(item1); - DefaultProfile = item1->Value; + // Create the new profile database + Ptr<JSON> root = *JSON::CreateObject(); + root->AddNumberItem("Oculus Profile Version", 2.0); + root->AddItem("Users", JSON::CreateArray()); + root->AddItem("TaggedData", JSON::CreateArray()); + ProfileCache = root; + const char* default_dk1_user = item1->Value; + // Read the number of profiles int profileCount = (int)item2->dValue; JSON* profileItem = item2; @@ -253,585 +420,1098 @@ void ProfileManager::LoadCache(ProfileType device) { return; // invalid field } + + // Read the user profile fields + if (CreateUser(profileName, profileName)) + { + const char* tag_names[2] = {"User", "Product"}; + const char* tags[2]; + tags[0] = profileName; - const char* deviceName = 0; - bool deviceFound = false; - Ptr<Profile> profile = *CreateProfileObject(profileName, device, &deviceName); + Ptr<Profile> user_profile = *CreateProfile(); + user_profile->SetValue(OVR_KEY_NAME, profileName); - // Read the base profile fields. 
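// (In the converted database, each legacy per-device subtree such as "RiftDK1"
// becomes a tagged entry keyed by the {"User", "Product"} tag pair, while the
// remaining top-level fields are kept on the user profile itself, as the
// replacement code below shows.)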
- if (profile) - { - while (item = profileItem->GetNextItem(item), item) + float neckeye[2] = { 0, 0 }; + + item = profileItem->GetNextItem(item); + while (item) { if (item->Type != JSON_Object) { - profile->ParseProperty(item->Name, item->Value); + if (item->Name == OVR_KEY_PLAYER_HEIGHT) + { // Add an explicit eye height + + } + if (item->Name == "NeckEyeHori") + neckeye[0] = (float)item->dValue; + else if (item->Name == "NeckEyeVert") + neckeye[1] = (float)item->dValue; + else + user_profile->SetValue(item); } else - { // Search for the matching device to get device specific fields - if (!deviceFound && deviceName && OVR_strcmp(item->Name, deviceName) == 0) - { - deviceFound = true; + { + // Add the user/device tag values + const char* device_name = item->Name.ToCStr(); + Ptr<Profile> device_profile = *CreateProfile(); - for (JSON* deviceItem = item->GetFirstItem(); deviceItem; - deviceItem = item->GetNextItem(deviceItem)) - { - profile->ParseProperty(deviceItem->Name, deviceItem->Value); - } + JSON* device_item = item->GetFirstItem(); + while (device_item) + { + device_profile->SetValue(device_item); + device_item = item->GetNextItem(device_item); } + + tags[1] = device_name; + SetTaggedProfile(tag_names, tags, 2, device_profile); } + + item = profileItem->GetNextItem(item); } - } - // Add the new profile - ProfileCache.PushBack(profile); + // Add an explicit eye-height field + float player_height = user_profile->GetFloatValue(OVR_KEY_PLAYER_HEIGHT, + OVR_DEFAULT_PLAYER_HEIGHT); + if (player_height > 0) + { + char gender[16]; + user_profile->GetValue(OVR_KEY_GENDER, gender, 16); + + const float EYE_TO_HEADTOP_RATIO = 0.44538f; + const float MALE_AVG_HEAD_HEIGHT = 0.232f; + const float FEMALE_AVG_HEAD_HEIGHT = 0.218f; + + // compute distance from top of skull to the eye + float head_height; + if (OVR_strcmp(gender, "Female") == 0) + head_height = FEMALE_AVG_HEAD_HEIGHT; + else + head_height = MALE_AVG_HEAD_HEIGHT; + + float skull = EYE_TO_HEADTOP_RATIO * head_height; + float eye_height = player_height - skull; + + user_profile->SetFloatValue(OVR_KEY_EYE_HEIGHT, eye_height); + } + + // Convert NeckEye values to an array + if (neckeye[0] > 0 && neckeye[1] > 0) + user_profile->SetFloatValues(OVR_KEY_NECK_TO_EYE_DISTANCE, neckeye, 2); + + // Add the user tag values + SetTaggedProfile(tag_names, tags, 1, user_profile); + } } } - CacheDevice = device; + // since V1 profiles were only for DK1, the assign the user to all DK1's + const char* tag_names[1] = { "Product" }; + const char* tags[1] = { "RiftDK1" }; + Ptr<Profile> product_profile = *CreateProfile(); + product_profile->SetValue("DefaultUser", default_dk1_user); + SetTaggedProfile(tag_names, tags, 1, product_profile); } +// Returns the number of stored profiles for this device type +int ProfileManager::GetUserCount() +{ + Lock::Locker lockScope(&ProfileLock); + + if (ProfileCache == NULL) + { // Load the cache + LoadCache(false); + if (ProfileCache == NULL) + return 0; + } + + JSON* users = ProfileCache->GetItemByName("Users"); + if (users == NULL) + return 0; + + return users->GetItemCount(); +} -// Serializes the profiles to disk. 
-void ProfileManager::SaveCache() +bool ProfileManager::CreateUser(const char* user, const char* name) { - String path = GetProfilePath(true); - Lock::Locker lockScope(&ProfileLock); - Ptr<JSON> oldroot = *JSON::Load(path); - if (oldroot) - { - if (oldroot->GetItemCount() >= 3) - { - JSON* item0 = oldroot->GetFirstItem(); - JSON* item1 = oldroot->GetNextItem(item0); - oldroot->GetNextItem(item1); + if (ProfileCache == NULL) + { // Load the cache + LoadCache(true); + if (ProfileCache == NULL) + return false; + } - if (item0->Name == "Oculus Profile Version") - { - int major = atoi(item0->Value.ToCStr()); - if (major > MAX_PROFILE_MAJOR_VERSION) - oldroot.Clear(); // don't use the file on unsupported major version number - } - else + JSON* users = ProfileCache->GetItemByName("Users"); + if (users == NULL) + { // Generate the User section + users = JSON::CreateArray(); + ProfileCache->AddItem("Users", users); +//TODO: Insert this before the TaggedData + } + + // Search for the pre-existence of this user + JSON* user_item = users->GetFirstItem(); + int index = 0; + while (user_item) + { + JSON* userid = user_item->GetItemByName("User"); + int compare = OVR_strcmp(user, userid->Value); + if (compare == 0) + { // The user already exists so simply update the fields + JSON* name_item = user_item->GetItemByName("Name"); + if (name_item && OVR_strcmp(name, name_item->Value) != 0) { - oldroot.Clear(); + name_item->Value = name; + Changed = true; } + return true; } - else - { - oldroot.Clear(); + else if (compare < 0) + { // A new user should be placed before this item + break; } + + user_item = users->GetNextItem(user_item); + index++; } - - // Create a new json root - Ptr<JSON> root = *JSON::CreateObject(); - root->AddNumberItem("Oculus Profile Version", PROFILE_VERSION); - root->AddStringItem("CurrentProfile", DefaultProfile); - root->AddNumberItem("ProfileCount", (double) ProfileCache.GetSize()); - // Generate a JSON subtree for each profile - for (unsigned int i=0; i<ProfileCache.GetSize(); i++) - { - Profile* profile = ProfileCache[i]; + // Create and fill the user struct + JSON* new_user = JSON::CreateObject(); + new_user->AddStringItem(OVR_KEY_USER, user); + new_user->AddStringItem(OVR_KEY_NAME, name); + // user_item->AddStringItem("Password", password); - // Write the base profile information - JSON* json_profile = JSON::CreateObject(); - json_profile->Name = "Profile"; - json_profile->AddStringItem("Name", profile->Name); - const char* gender; - switch (profile->GetGender()) - { - case Profile::Gender_Male: gender = "Male"; break; - case Profile::Gender_Female: gender = "Female"; break; - default: gender = "Unspecified"; - } - json_profile->AddStringItem("Gender", gender); - json_profile->AddNumberItem("PlayerHeight", profile->PlayerHeight); - json_profile->AddNumberItem("IPD", profile->IPD); + if (user_item == NULL) + users->AddArrayElement(new_user); + else + users->InsertArrayElement(index, new_user); - const char* device_name = NULL; - // Create a device-specific subtree for the cached device - if (profile->Type == Profile_RiftDK1) - { - device_name = "RiftDK1"; - - RiftDK1Profile* rift = (RiftDK1Profile*)profile; - JSON* json_rift = JSON::CreateObject(); - json_profile->AddItem(device_name, json_rift); + Changed = true; + return true; +} - const char* eyecup = "A"; - switch (rift->EyeCups) - { - case EyeCup_A: eyecup = "A"; break; - case EyeCup_B: eyecup = "B"; break; - case EyeCup_C: eyecup = "C"; break; - } - json_rift->AddStringItem("EyeCup", eyecup); - json_rift->AddNumberItem("LL", 
rift->LL); - json_rift->AddNumberItem("LR", rift->LR); - json_rift->AddNumberItem("RL", rift->RL); - json_rift->AddNumberItem("RR", rift->RR); - } - else if (profile->Type == Profile_RiftDKHD) - { - device_name = "RiftDKHD"; - - RiftDKHDProfile* rift = (RiftDKHDProfile*)profile; - JSON* json_rift = JSON::CreateObject(); - json_profile->AddItem(device_name, json_rift); +// Returns the user id of a specific user in the list. The returned +// memory is locally allocated and should not be stored or deleted. Returns NULL +// if the index is invalid +const char* ProfileManager::GetUser(unsigned int index) +{ + Lock::Locker lockScope(&ProfileLock); - const char* eyecup = "A"; - switch (rift->EyeCups) - { - case EyeCup_A: eyecup = "A"; break; - case EyeCup_B: eyecup = "B"; break; - case EyeCup_C: eyecup = "C"; break; - } - json_rift->AddStringItem("EyeCup", eyecup); - //json_rift->AddNumberItem("LL", rift->LL); - //json_rift->AddNumberItem("LR", rift->LR); - //json_rift->AddNumberItem("RL", rift->RL); - //json_rift->AddNumberItem("RR", rift->RR); - } - - // There may be multiple devices stored per user, but only a single - // device is represented by this root. We don't want to overwrite - // the other devices so we need to examine the older root - // and merge previous devices into new json root - if (oldroot) + if (ProfileCache == NULL) + { // Load the cache + LoadCache(false); + if (ProfileCache == NULL) + return NULL; + } + + JSON* users = ProfileCache->GetItemByName("Users"); + + if (users && index < users->GetItemCount()) + { + JSON* user_item = users->GetItemByIndex(index); + if (user_item) { - JSON* old_profile = oldroot->GetFirstItem(); - while (old_profile) + JSON* user = user_item->GetFirstItem(); + if (user) { - if (old_profile->Name == "Profile") - { - JSON* profile_name = old_profile->GetItemByName("Name"); - if (profile_name && OVR_strcmp(profile->Name, profile_name->Value) == 0) - { // Now that we found the user in the older root, add all the - // object children to the new root - except for the one for the - // current device - JSON* old_item = old_profile->GetFirstItem(); - while (old_item) - { - if (old_item->Type == JSON_Object - && (device_name == NULL || OVR_strcmp(old_item->Name, device_name) != 0)) - { - JSON* old_device = old_item; - old_item = old_profile->GetNextItem(old_item); - - // remove the node from the older root to avoid multiple reference - old_device->RemoveNode(); - // add the node pointer to the new root - json_profile->AddItem(old_device->Name, old_device); - } - else - { - old_item = old_profile->GetNextItem(old_item); - } - } - - break; - } - } - - old_profile = oldroot->GetNextItem(old_profile); + JSON* userid = user_item->GetItemByName(OVR_KEY_USER); + if (userid) + return userid->Value.ToCStr(); } } - - // Add the completed user profile to the new root - root->AddItem("Profile", json_profile); } + - // Save the profile to disk - root->Save(path); + return NULL; } -// Returns the number of stored profiles for this device type -int ProfileManager::GetProfileCount(ProfileType device) +bool ProfileManager::RemoveUser(const char* user) { Lock::Locker lockScope(&ProfileLock); - if (CacheDevice == Profile_Unknown) - LoadCache(device); + if (ProfileCache == NULL) + { // Load the cache + LoadCache(false); + if (ProfileCache == NULL) + return true; + } + + JSON* users = ProfileCache->GetItemByName("Users"); + if (users == NULL) + return true; + + // Remove this user from the User table + JSON* user_item = users->GetFirstItem(); + while (user_item) + { + JSON* 
userid = user_item->GetItemByName("User"); + if (OVR_strcmp(user, userid->Value) == 0) + { // Delete the user entry + user_item->RemoveNode(); + user_item->Release(); + Changed = true; + break; + } + + user_item = users->GetNextItem(user_item); + } + + // Now remove all data entries with this user tag + JSON* tagged_data = ProfileCache->GetItemByName("TaggedData"); + Array<JSON*> user_items; + FilterTaggedData(tagged_data, "User", user, user_items); + for (unsigned int i=0; i<user_items.GetSize(); i++) + { + user_items[i]->RemoveNode(); + user_items[i]->Release(); + Changed = true; + } + + return Changed; +} - return (int)ProfileCache.GetSize(); +Profile* ProfileManager::CreateProfile() +{ + Profile* profile = new Profile(); + return profile; } -// Returns the profile name of a specific profile in the list. The returned -// memory is locally allocated and should not be stored or deleted. Returns NULL -// if the index is invalid -const char* ProfileManager::GetProfileName(ProfileType device, unsigned int index) +// Returns the name of the profile that is marked as the current default user. +const char* ProfileManager::GetDefaultUser(const DeviceBase* device) { - Lock::Locker lockScope(&ProfileLock); + const char* tag_names[2] = {"Product", "Serial"}; + const char* tags[2]; - if (CacheDevice == Profile_Unknown) - LoadCache(device); + String product; + String serial; + if (!GetDeviceTags(device, product, serial)) + return NULL; - if (index < ProfileCache.GetSize()) - { - Profile* profile = ProfileCache[index]; - OVR_strcpy(NameBuff, Profile::MaxNameLen, profile->Name); - return NameBuff; - } - else + const char* product_str = product.IsEmpty() ? NULL : product.ToCStr(); + const char* serial_str = serial.IsEmpty() ? NULL : serial.ToCStr(); + + if (product_str && serial_str) { - return NULL; + tags[0] = product_str; + tags[1] = serial_str; + // Look for a default user on this specific device + Ptr<Profile> p = *GetTaggedProfile(tag_names, tags, 2); + if (p == NULL) + { // Look for a default user on this product + p = *GetTaggedProfile(tag_names, tags, 1); + } + + if (p) + { + const char* user = p->GetValue("DefaultUser"); + if (user != NULL && user[0] != 0) + { + TempBuff = user; + return TempBuff.ToCStr(); + } + } } + + return NULL; } -bool ProfileManager::HasProfile(ProfileType device, const char* name) +//----------------------------------------------------------------------------- +bool ProfileManager::SetDefaultUser(const DeviceBase* device, const char* user) { - Lock::Locker lockScope(&ProfileLock); + const char* tag_names[2] = {"Product", "Serial"}; + const char* tags[2]; + + String product; + String serial; + if (!GetDeviceTags(device, product, serial)) + return NULL; - if (CacheDevice == Profile_Unknown) - LoadCache(device); + const char* product_str = product.IsEmpty() ? NULL : product.ToCStr(); + const char* serial_str = serial.IsEmpty() ? NULL : serial.ToCStr(); - for (unsigned i = 0; i< ProfileCache.GetSize(); i++) + if (product_str && serial_str) { - if (ProfileCache[i] && OVR_strcmp(ProfileCache[i]->Name, name) == 0) - return true; + tags[0] = product_str; + tags[1] = serial_str; + + Ptr<Profile> p = *CreateProfile(); + p->SetValue("DefaultUser", user); + return SetTaggedProfile(tag_names, tags, 2, p); } + return false; } - -// Returns a specific profile object in the list. The returned memory should be -// encapsulated in a Ptr<> object or released after use. 
Returns NULL if the index -// is invalid -Profile* ProfileManager::LoadProfile(ProfileType device, unsigned int index) +//----------------------------------------------------------------------------- +Profile* ProfileManager::GetTaggedProfile(const char** tag_names, const char** tags, int num_tags) { Lock::Locker lockScope(&ProfileLock); - if (CacheDevice == Profile_Unknown) - LoadCache(device); + if (ProfileCache == NULL) + { // Load the cache + LoadCache(false); + if (ProfileCache == NULL) + return NULL; + } - if (index < ProfileCache.GetSize()) - { - Profile* profile = ProfileCache[index]; - return profile->Clone(); + JSON* tagged_data = ProfileCache->GetItemByName("TaggedData"); + OVR_ASSERT(tagged_data); + if (tagged_data == NULL) + return false; + + Profile* profile = new Profile(); + + JSON* vals = FindTaggedData(tagged_data, tag_names, tags, num_tags); + if (vals) + { + JSON* item = vals->GetFirstItem(); + while (item) + { + //printf("Add %s, %s\n", item->Name.ToCStr(), item->Value.ToCStr()); + //profile->Settings.Set(item->Name, item->Value); + profile->SetValue(item); + item = vals->GetNextItem(item); + } + + return profile; } else { + profile->Release(); return NULL; } } -// Returns a profile object for a particular device and user name. The returned -// memory should be encapsulated in a Ptr<> object or released after use. Returns -// NULL if the profile is not found -Profile* ProfileManager::LoadProfile(ProfileType device, const char* user) +//----------------------------------------------------------------------------- +bool ProfileManager::SetTaggedProfile(const char** tag_names, const char** tags, int num_tags, Profile* profile) { - if (user == NULL) - return NULL; - Lock::Locker lockScope(&ProfileLock); - - if (CacheDevice == Profile_Unknown) - LoadCache(device); - for (unsigned int i=0; i<ProfileCache.GetSize(); i++) - { - if (OVR_strcmp(user, ProfileCache[i]->Name) == 0) - { // Found the requested user profile - Profile* profile = ProfileCache[i]; - return profile->Clone(); + if (ProfileCache == NULL) + { // Load the cache + LoadCache(true); + if (ProfileCache == NULL) + return false; // TODO: Generate a new profile DB + } + + JSON* tagged_data = ProfileCache->GetItemByName("TaggedData"); + OVR_ASSERT(tagged_data); + if (tagged_data == NULL) + return false; + + // Get the cached tagged data section + JSON* vals = FindTaggedData(tagged_data, tag_names, tags, num_tags); + if (vals == NULL) + { + JSON* tagged_item = JSON::CreateObject(); + JSON* taglist = JSON::CreateArray(); + for (int i=0; i<num_tags; i++) + { + JSON* k = JSON::CreateObject(); + k->AddStringItem(tag_names[i], tags[i]); + taglist->AddArrayElement(k); } + + vals = JSON::CreateObject(); + + tagged_item->AddItem("tags", taglist); + tagged_item->AddItem("vals", vals); + tagged_data->AddArrayElement(tagged_item); } - return NULL; -} + // Now add or update each profile setting in cache + for (unsigned int i=0; i<profile->Values.GetSize(); i++) + { + JSON* value = profile->Values[i]; + + bool found = false; + JSON* item = vals->GetFirstItem(); + while (item) + { + if (value->Name == item->Name) + { + // Don't allow a pre-existing type to be overridden + OVR_ASSERT(value->Type == item->Type); + + if (value->Type == item->Type) + { // Check for the same value + if (value->Type == JSON_Array) + { // Update each array item + if (item->GetArraySize() == value->GetArraySize()) + { // Update each value (assumed to be basic types and not array of objects) + JSON* value_element = value->GetFirstItem(); + JSON* 
item_element = item->GetFirstItem(); + while (item_element && value_element) + { + if (value_element->Type == JSON_String) + { + if (item_element->Value != value_element->Value) + { // Overwrite the changed value and mark for file update + item_element->Value = value_element->Value; + Changed = true; + } + } + else { + if (item_element->dValue != value_element->dValue) + { // Overwrite the changed value and mark for file update + item_element->dValue = value_element->dValue; + Changed = true; + } + } + + value_element = value->GetNextItem(value_element); + item_element = item->GetNextItem(item_element); + } + } + else + { // if the array size changed, simply create a new one +// TODO: Create the new array + } + } + else if (value->Type == JSON_String) + { + if (item->Value != value->Value) + { // Overwrite the changed value and mark for file update + item->Value = value->Value; + Changed = true; + } + } + else { + if (item->dValue != value->dValue) + { // Overwrite the changed value and mark for file update + item->dValue = value->dValue; + Changed = true; + } + } + } + else + { + return false; + } -// Returns a profile with all system default values -Profile* ProfileManager::GetDeviceDefaultProfile(ProfileType device) -{ - const char* device_name = NULL; - return CreateProfileObject("default", device, &device_name); + found = true; + break; + } + + item = vals->GetNextItem(item); + } + + if (!found) + { // Add the new value + if (value->Type == JSON_String) + vals->AddStringItem(value->Name, value->Value); + else if (value->Type == JSON_Bool) + vals->AddBoolItem(value->Name, (value->dValue != 0)); + else if (value->Type == JSON_Array) + vals->AddItem(value->Name, value->Copy()); + else + vals->AddNumberItem(value->Name, value->dValue); + + Changed = true; + } + } + + return true; } -// Returns the name of the profile that is marked as the current default user. -const char* ProfileManager::GetDefaultProfileName(ProfileType device) +//----------------------------------------------------------------------------- +Profile* ProfileManager::GetProfile(const DeviceBase* device, const char* user) { Lock::Locker lockScope(&ProfileLock); - if (CacheDevice == Profile_Unknown) - LoadCache(device); + if (ProfileCache == NULL) + { // Load the cache + LoadCache(false); + if (ProfileCache == NULL) + return NULL; + } + + Profile* profile = new Profile(); - if (ProfileCache.GetSize() > 0) + if (device) { - OVR_strcpy(NameBuff, Profile::MaxNameLen, DefaultProfile); - return NameBuff; + if (!profile->LoadDeviceProfile(device) && (user == NULL)) + { + profile->Release(); + return NULL; + } } - else + + if (user) { - return NULL; + String product; + String serial; + GetDeviceTags(device, product, serial); + + const char* product_str = product.IsEmpty() ? NULL : product.ToCStr(); + const char* serial_str = serial.IsEmpty() ? NULL : serial.ToCStr(); + + if (!profile->LoadProfile(ProfileCache.GetPtr(), user, product_str, serial_str)) + { + profile->Release(); + return NULL; + } } + + return profile; +} + +//----------------------------------------------------------------------------- +// ***** Profile + +Profile::~Profile() +{ + ValMap.Clear(); + for (unsigned int i=0; i<Values.GetSize(); i++) + Values[i]->Release(); + + Values.Clear(); } -// Marks a particular user as the current default user. 
-bool ProfileManager::SetDefaultProfileName(ProfileType device, const char* name) +bool Profile::Close() { - Lock::Locker lockScope(&ProfileLock); + // TODO: + return true; +} - if (CacheDevice == Profile_Unknown) - LoadCache(device); -// TODO: I should verify that the user is valid - if (ProfileCache.GetSize() > 0) +//----------------------------------------------------------------------------- +void Profile::CopyItems(JSON* root, String prefix) +{ + JSON* item = root->GetFirstItem(); + while (item) { - DefaultProfile = name; - Changed = true; - return true; + String item_name; + if (prefix.IsEmpty()) + item_name = item->Name; + else + item_name = prefix + "." + item->Name; + + if (item->Type == JSON_Object) + { // recursively copy the children + + CopyItems(item, item_name); + } + else + { + //Settings.Set(item_name, item->Value); + SetValue(item); + } + + item = root->GetNextItem(item); + } +} + +//----------------------------------------------------------------------------- +bool Profile::LoadDeviceFile(unsigned int device_id, const char* serial) +{ + if (serial[0] == 0) + return false; + + String path = GetBaseOVRPath(false); + path += "/Devices.json"; + + // Load the device profiles + Ptr<JSON> root = *JSON::Load(path); + if (root == NULL) + return false; + + // Quick sanity check of the file type and format before we parse it + JSON* version = root->GetFirstItem(); + if (version && version->Name == "Oculus Device Profile Version") + { + int major = atoi(version->Value.ToCStr()); + if (major > MAX_DEVICE_PROFILE_MAJOR_VERSION) + return false; // don't parse the file on unsupported major version number } else { return false; + } + + + JSON* device = root->GetNextItem(version); + while (device) + { + if (device->Name == "Device") + { + JSON* product_item = device->GetItemByName("ProductID"); + JSON* serial_item = device->GetItemByName("Serial"); + if (product_item && serial_item + && (product_item->dValue == device_id) && (serial_item->Value == serial)) + { + // found the entry for this device so recursively copy all the settings to the profile + CopyItems(device, ""); + return true; + } + } + + device = root->GetNextItem(device); } + + return false; } +//----------------------------------------------------------------------------- +static int BCDByte(unsigned int byte) +{ + int digit1 = (byte >> 4) & 0x000f; + int digit2 = byte & 0x000f; + int decimal = digit1 * 10 + digit2; + return decimal; +} -// Saves a new or existing profile. Returns true on success or false on an -// invalid or failed save. 
-bool ProfileManager::Save(const Profile* profile) +//----------------------------------------------------------------------------- +bool Profile::LoadDeviceProfile(const DeviceBase* device) { - Lock::Locker lockScope(&ProfileLock); + bool success = false; + if (device == NULL) + return false; - if (OVR_strcmp(profile->Name, "default") == 0) - return false; // don't save a default profile + SensorDevice* sensor = NULL; - // TODO: I should also verify that this profile type matches the current cache - if (CacheDevice == Profile_Unknown) - LoadCache(profile->Type); + if (device->GetType() == Device_HMD) + { + // Convert the HMD device to Sensor + sensor = ((HMDDevice*)device)->GetSensor(); + device = sensor; + if (device == NULL) + return false; + } - // Look for the pre-existence of this profile - bool added = false; - for (unsigned int i=0; i<ProfileCache.GetSize(); i++) + if (device->GetType() == Device_Sensor) { - int compare = OVR_strcmp(profile->Name, ProfileCache[i]->Name); - - if (compare == 0) - { - // TODO: I should do a proper field comparison to avoid unnecessary - // overwrites and file saves - - // Replace the previous instance with the new profile - ProfileCache[i] = *profile->Clone(); - added = true; - Changed = true; - break; + SensorDevice* sensor = (SensorDevice*)device; + + SensorInfo sinfo; + sensor->GetDeviceInfo(&sinfo); + + int dev_major = BCDByte((sinfo.Version >> 8) & 0x00ff); + OVR_UNUSED(dev_major); + int dev_minor = BCDByte(sinfo.Version & 0xff); + + if (dev_minor > 18) + { // If the firmware supports hardware stored profiles then grab the device profile + // from the sensor + // TBD: Implement this + } + else + { + // Grab the model and serial number from the device and use it to access the device + // profile file stored on the local machine + success = LoadDeviceFile(sinfo.ProductId, sinfo.SerialNumber); } } - if (!added) - { - ProfileCache.PushBack(*profile->Clone()); - if (ProfileCache.GetSize() == 1) - CacheDevice = profile->Type; - - Changed = true; - } + if (sensor) + sensor->Release(); // release the sensor handle - return true; + return success; } -// Removes an existing profile. Returns true if the profile was found and deleted -// and returns false otherwise. 
-bool ProfileManager::Delete(const Profile* profile) +//----------------------------------------------------------------------------- +bool Profile::LoadUser(JSON* root, + const char* user, + const char* model_name, + const char* device_serial) { - Lock::Locker lockScope(&ProfileLock); + if (user == NULL) + return false; - if (OVR_strcmp(profile->Name, "default") == 0) - return false; // don't delete a default profile + // For legacy files, convert to old style names + //if (model_name && OVR_strcmp(model_name, "Oculus Rift DK1") == 0) + // model_name = "RiftDK1"; + + bool user_found = false; + JSON* data = root->GetItemByName("TaggedData"); + if (data) + { + const char* tag_names[3]; + const char* tags[3]; + tag_names[0] = "User"; + tags[0] = user; + int num_tags = 1; + + if (model_name) + { + tag_names[num_tags] = "Product"; + tags[num_tags] = model_name; + num_tags++; + } - if (CacheDevice == Profile_Unknown) - LoadCache(profile->Type); + if (device_serial) + { + tag_names[num_tags] = "Serial"; + tags[num_tags] = device_serial; + num_tags++; + } - // Look for the existence of this profile - for (unsigned int i=0; i<ProfileCache.GetSize(); i++) - { - if (OVR_strcmp(profile->Name, ProfileCache[i]->Name) == 0) - { - if (OVR_strcmp(profile->Name, DefaultProfile) == 0) - DefaultProfile.Clear(); - - ProfileCache.RemoveAt(i); - Changed = true; - return true; + // Retrieve all tag permutations + for (int combos=1; combos<=num_tags; combos++) + { + for (int i=0; i<(num_tags - combos + 1); i++) + { + JSON* vals = FindTaggedData(data, tag_names+i, tags+i, combos); + if (vals) + { + if (i==0) // This tag-combination contains a user match + user_found = true; + + // Add the values to the Profile. More specialized multi-tag values + // will take precedence over and overwrite generalized ones + // For example: ("Me","RiftDK1").IPD would overwrite ("Me").IPD + JSON* item = vals->GetFirstItem(); + while (item) + { + //printf("Add %s, %s\n", item->Name.ToCStr(), item->Value.ToCStr()); + //Settings.Set(item->Name, item->Value); + SetValue(item); + item = vals->GetNextItem(item); + } + } + } } } - return false; -} + if (user_found) + SetValue(OVR_KEY_USER, user); + return user_found; +} //----------------------------------------------------------------------------- -// ***** Profile - -Profile::Profile(ProfileType device, const char* name) +bool Profile::LoadProfile(JSON* root, + const char* user, + const char* device_model, + const char* device_serial) { - Type = device; - Gender = Gender_Unspecified; - PlayerHeight = 1.778f; // 5'10" inch man - IPD = 0.064f; - OVR_strcpy(Name, MaxNameLen, name); + if (!LoadUser(root, user, device_model, device_serial)) + return false; + + return true; } -bool Profile::ParseProperty(const char* prop, const char* sval) +//----------------------------------------------------------------------------- +char* Profile::GetValue(const char* key, char* val, int val_length) const { - if (OVR_strcmp(prop, "Name") == 0) + JSON* value = NULL; + if (ValMap.Get(key, &value)) { - OVR_strcpy(Name, MaxNameLen, sval); - return true; + OVR_strcpy(val, val_length, value->Value.ToCStr()); + return val; } - else if (OVR_strcmp(prop, "Gender") == 0) + else { - if (OVR_strcmp(sval, "Male") == 0) - Gender = Gender_Male; - else if (OVR_strcmp(sval, "Female") == 0) - Gender = Gender_Female; - else - Gender = Gender_Unspecified; - - return true; + val[0] = 0; + return NULL; } - else if (OVR_strcmp(prop, "PlayerHeight") == 0) +} + 
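// Illustrative usage sketch of the key/value Profile API above; not part of the SDK diff.
// The helper name QueryUserIPD is hypothetical, and 'pm'/'device' are assumed to come from
// the application's ProfileManager / DeviceManager setup.
static float QueryUserIPD(ProfileManager* pm, const DeviceBase* device)
{
    const char* user = pm->GetDefaultUser(device);            // may be NULL if none is stored
    Ptr<Profile> p;
    if (Profile* found = pm->GetProfile(device, user))
        p = *found;                                           // take ownership, no extra AddRef
    else
        p = *pm->GetDefaultProfile(device);                   // fall back to built-in defaults

    char name[64];
    p->GetValue(OVR_KEY_NAME, name, (int)sizeof(name));       // buffer-based string lookup
    return p->GetFloatValue(OVR_KEY_IPD, OVR_DEFAULT_IPD);    // numeric lookup with a default
}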
+//----------------------------------------------------------------------------- +const char* Profile::GetValue(const char* key) +{ + // Non-reentrant query. The returned buffer can only be used until the next call + // to GetValue() + JSON* value = NULL; + if (ValMap.Get(key, &value)) { - PlayerHeight = (float)atof(sval); - return true; + TempVal = value->Value; + return TempVal.ToCStr(); } - else if (OVR_strcmp(prop, "IPD") == 0) + else { - IPD = (float)atof(sval); - return true; + return NULL; } - - return false; } +//----------------------------------------------------------------------------- +int Profile::GetNumValues(const char* key) const +{ + JSON* value = NULL; + if (ValMap.Get(key, &value)) + { + if (value->Type == JSON_Array) + return value->GetArraySize(); + else + return 1; + } + else + return 0; +} -// Computes the eye height from the metric head height -float Profile::GetEyeHeight() +//----------------------------------------------------------------------------- +bool Profile::GetBoolValue(const char* key, bool default_val) const { - const float EYE_TO_HEADTOP_RATIO = 0.44538f; - const float MALE_AVG_HEAD_HEIGHT = 0.232f; - const float FEMALE_AVG_HEAD_HEIGHT = 0.218f; - - // compute distance from top of skull to the eye - float head_height; - if (Gender == Gender_Female) - head_height = FEMALE_AVG_HEAD_HEIGHT; + JSON* value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Bool) + return (value->dValue != 0); else - head_height = MALE_AVG_HEAD_HEIGHT; + return default_val; +} - float skull = EYE_TO_HEADTOP_RATIO * head_height; +//----------------------------------------------------------------------------- +int Profile::GetIntValue(const char* key, int default_val) const +{ + JSON* value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Number) + return (int)(value->dValue); + else + return default_val; +} - float eye_height = PlayerHeight - skull; - return eye_height; +//----------------------------------------------------------------------------- +float Profile::GetFloatValue(const char* key, float default_val) const +{ + JSON* value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Number) + return (float)(value->dValue); + else + return default_val; } //----------------------------------------------------------------------------- -// ***** HMDProfile +int Profile::GetFloatValues(const char* key, float* values, int num_vals) const +{ + JSON* value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Array) + { + int val_count = Alg::Min(value->GetArraySize(), num_vals); + JSON* item = value->GetFirstItem(); + int count=0; + while (item && count < val_count) + { + if (item->Type == JSON_Number) + values[count] = (float)item->dValue; + else + break; + + count++; + item = value->GetNextItem(item); + } + + return count; + } + else + { + return 0; + } +} -HMDProfile::HMDProfile(ProfileType type, const char* name) : Profile(type, name) +//----------------------------------------------------------------------------- +double Profile::GetDoubleValue(const char* key, double default_val) const { - LL = 0; - LR = 0; - RL = 0; - RR = 0; + JSON* value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Number) + return value->dValue; + else + return default_val; } -bool HMDProfile::ParseProperty(const char* prop, const char* sval) +//----------------------------------------------------------------------------- +int Profile::GetDoubleValues(const char* key, double* values, int num_vals) const { - if (OVR_strcmp(prop, "LL") == 0) + JSON* 
value = NULL; + if (ValMap.Get(key, &value) && value->Type == JSON_Array) { - LL = atoi(sval); - return true; + int val_count = Alg::Min(value->GetArraySize(), num_vals); + JSON* item = value->GetFirstItem(); + int count=0; + while (item && count < val_count) + { + if (item->Type == JSON_Number) + values[count] = item->dValue; + else + break; + + count++; + item = value->GetNextItem(item); + } + + return count; } - else if (OVR_strcmp(prop, "LR") == 0) + else { - LR = atoi(sval); - return true; + return 0; } - else if (OVR_strcmp(prop, "RL") == 0) +} + +//----------------------------------------------------------------------------- +void Profile::SetValue(JSON* val) +{ + if (val->Type == JSON_Number) + SetDoubleValue(val->Name, val->dValue); + else if (val->Type == JSON_Bool) + SetBoolValue(val->Name, (val->dValue != 0)); + else if (val->Type == JSON_String) + SetValue(val->Name, val->Value); + else if (val->Type == JSON_Array) { - RL = atoi(sval); - return true; + if (val == NULL) + return; + + // Create a copy of the array + JSON* value = val->Copy(); + Values.PushBack(value); + ValMap.Set(value->Name, value); + } +} + +//----------------------------------------------------------------------------- +void Profile::SetValue(const char* key, const char* val) +{ + if (key == NULL || val == NULL) + return; + + JSON* value = NULL; + if (ValMap.Get(key, &value)) + { + value->Value = val; } - else if (OVR_strcmp(prop, "RR") == 0) + else { - RR = atoi(sval); - return true; + value = JSON::CreateString(val); + value->Name = key; + + Values.PushBack(value); + ValMap.Set(key, value); } - - return Profile::ParseProperty(prop, sval); } -Profile* HMDProfile::Clone() const +//----------------------------------------------------------------------------- +void Profile::SetBoolValue(const char* key, bool val) { - HMDProfile* profile = new HMDProfile(*this); - return profile; + if (key == NULL) + return; + + JSON* value = NULL; + if (ValMap.Get(key, &value)) + { + value->dValue = val; + } + else + { + value = JSON::CreateBool(val); + value->Name = key; + + Values.PushBack(value); + ValMap.Set(key, value); + } } //----------------------------------------------------------------------------- -// ***** RiftDK1Profile +void Profile::SetIntValue(const char* key, int val) +{ + SetDoubleValue(key, val); +} -RiftDK1Profile::RiftDK1Profile(const char* name) : HMDProfile(Profile_RiftDK1, name) +//----------------------------------------------------------------------------- +void Profile::SetFloatValue(const char* key, float val) { - EyeCups = EyeCup_A; + SetDoubleValue(key, val); } -bool RiftDK1Profile::ParseProperty(const char* prop, const char* sval) +//----------------------------------------------------------------------------- +void Profile::SetFloatValues(const char* key, const float* vals, int num_vals) { - if (OVR_strcmp(prop, "EyeCup") == 0) + JSON* value = NULL; + int val_count = 0; + if (ValMap.Get(key, &value)) { - switch (sval[0]) + if (value->Type == JSON_Array) { - case 'C': EyeCups = EyeCup_C; break; - case 'B': EyeCups = EyeCup_B; break; - default: EyeCups = EyeCup_A; break; + // truncate the existing array if fewer entries provided + int num_existing_vals = value->GetArraySize(); + for (int i=num_vals; i<num_existing_vals; i++) + value->RemoveLast(); + + JSON* item = value->GetFirstItem(); + while (item && val_count < num_vals) + { + if (item->Type == JSON_Number) + item->dValue = vals[val_count]; + + item = value->GetNextItem(item); + val_count++; + } + } + else + { + return; // Maybe we should 
change the data type? } - return true; } - - return HMDProfile::ParseProperty(prop, sval); -} + else + { + value = JSON::CreateArray(); + value->Name = key; -Profile* RiftDK1Profile::Clone() const -{ - RiftDK1Profile* profile = new RiftDK1Profile(*this); - return profile; + Values.PushBack(value); + ValMap.Set(key, value); + } + + for (val_count; val_count < num_vals; val_count++) + value->AddArrayNumber(vals[val_count]); } //----------------------------------------------------------------------------- -// ***** RiftDKHDProfile - -RiftDKHDProfile::RiftDKHDProfile(const char* name) : HMDProfile(Profile_RiftDKHD, name) +void Profile::SetDoubleValue(const char* key, double val) { - EyeCups = EyeCup_A; + JSON* value = NULL; + if (ValMap.Get(key, &value)) + { + value->dValue = val; + } + else + { + value = JSON::CreateNumber(val); + value->Name = key; + + Values.PushBack(value); + ValMap.Set(key, value); + } } -bool RiftDKHDProfile::ParseProperty(const char* prop, const char* sval) +//----------------------------------------------------------------------------- +void Profile::SetDoubleValues(const char* key, const double* vals, int num_vals) { - if (OVR_strcmp(prop, "EyeCup") == 0) + JSON* value = NULL; + int val_count = 0; + if (ValMap.Get(key, &value)) { - switch (sval[0]) + if (value->Type == JSON_Array) { - case 'C': EyeCups = EyeCup_C; break; - case 'B': EyeCups = EyeCup_B; break; - default: EyeCups = EyeCup_A; break; + // truncate the existing array if fewer entries provided + int num_existing_vals = value->GetArraySize(); + for (int i=num_vals; i<num_existing_vals; i++) + value->RemoveLast(); + + JSON* item = value->GetFirstItem(); + while (item && val_count < num_vals) + { + if (item->Type == JSON_Number) + item->dValue = vals[val_count]; + + item = value->GetNextItem(item); + val_count++; + } + } + else + { + return; // Maybe we should change the data type? } - return true; } - - return HMDProfile::ParseProperty(prop, sval); -} + else + { + value = JSON::CreateArray(); + value->Name = key; -Profile* RiftDKHDProfile::Clone() const -{ - RiftDKHDProfile* profile = new RiftDKHDProfile(*this); - return profile; + Values.PushBack(value); + ValMap.Set(key, value); + } + + for (val_count; val_count < num_vals; val_count++) + value->AddArrayNumber(vals[val_count]); } } // OVR diff --git a/LibOVR/Src/OVR_Profile.h b/LibOVR/Src/OVR_Profile.h index 9e2f9f3..e34820a 100644 --- a/LibOVR/Src/OVR_Profile.h +++ b/LibOVR/Src/OVR_Profile.h @@ -11,16 +11,16 @@ Notes : can be accomplished in game via the Profile API or by the official Oculus Configuration Utility. -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -33,22 +33,17 @@ limitations under the License. 
#ifndef OVR_Profile_h #define OVR_Profile_h +#include "OVR_DeviceConstants.h" #include "Kernel/OVR_String.h" #include "Kernel/OVR_RefCount.h" #include "Kernel/OVR_Array.h" +#include "Kernel/OVR_StringHash.h" namespace OVR { -// Defines the profile object for each device type -enum ProfileType -{ - Profile_Unknown = 0, - Profile_GenericHMD = 10, - Profile_RiftDK1 = 11, - Profile_RiftDKHD = 12, -}; - class Profile; +class DeviceBase; +class JSON; // ----------------------------------------------------------------------------- // ***** ProfileManager @@ -69,183 +64,137 @@ class Profile; // { // Retrieve the current profile settings // } // } // Profile will be destroyed and any disk I/O completed when going out of scope - class ProfileManager : public RefCountBase<ProfileManager> { protected: // Synchronize ProfileManager access since it may be accessed from multiple threads, // as it's shared through DeviceManager. - Lock ProfileLock; - Array<Ptr<Profile> > ProfileCache; - ProfileType CacheDevice; - String DefaultProfile; - bool Changed; - char NameBuff[32]; + Lock ProfileLock; + Ptr<JSON> ProfileCache; + bool Changed; + String TempBuff; public: static ProfileManager* Create(); - // Static interface functions - int GetProfileCount(ProfileType device); - const char* GetProfileName(ProfileType device, unsigned int index); - bool HasProfile(ProfileType device, const char* name); - Profile* LoadProfile(ProfileType device, unsigned int index); - Profile* LoadProfile(ProfileType device, const char* name); - Profile* GetDeviceDefaultProfile(ProfileType device); - const char* GetDefaultProfileName(ProfileType device); - bool SetDefaultProfileName(ProfileType device, const char* name); - bool Save(const Profile* profile); - bool Delete(const Profile* profile); - + int GetUserCount(); + const char* GetUser(unsigned int index); + bool CreateUser(const char* user, const char* name); + bool RemoveUser(const char* user); + const char* GetDefaultUser(const DeviceBase* device); + bool SetDefaultUser(const DeviceBase* device, const char* user); + + virtual Profile* CreateProfile(); + Profile* GetProfile(const DeviceBase* device, const char* user); + Profile* GetDefaultProfile(const DeviceBase* device); + Profile* GetTaggedProfile(const char** key_names, const char** keys, int num_keys); + bool SetTaggedProfile(const char** key_names, const char** keys, int num_keys, Profile* profile); + + bool GetDeviceTags(const DeviceBase* device, String& product, String& serial); + protected: ProfileManager(); ~ProfileManager(); - void LoadCache(ProfileType device); - void SaveCache(); + String GetProfilePath(bool create_dir); + void LoadCache(bool create); void ClearCache(); - Profile* CreateProfileObject(const char* user, - ProfileType device, - const char** device_name); + void LoadV1Profiles(JSON* v1); + + }; + //------------------------------------------------------------------- // ***** Profile // The base profile for all users. This object is not created directly. 
// Instead derived device objects provide add specific device members to // the base profile - class Profile : public RefCountBase<Profile> { -public: - enum { MaxNameLen = 32 }; - - enum GenderType - { - Gender_Unspecified = 0, - Gender_Male = 1, - Gender_Female = 2 - }; - - ProfileType Type; // The type of device profile - char Name[MaxNameLen]; // The name given to this profile - protected: - GenderType Gender; // The gender of the user - float PlayerHeight; // The height of the user in meters - float IPD; // Distance between eyes in meters + OVR::Hash<String, JSON*, String::HashFunctor> ValMap; + OVR::Array<JSON*> Values; + OVR::String TempVal; public: - virtual Profile* Clone() const = 0; - - // These are properties which are intrinsic to the user and affect scene setup - GenderType GetGender() { return Gender; }; - float GetPlayerHeight() { return PlayerHeight; }; - float GetIPD() { return IPD; }; - float GetEyeHeight(); - - void SetGender(GenderType gender) { Gender = gender; }; - void SetPlayerHeight(float height) { PlayerHeight = height; }; - void SetIPD(float ipd) { IPD = ipd; }; - -protected: - Profile(ProfileType type, const char* name); + ~Profile(); + + int GetNumValues(const char* key) const; + const char* GetValue(const char* key); + char* GetValue(const char* key, char* val, int val_length) const; + bool GetBoolValue(const char* key, bool default_val) const; + int GetIntValue(const char* key, int default_val) const; + float GetFloatValue(const char* key, float default_val) const; + int GetFloatValues(const char* key, float* values, int num_vals) const; + double GetDoubleValue(const char* key, double default_val) const; + int GetDoubleValues(const char* key, double* values, int num_vals) const; + + void SetValue(const char* key, const char* val); + void SetBoolValue(const char* key, bool val); + void SetIntValue(const char* key, int val); + void SetFloatValue(const char* key, float val); + void SetFloatValues(const char* key, const float* vals, int num_vals); + void SetDoubleValue(const char* key, double val); + void SetDoubleValues(const char* key, const double* vals, int num_vals); - virtual bool ParseProperty(const char* prop, const char* sval); - - friend class ProfileManager; -}; - -//----------------------------------------------------------------------------- -// ***** HMDProfile - -// The generic HMD profile is used for properties that are common to all headsets -class HMDProfile : public Profile -{ -protected: - // FOV extents in pixels measured by a user - int LL; // left eye outer extent - int LR; // left eye inner extent - int RL; // right eye inner extent - int RR; // right eye outer extent - -public: - virtual Profile* Clone() const; - - void SetLL(int val) { LL = val; }; - void SetLR(int val) { LR = val; }; - void SetRL(int val) { RL = val; }; - void SetRR(int val) { RR = val; }; - - int GetLL() { return LL; }; - int GetLR() { return LR; }; - int GetRL() { return RL; }; - int GetRR() { return RR; }; + bool Close(); protected: - HMDProfile(ProfileType type, const char* name); + Profile() {}; - virtual bool ParseProperty(const char* prop, const char* sval); - - friend class ProfileManager; -}; - -// For headsets that use eye cups -enum EyeCupType -{ - EyeCup_A = 0, - EyeCup_B = 1, - EyeCup_C = 2 -}; - -//----------------------------------------------------------------------------- -// ***** RiftDK1Profile - -// This profile is specific to the Rift Dev Kit 1 and contains overrides specific -// to that device and lens cup settings. 
-class RiftDK1Profile : public HMDProfile -{ -protected: - EyeCupType EyeCups; // Which eye cup does the player use - -public: - virtual Profile* Clone() const; - - EyeCupType GetEyeCup() { return EyeCups; }; - void SetEyeCup(EyeCupType cup) { EyeCups = cup; }; - -protected: - RiftDK1Profile(const char* name); - - virtual bool ParseProperty(const char* prop, const char* sval); - - friend class ProfileManager; -}; - -//----------------------------------------------------------------------------- -// ***** RiftDKHDProfile - -// This profile is specific to the Rift HD Dev Kit and contains overrides specific -// to that device and lens cup settings. -class RiftDKHDProfile : public HMDProfile -{ -protected: - EyeCupType EyeCups; // Which eye cup does the player use - -public: - virtual Profile* Clone() const; + + void SetValue(JSON* val); - EyeCupType GetEyeCup() { return EyeCups; }; - void SetEyeCup(EyeCupType cup) { EyeCups = cup; }; + + static bool LoadProfile(const DeviceBase* device, + const char* user, + Profile** profile); + void CopyItems(JSON* root, String prefix); + + bool LoadDeviceFile(unsigned int device_id, const char* serial); + bool LoadDeviceProfile(const DeviceBase* device); -protected: - RiftDKHDProfile(const char* name); + bool LoadProfile(JSON* root, + const char* user, + const char* device_model, + const char* device_serial); - virtual bool ParseProperty(const char* prop, const char* sval); + bool LoadUser(JSON* root, + const char* user, + const char* device_name, + const char* device_serial); + friend class ProfileManager; }; +// # defined() check for CAPI compatibility near term that re-defines these +// for now. To be unified. +#if !defined(OVR_KEY_USER) + +#define OVR_KEY_USER "User" +#define OVR_KEY_NAME "Name" +#define OVR_KEY_GENDER "Gender" +#define OVR_KEY_PLAYER_HEIGHT "PlayerHeight" +#define OVR_KEY_EYE_HEIGHT "EyeHeight" +#define OVR_KEY_IPD "IPD" +#define OVR_KEY_NECK_TO_EYE_DISTANCE "NeckEyeDistance" +#define OVR_KEY_EYE_RELIEF_DIAL "EyeReliefDial" +#define OVR_KEY_EYE_TO_NOSE_DISTANCE "EyeToNoseDist" +#define OVR_KEY_MAX_EYE_TO_PLATE_DISTANCE "MaxEyeToPlateDist" +#define OVR_KEY_EYE_CUP "EyeCup" +#define OVR_KEY_CUSTOM_EYE_RENDER "CustomEyeRender" + +#define OVR_DEFAULT_GENDER "Male" +#define OVR_DEFAULT_PLAYER_HEIGHT 1.778f +#define OVR_DEFAULT_EYE_HEIGHT 1.675f +#define OVR_DEFAULT_IPD 0.064f +#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL 0.09f +#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL 0.15f +#define OVR_DEFAULT_EYE_RELIEF_DIAL 3 + +#endif // OVR_KEY_USER String GetBaseOVRPath(bool create_dir); diff --git a/LibOVR/Src/OVR_Sensor2Impl.cpp b/LibOVR/Src/OVR_Sensor2Impl.cpp new file mode 100644 index 0000000..fa5d6e9 --- /dev/null +++ b/LibOVR/Src/OVR_Sensor2Impl.cpp @@ -0,0 +1,1128 @@ +/************************************************************************************ + +Filename : OVR_Sensor2Impl.cpp +Content : DK2 sensor device specific implementation. +Created : January 21, 2013 +Authors : Lee Cooper + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "OVR_Sensor2Impl.h" +#include "OVR_SensorImpl_Common.h" +#include "OVR_Sensor2ImplUtil.h" +#include "Kernel/OVR_Alg.h" + +//extern FILE *SF_LOG_fp; + +namespace OVR { + +//------------------------------------------------------------------------------------- +// ***** Oculus Sensor2-specific packet data structures + +enum { + Sensor2_VendorId = Oculus_VendorId, + Sensor2_ProductId = 0x0021, + + Sensor2_BootLoader = 0x1001, + + Sensor2_DefaultReportRate = 1000, // Hz +}; + + +// Messages we care for +enum Tracker2MessageType +{ + Tracker2Message_None = 0, + Tracker2Message_Sensors = 11, + Tracker2Message_Unknown = 0x100, + Tracker2Message_SizeError = 0x101, +}; + + +struct Tracker2Sensors +{ + UInt16 LastCommandID; + UByte NumSamples; + UInt16 RunningSampleCount; // Named 'SampleCount' in the firmware docs. + SInt16 Temperature; + UInt32 SampleTimestamp; + TrackerSample Samples[2]; + SInt16 MagX, MagY, MagZ; + UInt16 FrameCount; + UInt32 FrameTimestamp; + UByte FrameID; + UByte CameraPattern; + UInt16 CameraFrameCount; // Named 'CameraCount' in the firmware docs. + UInt32 CameraTimestamp; + + Tracker2MessageType Decode(const UByte* buffer, int size) + { + if (size < 64) + return Tracker2Message_SizeError; + + LastCommandID = DecodeUInt16(buffer + 1); + NumSamples = buffer[3]; + RunningSampleCount = DecodeUInt16(buffer + 4); + Temperature = DecodeSInt16(buffer + 6); + SampleTimestamp = DecodeUInt32(buffer + 8); + + // Only unpack as many samples as there actually are. + UByte iterationCount = (NumSamples > 1) ? 2 : NumSamples; + + for (UByte i = 0; i < iterationCount; i++) + { + UnpackSensor(buffer + 12 + 16 * i, &Samples[i].AccelX, &Samples[i].AccelY, &Samples[i].AccelZ); + UnpackSensor(buffer + 20 + 16 * i, &Samples[i].GyroX, &Samples[i].GyroY, &Samples[i].GyroZ); + } + + MagX = DecodeSInt16(buffer + 44); + MagY = DecodeSInt16(buffer + 46); + MagZ = DecodeSInt16(buffer + 48); + + FrameCount = DecodeUInt16(buffer + 50); + + FrameTimestamp = DecodeUInt32(buffer + 52); + FrameID = buffer[56]; + CameraPattern = buffer[57]; + CameraFrameCount = DecodeUInt16(buffer + 58); + CameraTimestamp = DecodeUInt32(buffer + 60); + + return Tracker2Message_Sensors; + } +}; + +struct Tracker2Message +{ + Tracker2MessageType Type; + Tracker2Sensors Sensors; +}; + +// Sensor reports data in the following coordinate system: +// Accelerometer: 10^-4 m/s^2; X forward, Y right, Z Down. +// Gyro: 10^-4 rad/s; X positive roll right, Y positive pitch up; Z positive yaw right. 
+ + +// We need to convert it to the following RHS coordinate system: +// X right, Y Up, Z Back (out of screen) +// +Vector3f AccelFromBodyFrameUpdate(const Tracker2Sensors& update, UByte sampleNumber) +{ + const TrackerSample& sample = update.Samples[sampleNumber]; + float ax = (float)sample.AccelX; + float ay = (float)sample.AccelY; + float az = (float)sample.AccelZ; + + return Vector3f(ax, ay, az) * 0.0001f; +} + + +Vector3f MagFromBodyFrameUpdate(const Tracker2Sensors& update) +{ + return Vector3f( (float)update.MagX, (float)update.MagY, (float)update.MagZ) * 0.0001f; +} + +Vector3f EulerFromBodyFrameUpdate(const Tracker2Sensors& update, UByte sampleNumber) +{ + const TrackerSample& sample = update.Samples[sampleNumber]; + float gx = (float)sample.GyroX; + float gy = (float)sample.GyroY; + float gz = (float)sample.GyroZ; + + return Vector3f(gx, gy, gz) * 0.0001f; +} + +bool Sensor2DeviceImpl::decodeTracker2Message(Tracker2Message* message, UByte* buffer, int size) +{ + memset(message, 0, sizeof(Tracker2Message)); + + if (size < 4) + { + message->Type = Tracker2Message_SizeError; + return false; + } + + switch (buffer[0]) + { + case Tracker2Message_Sensors: + message->Type = message->Sensors.Decode(buffer, size); + break; + + default: + message->Type = Tracker2Message_Unknown; + break; + } + + return (message->Type < Tracker2Message_Unknown) && (message->Type != Tracker2Message_None); +} + +//------------------------------------------------------------------------------------- +// ***** Sensor2Device + +Sensor2DeviceImpl::Sensor2DeviceImpl(SensorDeviceCreateDesc* createDesc) + : SensorDeviceImpl(createDesc), + LastNumSamples(0), + LastRunningSampleCount(0), + FullCameraFrameCount(0), + LastCameraTime("C"), + LastFrameTime("F"), + LastSensorTime("S"), + LastFrameTimestamp(0) +{ + // 15 samples ok in min-window for DK2 since it uses microsecond clock. + TimeFilter = SensorTimeFilter(SensorTimeFilter::Settings(15)); + + pCalibration = new SensorCalibration(this); +} + +Sensor2DeviceImpl::~Sensor2DeviceImpl() +{ + delete pCalibration; +} + +void Sensor2DeviceImpl::openDevice() +{ + + // Read the currently configured range from sensor. + SensorRangeImpl sr(SensorRange(), 0); + + if (GetInternalDevice()->GetFeatureReport(sr.Buffer, SensorRangeImpl::PacketSize)) + { + sr.Unpack(); + sr.GetSensorRange(&CurrentRange); + } + + // Read the currently configured calibration from sensor. + SensorFactoryCalibrationImpl sc; + if (GetInternalDevice()->GetFeatureReport(sc.Buffer, SensorFactoryCalibrationImpl::PacketSize)) + { + sc.Unpack(); + AccelCalibrationOffset = sc.AccelOffset; + GyroCalibrationOffset = sc.GyroOffset; + AccelCalibrationMatrix = sc.AccelMatrix; + GyroCalibrationMatrix = sc.GyroMatrix; + CalibrationTemperature = sc.Temperature; + } + + // If the sensor has "DisplayInfo" data, use HMD coordinate frame by default. + SensorDisplayInfoImpl displayInfo; + if (GetInternalDevice()->GetFeatureReport(displayInfo.Buffer, SensorDisplayInfoImpl::PacketSize)) + { + displayInfo.Unpack(); + Coordinates = (displayInfo.DistortionType & SensorDisplayInfoImpl::Mask_BaseFmt) ? + Coord_HMD : Coord_Sensor; + } + Coordinates = Coord_HMD; // TODO temporary to force it behave + + // Read/Apply sensor config. + setCoordinateFrame(Coordinates); + setReportRate(Sensor2_DefaultReportRate); + setOnboardCalibrationEnabled(false); + + // Must send DK2 keep-alive. Set Keep-alive at 10 seconds. 
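    // The Interval field below is expressed in milliseconds (10 * 1000 == 10 seconds).
    // INReport == 11 matches Tracker2Message_Sensors above and presumably selects the
    // IN report that the keep-alive applies to; if the keep-alive is not refreshed
    // before the interval elapses, the sensor is expected to stop streaming reports.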
+ KeepAliveMuxReport keepAlive; + keepAlive.CommandId = 0; + keepAlive.INReport = 11; + keepAlive.Interval = 10 * 1000; + + // Device creation is done from background thread so we don't need to add this to the command queue. + KeepAliveMuxImpl keepAliveImpl(keepAlive); + GetInternalDevice()->SetFeatureReport(keepAliveImpl.Buffer, KeepAliveMuxImpl::PacketSize); + + // Read the temperature data from the device + pCalibration->Initialize(); +} + +bool Sensor2DeviceImpl::SetTrackingReport(const TrackingReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setTrackingReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setTrackingReport(const TrackingReport& data) +{ + TrackingImpl ci(data); + return GetInternalDevice()->SetFeatureReport(ci.Buffer, TrackingImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetTrackingReport(TrackingReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getTrackingReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getTrackingReport(TrackingReport* data) +{ + TrackingImpl ci; + if (GetInternalDevice()->GetFeatureReport(ci.Buffer, TrackingImpl::PacketSize)) + { + ci.Unpack(); + *data = ci.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetDisplayReport(const DisplayReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setDisplayReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setDisplayReport(const DisplayReport& data) +{ + DisplayImpl di(data); + return GetInternalDevice()->SetFeatureReport(di.Buffer, DisplayImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetDisplayReport(DisplayReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getDisplayReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getDisplayReport(DisplayReport* data) +{ + DisplayImpl di; + if (GetInternalDevice()->GetFeatureReport(di.Buffer, DisplayImpl::PacketSize)) + { + di.Unpack(); + *data = di.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetMagCalibrationReport(const MagCalibrationReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setMagCalibrationReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setMagCalibrationReport(const MagCalibrationReport& data) +{ + MagCalibrationImpl mci(data); + return GetInternalDevice()->SetFeatureReport(mci.Buffer, MagCalibrationImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetMagCalibrationReport(MagCalibrationReport* data) +{ + // direct call if we are already on the device manager thread + if (GetCurrentThreadId() == GetManagerImpl()->GetThreadId()) + { + return getMagCalibrationReport(data); + } + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getMagCalibrationReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getMagCalibrationReport(MagCalibrationReport* data) +{ + MagCalibrationImpl mci; + if (GetInternalDevice()->GetFeatureReport(mci.Buffer, MagCalibrationImpl::PacketSize)) + { + mci.Unpack(); + *data = 
mci.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetPositionCalibrationReport(const PositionCalibrationReport& data) +{ + Lock::Locker lock(&IndexedReportLock); + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setPositionCalibrationReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setPositionCalibrationReport(const PositionCalibrationReport& data) +{ + UByte version = GetDeviceInterfaceVersion(); + if (version < 5) + { + PositionCalibrationImpl_Pre5 pci(data); + return GetInternalDevice()->SetFeatureReport(pci.Buffer, PositionCalibrationImpl_Pre5::PacketSize); + } + + PositionCalibrationImpl pci(data); + return GetInternalDevice()->SetFeatureReport(pci.Buffer, PositionCalibrationImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetPositionCalibrationReport(PositionCalibrationReport* data) +{ + Lock::Locker lock(&IndexedReportLock); + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getPositionCalibrationReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getPositionCalibrationReport(PositionCalibrationReport* data) +{ + UByte version = GetDeviceInterfaceVersion(); + if (version < 5) + { + PositionCalibrationImpl_Pre5 pci; + if (GetInternalDevice()->GetFeatureReport(pci.Buffer, PositionCalibrationImpl_Pre5::PacketSize)) + { + pci.Unpack(); + *data = pci.Settings; + return true; + } + + return false; + } + + PositionCalibrationImpl pci; + if (GetInternalDevice()->GetFeatureReport(pci.Buffer, PositionCalibrationImpl::PacketSize)) + { + pci.Unpack(); + *data = pci.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::GetAllPositionCalibrationReports(Array<PositionCalibrationReport>* data) +{ + Lock::Locker lock(&IndexedReportLock); + + PositionCalibrationReport pc; + bool result = GetPositionCalibrationReport(&pc); + if (!result) + return false; + + int positions = pc.NumPositions; + data->Clear(); + data->Resize(positions); + + for (int i = 0; i < positions; i++) + { + result = GetPositionCalibrationReport(&pc); + if (!result) + return false; + OVR_ASSERT(pc.NumPositions == positions); + + (*data)[pc.PositionIndex] = pc; + // IMU should be the last one + OVR_ASSERT(pc.PositionType == (pc.PositionIndex == positions - 1) ? 
+ PositionCalibrationReport::PositionType_IMU : PositionCalibrationReport::PositionType_LED); + } + return true; +} + +bool Sensor2DeviceImpl::SetCustomPatternReport(const CustomPatternReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setCustomPatternReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setCustomPatternReport(const CustomPatternReport& data) +{ + CustomPatternImpl cpi(data); + return GetInternalDevice()->SetFeatureReport(cpi.Buffer, CustomPatternImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetCustomPatternReport(CustomPatternReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getCustomPatternReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getCustomPatternReport(CustomPatternReport* data) +{ + CustomPatternImpl cpi; + if (GetInternalDevice()->GetFeatureReport(cpi.Buffer, CustomPatternImpl::PacketSize)) + { + cpi.Unpack(); + *data = cpi.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetManufacturingReport(const ManufacturingReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setManufacturingReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setManufacturingReport(const ManufacturingReport& data) +{ + ManufacturingImpl mi(data); + return GetInternalDevice()->SetFeatureReport(mi.Buffer, ManufacturingImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetManufacturingReport(ManufacturingReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getManufacturingReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getManufacturingReport(ManufacturingReport* data) +{ + ManufacturingImpl mi; + if (GetInternalDevice()->GetFeatureReport(mi.Buffer, ManufacturingImpl::PacketSize)) + { + mi.Unpack(); + *data = mi.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetLensDistortionReport(const LensDistortionReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setLensDistortionReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setLensDistortionReport(const LensDistortionReport& data) +{ + LensDistortionImpl ui(data); + return GetInternalDevice()->SetFeatureReport(ui.Buffer, LensDistortionImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetLensDistortionReport(LensDistortionReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getLensDistortionReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getLensDistortionReport(LensDistortionReport* data) +{ + LensDistortionImpl ui; + if (GetInternalDevice()->GetFeatureReport(ui.Buffer, LensDistortionImpl::PacketSize)) + { + ui.Unpack(); + *data = ui.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetUUIDReport(const UUIDReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setUUIDReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setUUIDReport(const 
UUIDReport& data) +{ + UUIDImpl ui(data); + return GetInternalDevice()->SetFeatureReport(ui.Buffer, UUIDImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetUUIDReport(UUIDReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getUUIDReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getUUIDReport(UUIDReport* data) +{ + UUIDImpl ui; + if (GetInternalDevice()->GetFeatureReport(ui.Buffer, UUIDImpl::PacketSize)) + { + ui.Unpack(); + *data = ui.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetKeepAliveMuxReport(const KeepAliveMuxReport& data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setKeepAliveMuxReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setKeepAliveMuxReport(const KeepAliveMuxReport& data) +{ + KeepAliveMuxImpl kami(data); + return GetInternalDevice()->SetFeatureReport(kami.Buffer, KeepAliveMuxImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetKeepAliveMuxReport(KeepAliveMuxReport* data) +{ + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getKeepAliveMuxReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getKeepAliveMuxReport(KeepAliveMuxReport* data) +{ + KeepAliveMuxImpl kami; + if (GetInternalDevice()->GetFeatureReport(kami.Buffer, KeepAliveMuxImpl::PacketSize)) + { + kami.Unpack(); + *data = kami.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::SetTemperatureReport(const TemperatureReport& data) +{ + Lock::Locker lock(&IndexedReportLock); + + // direct call if we are already on the device manager thread + if (GetCurrentThreadId() == GetManagerImpl()->GetThreadId()) + { + return setTemperatureReport(data); + } + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::setTemperatureReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::setTemperatureReport(const TemperatureReport& data) +{ + TemperatureImpl ti(data); + return GetInternalDevice()->SetFeatureReport(ti.Buffer, TemperatureImpl::PacketSize); +} + +bool Sensor2DeviceImpl::GetTemperatureReport(TemperatureReport* data) +{ + Lock::Locker lock(&IndexedReportLock); + + // direct call if we are already on the device manager thread + if (GetCurrentThreadId() == GetManagerImpl()->GetThreadId()) + { + return getTemperatureReport(data); + } + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getTemperatureReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::GetAllTemperatureReports(Array<Array<TemperatureReport> >* data) +{ + Lock::Locker lock(&IndexedReportLock); + + TemperatureReport t; + bool result = GetTemperatureReport(&t); + if (!result) + return false; + + int bins = t.NumBins, samples = t.NumSamples; + data->Clear(); + data->Resize(bins); + for (int i = 0; i < bins; i++) + (*data)[i].Resize(samples); + + for (int i = 0; i < bins; i++) + for (int j = 0; j < samples; j++) + { + result = GetTemperatureReport(&t); + if (!result) + return false; + OVR_ASSERT(t.NumBins == bins && t.NumSamples == samples); + + (*data)[t.Bin][t.Sample] = t; + } + return true; +} + +bool Sensor2DeviceImpl::getTemperatureReport(TemperatureReport* data) +{ + 
TemperatureImpl ti; + if (GetInternalDevice()->GetFeatureReport(ti.Buffer, TemperatureImpl::PacketSize)) + { + ti.Unpack(); + *data = ti.Settings; + return true; + } + + return false; +} + +bool Sensor2DeviceImpl::GetGyroOffsetReport(GyroOffsetReport* data) +{ + // direct call if we are already on the device manager thread + if (GetCurrentThreadId() == GetManagerImpl()->GetThreadId()) + { + return getGyroOffsetReport(data); + } + + bool result; + if (!GetManagerImpl()->GetThreadQueue()-> + PushCallAndWaitResult(this, &Sensor2DeviceImpl::getGyroOffsetReport, &result, data)) + { + return false; + } + + return result; +} + +bool Sensor2DeviceImpl::getGyroOffsetReport(GyroOffsetReport* data) +{ + GyroOffsetImpl goi; + if (GetInternalDevice()->GetFeatureReport(goi.Buffer, GyroOffsetImpl::PacketSize)) + { + goi.Unpack(); + *data = goi.Settings; + return true; + } + + return false; +} + +void Sensor2DeviceImpl::onTrackerMessage(Tracker2Message* message) +{ + if (message->Type != Tracker2Message_Sensors) + return; + + const float sampleIntervalTimeUnit = (1.0f / 1000.f); + double scaledSampleIntervalTimeUnit = sampleIntervalTimeUnit; + Tracker2Sensors& s = message->Sensors; + + double absoluteTimeSeconds = 0.0; + + if (SequenceValid) + { + UInt32 runningSampleCountDelta; + + if (s.RunningSampleCount < LastRunningSampleCount) + { + // The running sample count on the device rolled around the 16 bit counter + // (expect to happen about once per minute), so RunningSampleCount + // needs a high word increment. + runningSampleCountDelta = ((((int)s.RunningSampleCount) + 0x10000) - (int)LastRunningSampleCount); + } + else + { + runningSampleCountDelta = (s.RunningSampleCount - LastRunningSampleCount); + } + + absoluteTimeSeconds = LastSensorTime.TimeSeconds; + scaledSampleIntervalTimeUnit = TimeFilter.ScaleTimeUnit(sampleIntervalTimeUnit); + + // If we missed a small number of samples, replicate the last sample. 
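+        // The gap is covered by a single synthetic message whose TimeDelta spans the missing interval;
+        // gaps larger than 254 samples are skipped entirely, presumably because a gap that large indicates
+        // a stalled or restarted stream where stretching the stale reading over it would do more harm than good.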
+ if ((runningSampleCountDelta > LastNumSamples) && (runningSampleCountDelta <= 254)) + { + if (HandlerRef.HasHandlers()) + { + MessageBodyFrame sensors(this); + + sensors.AbsoluteTimeSeconds = absoluteTimeSeconds - s.NumSamples * scaledSampleIntervalTimeUnit; + sensors.TimeDelta = (float) ((runningSampleCountDelta - LastNumSamples) * scaledSampleIntervalTimeUnit); + sensors.Acceleration = LastAcceleration; + sensors.RotationRate = LastRotationRate; + sensors.MagneticField = LastMagneticField; + sensors.Temperature = LastTemperature; + + pCalibration->Apply(sensors); + HandlerRef.Call(sensors); + } + } + } + else + { + LastAcceleration = Vector3f(0); + LastRotationRate = Vector3f(0); + LastMagneticField= Vector3f(0); + LastTemperature = 0; + SequenceValid = true; + } + + LastNumSamples = s.NumSamples; + LastRunningSampleCount = s.RunningSampleCount; + + if (HandlerRef.HasHandlers()) + { + MessageBodyFrame sensors(this); + UByte iterations = s.NumSamples; + + if (s.NumSamples > 2) + { + iterations = 2; + sensors.TimeDelta = (float) ((s.NumSamples - 1) * scaledSampleIntervalTimeUnit); + } + else + { + sensors.TimeDelta = (float) scaledSampleIntervalTimeUnit; + } + + for (UByte i = 0; i < iterations; i++) + { + sensors.AbsoluteTimeSeconds = absoluteTimeSeconds - ( iterations - 1 - i ) * scaledSampleIntervalTimeUnit; + sensors.Acceleration = AccelFromBodyFrameUpdate(s, i); + sensors.RotationRate = EulerFromBodyFrameUpdate(s, i); + sensors.MagneticField= MagFromBodyFrameUpdate(s); + sensors.Temperature = s.Temperature * 0.01f; + + pCalibration->Apply(sensors); + HandlerRef.Call(sensors); + + // TimeDelta for the last two sample is always fixed. + sensors.TimeDelta = (float) scaledSampleIntervalTimeUnit; + } + + // Send pixel read only when frame timestamp changes. + if (LastFrameTimestamp != s.FrameTimestamp) + { + MessagePixelRead pixelRead(this); + // Prepare message for pixel read + pixelRead.PixelReadValue = s.FrameID; + pixelRead.RawFrameTime = s.FrameTimestamp; + pixelRead.RawSensorTime = s.SampleTimestamp; + pixelRead.SensorTimeSeconds = LastSensorTime.TimeSeconds; + pixelRead.FrameTimeSeconds = LastFrameTime.TimeSeconds; + + HandlerRef.Call(pixelRead); + LastFrameTimestamp = s.FrameTimestamp; + } + + UInt16 lowFrameCount = (UInt16) FullCameraFrameCount; + // Send message only when frame counter changes + if (lowFrameCount != s.CameraFrameCount) + { + // check for the rollover in the counter + if (s.CameraFrameCount < lowFrameCount) + FullCameraFrameCount += 0x10000; + // update the low bits + FullCameraFrameCount = (FullCameraFrameCount & ~0xFFFF) | s.CameraFrameCount; + + MessageExposureFrame vision(this); + vision.CameraPattern = s.CameraPattern; + vision.CameraFrameCount = FullCameraFrameCount; + vision.CameraTimeSeconds = LastCameraTime.TimeSeconds; + + HandlerRef.Call(vision); + } + + LastAcceleration = sensors.Acceleration; + LastRotationRate = sensors.RotationRate; + LastMagneticField= sensors.MagneticField; + LastTemperature = sensors.Temperature; + + //LastPixelRead = pixelRead.PixelReadValue; + //LastPixelReadTimeStamp = LastFrameTime; + } + else + { + if (s.NumSamples != 0) + { + UByte i = (s.NumSamples > 1) ? 1 : 0; + LastAcceleration = AccelFromBodyFrameUpdate(s, i); + LastRotationRate = EulerFromBodyFrameUpdate(s, i); + LastMagneticField = MagFromBodyFrameUpdate(s); + LastTemperature = s.Temperature * 0.01f; + } + } +} + +// Helper function to handle wrap-around of timestamps from Tracker2Message and convert them +// to system time. 
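+// - The raw values are 32-bit microsecond counters, so each wraps roughly every 71 minutes (2^32 us); the
+//   mapping extends them to 64 bits, rolling the IMU timestamp's high word forward on a detected rollover
+//   and letting the other timestamps borrow those high bits.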
+// - Any timestamps that didn't increment keep their old system time. +// - This is a bit tricky since we don't know which one of timestamps has most recent time. +// - The first timestamp must be the IMU one; we assume that others can't be too much ahead of it + +void UpdateDK2Timestamps(SensorTimeFilter& tf, + SensorTimestampMapping** timestamps, UInt32 *rawValues, int count) +{ + int updateIndices[4]; + int updateCount = 0; + int i; + double now = Timer::GetSeconds(); + + OVR_ASSERT(count <= sizeof(updateIndices)/sizeof(int)); + + // Update timestamp wrapping for any values that changed. + for (i = 0; i < count; i++) + { + UInt32 lowMks = (UInt32)timestamps[i]->TimestampMks; // Low 32-bits are raw old timestamp. + + if (rawValues[i] != lowMks) + { + if (i == 0) + { + // Only check for rollover in the IMU timestamp + if (rawValues[i] < lowMks) + { + LogText("Timestamp %d rollover, was: %u, now: %u\n", i, lowMks, rawValues[i]); + timestamps[i]->TimestampMks += 0x100000000; + } + // Update the low bits + timestamps[i]->TimestampMks = (timestamps[i]->TimestampMks & 0xFFFFFFFF00000000) | rawValues[i]; + } + else + { + // Take the high bits from the main timestamp first (not a typo in the first argument!) + timestamps[i]->TimestampMks = + (timestamps[0]->TimestampMks & 0xFFFFFFFF00000000) | rawValues[i]; + // Now force it into the reasonable range around the expanded main timestamp + if (timestamps[i]->TimestampMks > timestamps[0]->TimestampMks + 0x1000000) + timestamps[i]->TimestampMks -= 0x100000000; + else if (timestamps[i]->TimestampMks + 0x100000000 < timestamps[0]->TimestampMks + 0x1000000) + timestamps[i]->TimestampMks += 0x100000000; + } + + updateIndices[updateCount] = i; + updateCount++; + } + } + + + // TBD: Simplify. Update indices should no longer be needed with new TimeFilter accepting + // previous values. + // We might want to have multi-element checking time roll-over. + + static const double mksToSec = 1.0 / 1000000.0; + + for (int i = 0; i < updateCount; i++) + { + SensorTimestampMapping& ts = *timestamps[updateIndices[i]]; + + ts.TimeSeconds = tf.SampleToSystemTime(((double)ts.TimestampMks) * mksToSec, + now, ts.TimeSeconds, ts.DebugTag); + } +} + + +void Sensor2DeviceImpl::OnInputReport(UByte* pData, UInt32 length) +{ + bool processed = false; + if (!processed) + { + Tracker2Message message; + if (decodeTracker2Message(&message, pData, length)) + { + processed = true; + + // Process microsecond timestamps from DK2 tracker. + // Mapped and raw values must correspond to one another in each array. + // IMU timestamp must be the first one! + SensorTimestampMapping* tsMaps[3] = + { + &LastSensorTime, + &LastCameraTime, + &LastFrameTime + }; + UInt32 tsRawMks[3] = + { + message.Sensors.SampleTimestamp, + message.Sensors.CameraTimestamp, + message.Sensors.FrameTimestamp + }; + // Handle wrap-around and convert samples to system time for any samples that changed. 
+ UpdateDK2Timestamps(TimeFilter, tsMaps, tsRawMks, sizeof(tsRawMks)/sizeof(tsRawMks[0])); + + onTrackerMessage(&message); + + /* + if (SF_LOG_fp) + { + static UInt32 lastFrameTs = 0; + static UInt32 lastCameraTs = 0; + + if ((lastFrameTs != message.Sensors.FrameTimestamp) || + (lastCameraTs = message.Sensors.CameraTimestamp)) + fprintf(SF_LOG_fp, "msg cameraTs: 0x%X frameTs: 0x%X sensorTs: 0x%X\n", + message.Sensors.CameraTimestamp, message.Sensors.FrameTimestamp, + message.Sensors.SampleTimestamp); + + lastFrameTs = message.Sensors.FrameTimestamp; + lastCameraTs = message.Sensors.CameraTimestamp; + } + */ + +#if 0 + // Checks for DK2 firmware bug. + static unsigned SLastSampleTime = 0; + if ((SLastSampleTime > message.Sensors.SampleTimestamp) && message.Sensors.SampleTimestamp > 1000000 ) + { + fprintf(SF_LOG_fp, "*** Sample Timestamp Wrap! ***\n"); + OVR_ASSERT (SLastSampleTime <= message.Sensors.SampleTimestamp); + } + SLastSampleTime = message.Sensors.SampleTimestamp; + + static unsigned SLastCameraTime = 0; + if ((SLastCameraTime > message.Sensors.CameraTimestamp) && message.Sensors.CameraTimestamp > 1000000 ) + { + fprintf(SF_LOG_fp, "*** Camera Timestamp Wrap! ***\n"); + OVR_ASSERT (SLastCameraTime <= message.Sensors.CameraTimestamp); + } + SLastCameraTime = message.Sensors.CameraTimestamp; + + static unsigned SLastFrameTime = 0; + if ((SLastFrameTime > message.Sensors.FrameTimestamp) && message.Sensors.FrameTimestamp > 1000000 ) + { + fprintf(SF_LOG_fp, "*** Frame Timestamp Wrap! ***\n"); + OVR_ASSERT (SLastFrameTime <= message.Sensors.FrameTimestamp); + } + SLastFrameTime = message.Sensors.FrameTimestamp; +#endif + } + } +} + +double Sensor2DeviceImpl::OnTicks(double tickSeconds) +{ + + if (tickSeconds >= NextKeepAliveTickSeconds) + { + // Must send DK2 keep-alive. Set Keep-alive at 10 seconds. + KeepAliveMuxReport keepAlive; + keepAlive.CommandId = 0; + keepAlive.INReport = 11; + keepAlive.Interval = 10 * 1000; + + // Device creation is done from background thread so we don't need to add this to the command queue. + KeepAliveMuxImpl keepAliveImpl(keepAlive); + GetInternalDevice()->SetFeatureReport(keepAliveImpl.Buffer, KeepAliveMuxImpl::PacketSize); + + // Emit keep-alive every few seconds. + double keepAliveDelta = 3.0; // Use 3-second interval. + NextKeepAliveTickSeconds = tickSeconds + keepAliveDelta; + } + return NextKeepAliveTickSeconds - tickSeconds; +} + +} // namespace OVR diff --git a/LibOVR/Src/OVR_Sensor2Impl.h b/LibOVR/Src/OVR_Sensor2Impl.h new file mode 100644 index 0000000..12da869 --- /dev/null +++ b/LibOVR/Src/OVR_Sensor2Impl.h @@ -0,0 +1,157 @@ +/************************************************************************************ + +Filename : OVR_Sensor2Impl.h +Content : DK2 sensor device specific implementation. +Created : January 21, 2013 +Authors : Lee Cooper + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Sensor2Impl_h +#define OVR_Sensor2Impl_h + +#include "OVR_SensorImpl.h" +#include "OVR_SensorCalibration.h" + +namespace OVR { + +struct Tracker2Message; + +//------------------------------------------------------------------------------------- +// Used to convert DK2 Mks timestamps to system TimeSeconds +struct SensorTimestampMapping +{ + UInt64 TimestampMks; + double TimeSeconds; + const char* DebugTag; + + SensorTimestampMapping(const char* debugTag) + : TimestampMks(0), TimeSeconds(0.0), DebugTag(debugTag) { } +}; + +//------------------------------------------------------------------------------------- +// ***** OVR::Sensor2DeviceImpl + +// Oculus Sensor2 interface. +class Sensor2DeviceImpl : public SensorDeviceImpl +{ +public: + Sensor2DeviceImpl(SensorDeviceCreateDesc* createDesc); + ~Sensor2DeviceImpl(); + + // HIDDevice::Notifier interface. + virtual void OnInputReport(UByte* pData, UInt32 length); + virtual double OnTicks(double tickSeconds); + + // Get/set feature reports added for DK2. See 'DK2 Firmware Specification' document details. + virtual bool SetTrackingReport(const TrackingReport& data); + virtual bool GetTrackingReport(TrackingReport* data); + + virtual bool SetDisplayReport(const DisplayReport& data); + virtual bool GetDisplayReport(DisplayReport* data); + + virtual bool SetMagCalibrationReport(const MagCalibrationReport& data); + virtual bool GetMagCalibrationReport(MagCalibrationReport* data); + + virtual bool SetPositionCalibrationReport(const PositionCalibrationReport& data); + bool GetPositionCalibrationReport(PositionCalibrationReport* data); + virtual bool GetAllPositionCalibrationReports(Array<PositionCalibrationReport>* data); + + virtual bool SetCustomPatternReport(const CustomPatternReport& data); + virtual bool GetCustomPatternReport(CustomPatternReport* data); + + virtual bool SetKeepAliveMuxReport(const KeepAliveMuxReport& data); + virtual bool GetKeepAliveMuxReport(KeepAliveMuxReport* data); + + virtual bool SetManufacturingReport(const ManufacturingReport& data); + virtual bool GetManufacturingReport(ManufacturingReport* data); + + virtual bool SetUUIDReport(const UUIDReport& data); + virtual bool GetUUIDReport(UUIDReport* data); + + virtual bool SetTemperatureReport(const TemperatureReport& data); + bool GetTemperatureReport(TemperatureReport* data); + virtual bool GetAllTemperatureReports(Array<Array<TemperatureReport> >*); + + virtual bool GetGyroOffsetReport(GyroOffsetReport* data); + + virtual bool SetLensDistortionReport(const LensDistortionReport& data); + virtual bool GetLensDistortionReport(LensDistortionReport* data); + +protected: + virtual void openDevice(); + + bool decodeTracker2Message(Tracker2Message* message, UByte* buffer, int size); + + bool setTrackingReport(const TrackingReport& data); + bool getTrackingReport(TrackingReport* data); + + bool setDisplayReport(const DisplayReport& data); + bool getDisplayReport(DisplayReport* data); + + bool setMagCalibrationReport(const MagCalibrationReport& data); + bool getMagCalibrationReport(MagCalibrationReport* data); + + bool setPositionCalibrationReport(const PositionCalibrationReport& data); + bool getPositionCalibrationReport(PositionCalibrationReport* data); + + bool setCustomPatternReport(const CustomPatternReport& data); + bool getCustomPatternReport(CustomPatternReport* data); + + 
bool setKeepAliveMuxReport(const KeepAliveMuxReport& data); + bool getKeepAliveMuxReport(KeepAliveMuxReport* data); + + bool setManufacturingReport(const ManufacturingReport& data); + bool getManufacturingReport(ManufacturingReport* data); + + bool setUUIDReport(const UUIDReport& data); + bool getUUIDReport(UUIDReport* data); + + bool setTemperatureReport(const TemperatureReport& data); + bool getTemperatureReport(TemperatureReport* data); + + bool getGyroOffsetReport(GyroOffsetReport* data); + + bool setLensDistortionReport(const LensDistortionReport& data); + bool getLensDistortionReport(LensDistortionReport* data); + + // Called for decoded messages + void onTrackerMessage(Tracker2Message* message); + + UByte LastNumSamples; + UInt16 LastRunningSampleCount; + UInt32 FullCameraFrameCount; + + SensorTimestampMapping LastCameraTime; + SensorTimestampMapping LastFrameTime; + SensorTimestampMapping LastSensorTime; + // Record last frame timestamp to know when to send pixelRead messages. + UInt32 LastFrameTimestamp; + + SensorCalibration *pCalibration; + + // This lock is used to protect operations with auto-incrementing indices + // (see TemperatureReport and PositionCalibrationReport) + Lock IndexedReportLock; +}; + +} // namespace OVR + +#endif // OVR_Sensor2Impl_h diff --git a/LibOVR/Src/OVR_Sensor2ImplUtil.h b/LibOVR/Src/OVR_Sensor2ImplUtil.h new file mode 100644 index 0000000..ffe9b6a --- /dev/null +++ b/LibOVR/Src/OVR_Sensor2ImplUtil.h @@ -0,0 +1,676 @@ +/************************************************************************************ + +Filename : OVR_Sensor2ImplUtil.h +Content : DK2 sensor device feature report utils. +Created : January 27, 2014 +Authors : Lee Cooper + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Sensor2ImplUtil_h +#define OVR_Sensor2ImplUtil_h + +#include "OVR_Device.h" +#include "OVR_SensorImpl_Common.h" +#include "Kernel/OVR_Alg.h" + +namespace OVR { + +using namespace Alg; + +// Tracking feature report. 
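+// Each *Impl struct in this file wraps one HID feature report: Buffer[0] carries the feature report ID,
+// bytes 1..2 the CommandId, and Pack()/Unpack() translate between the raw wire buffer and the matching
+// *Report settings struct.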
+struct TrackingImpl +{ + enum { PacketSize = 13 }; + UByte Buffer[PacketSize]; + + TrackingReport Settings; + + TrackingImpl() + { + for (int i=0; i<PacketSize; i++) + { + Buffer[i] = 0; + } + + Buffer[0] = 12; + } + + TrackingImpl(const TrackingReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 12; + EncodeUInt16 ( Buffer+1, Settings.CommandId ); + Buffer[3] = Settings.Pattern; + Buffer[4] = UByte(Settings.Enable << 0 | + Settings.Autoincrement << 1 | + Settings.UseCarrier << 2 | + Settings.SyncInput << 3 | + Settings.VsyncLock << 4 | + Settings.CustomPattern << 5); + Buffer[5] = 0; + EncodeUInt16 ( Buffer+6, Settings.ExposureLength ); + EncodeUInt16 ( Buffer+8, Settings.FrameInterval ); + EncodeUInt16 ( Buffer+10, Settings.VsyncOffset ); + Buffer[12] = Settings.DutyCycle; + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.Pattern = Buffer[3]; + Settings.Enable = (Buffer[4] & 0x01) != 0; + Settings.Autoincrement = (Buffer[4] & 0x02) != 0; + Settings.UseCarrier = (Buffer[4] & 0x04) != 0; + Settings.SyncInput = (Buffer[4] & 0x08) != 0; + Settings.VsyncLock = (Buffer[4] & 0x10) != 0; + Settings.CustomPattern = (Buffer[4] & 0x20) != 0; + Settings.ExposureLength = DecodeUInt16(Buffer+6); + Settings.FrameInterval = DecodeUInt16(Buffer+8); + Settings.VsyncOffset = DecodeUInt16(Buffer+10); + Settings.DutyCycle = Buffer[12]; + } +}; + +// Display feature report. +struct DisplayImpl +{ + enum { PacketSize = 16 }; + UByte Buffer[PacketSize]; + + DisplayReport Settings; + + DisplayImpl() + { + for (int i=0; i<PacketSize; i++) + { + Buffer[i] = 0; + } + + Buffer[0] = 13; + } + + DisplayImpl(const DisplayReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 13; + EncodeUInt16 ( Buffer+1, Settings.CommandId ); + Buffer[3] = Settings.Brightness; + Buffer[4] = UByte( (Settings.ShutterType & 0x0F) | + (Settings.CurrentLimit & 0x03) << 4 | + (Settings.UseRolling ? 0x40 : 0) | + (Settings.ReverseRolling ? 0x80 : 0)); + Buffer[5] = UByte( (Settings.HighBrightness ? 0x01 : 0) | + (Settings.SelfRefresh ? 0x02 : 0) | + (Settings.ReadPixel ? 0x04 : 0) | + (Settings.DirectPentile ? 0x08 : 0)); + EncodeUInt16 ( Buffer+8, Settings.Persistence ); + EncodeUInt16 ( Buffer+10, Settings.LightingOffset ); + EncodeUInt16 ( Buffer+12, Settings.PixelSettle ); + EncodeUInt16 ( Buffer+14, Settings.TotalRows ); + } + + void Unpack() + { + + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.Brightness = Buffer[3]; + Settings.ShutterType = DisplayReport::ShutterTypeEnum(Buffer[4] & 0x0F); + Settings.CurrentLimit = DisplayReport::CurrentLimitEnum((Buffer[4] >> 4) & 0x02); + Settings.UseRolling = (Buffer[4] & 0x40) != 0; + Settings.ReverseRolling = (Buffer[4] & 0x80) != 0; + Settings.HighBrightness = (Buffer[5] & 0x01) != 0; + Settings.SelfRefresh = (Buffer[5] & 0x02) != 0; + Settings.ReadPixel = (Buffer[5] & 0x04) != 0; + Settings.DirectPentile = (Buffer[5] & 0x08) != 0; + Settings.Persistence = DecodeUInt16(Buffer+8); + Settings.LightingOffset = DecodeUInt16(Buffer+10); + Settings.PixelSettle = DecodeUInt16(Buffer+12); + Settings.TotalRows = DecodeUInt16(Buffer+14); + } +}; + +// MagCalibration feature report. 
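+// The 3x4 calibration matrix is transported as signed 32-bit fixed point with 1e-4 resolution
+// (note the *1e4f / *1e-4f scaling in Pack/Unpack below).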
+struct MagCalibrationImpl +{ + enum { PacketSize = 52 }; + UByte Buffer[PacketSize]; + + MagCalibrationReport Settings; + + MagCalibrationImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 14; + } + + MagCalibrationImpl(const MagCalibrationReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + Buffer[0] = 14; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.Version; + + for (int i = 0; i < 3; i++) + for (int j = 0; j < 4; j++) + { + SInt32 value = SInt32(Settings.Calibration.M[i][j] * 1e4f); + EncodeSInt32(Buffer + 4 + 4 * (4 * i + j), value); + } + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.Version = Buffer[3]; + + for (int i = 0; i < 3; i++) + for (int j = 0; j < 4; j++) + { + SInt32 value = DecodeSInt32(Buffer + 4 + 4 * (4 * i + j)); + Settings.Calibration.M[i][j] = (float)value * 1e-4f; + } + } +}; + +//------------------------------------------------------------------------------------- +// PositionCalibration feature report. +// - Sensor interface versions before 5 do not support Normal and Rotation. + +struct PositionCalibrationImpl +{ + enum { PacketSize = 30 }; + UByte Buffer[PacketSize]; + + PositionCalibrationReport Settings; + + PositionCalibrationImpl() + { + for (int i=0; i<PacketSize; i++) + { + Buffer[i] = 0; + } + + Buffer[0] = 15; + } + + PositionCalibrationImpl(const PositionCalibrationReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 15; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.Version; + + Vector3d position = Settings.Position * 1e6; + EncodeSInt32(Buffer+4, (SInt32) position.x); + EncodeSInt32(Buffer+8, (SInt32) position.y); + EncodeSInt32(Buffer+12, (SInt32) position.z); + + Vector3d normal = Settings.Normal * 1e6; + EncodeSInt16(Buffer+16, (SInt16) normal.x); + EncodeSInt16(Buffer+18, (SInt16) normal.y); + EncodeSInt16(Buffer+20, (SInt16) normal.z); + + double rotation = Settings.Rotation * 1e4; + EncodeSInt16(Buffer+22, (SInt16) rotation); + + EncodeUInt16(Buffer+24, Settings.PositionIndex); + EncodeUInt16(Buffer+26, Settings.NumPositions); + EncodeUInt16(Buffer+28, UInt16(Settings.PositionType)); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.Version = Buffer[3]; + + Settings.Position.x = DecodeSInt32(Buffer + 4) * 1e-6; + Settings.Position.y = DecodeSInt32(Buffer + 8) * 1e-6; + Settings.Position.z = DecodeSInt32(Buffer + 12) * 1e-6; + + Settings.Normal.x = DecodeSInt16(Buffer + 16) * 1e-6; + Settings.Normal.y = DecodeSInt16(Buffer + 18) * 1e-6; + Settings.Normal.z = DecodeSInt16(Buffer + 20) * 1e-6; + + Settings.Rotation = DecodeSInt16(Buffer + 22) * 1e-4; + + Settings.PositionIndex = DecodeUInt16(Buffer + 24); + Settings.NumPositions = DecodeUInt16(Buffer + 26); + + Settings.PositionType = PositionCalibrationReport::PositionTypeEnum(DecodeUInt16(Buffer + 28)); + } +}; + +struct PositionCalibrationImpl_Pre5 +{ + enum { PacketSize = 22 }; + UByte Buffer[PacketSize]; + + PositionCalibrationReport Settings; + + PositionCalibrationImpl_Pre5() + { + for (int i=0; i<PacketSize; i++) + { + Buffer[i] = 0; + } + + Buffer[0] = 15; + } + + PositionCalibrationImpl_Pre5(const PositionCalibrationReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 15; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.Version; + + Vector3d position = Settings.Position * 1e6; + EncodeSInt32(Buffer+4 , (SInt32) position.x); + 
EncodeSInt32(Buffer+8 , (SInt32) position.y); + EncodeSInt32(Buffer+12, (SInt32) position.z); + + EncodeUInt16(Buffer+16, Settings.PositionIndex); + EncodeUInt16(Buffer+18, Settings.NumPositions); + EncodeUInt16(Buffer+20, UInt16(Settings.PositionType)); + } + + void Unpack() + { + + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.Version = Buffer[3]; + + Settings.Position.x = DecodeSInt32(Buffer + 4) * 1e-6; + Settings.Position.y = DecodeSInt32(Buffer + 8) * 1e-6; + Settings.Position.z = DecodeSInt32(Buffer + 12) * 1e-6; + + Settings.PositionIndex = DecodeUInt16(Buffer + 16); + Settings.NumPositions = DecodeUInt16(Buffer + 18); + Settings.PositionType = PositionCalibrationReport::PositionTypeEnum(DecodeUInt16(Buffer + 20)); + } +}; + +// CustomPattern feature report. +struct CustomPatternImpl +{ + enum { PacketSize = 12 }; + UByte Buffer[PacketSize]; + + CustomPatternReport Settings; + + CustomPatternImpl() + { + for (int i=0; i<PacketSize; i++) + { + Buffer[i] = 0; + } + + Buffer[0] = 16; + } + + CustomPatternImpl(const CustomPatternReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 16; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.SequenceLength; + EncodeUInt32(Buffer+4 , Settings.Sequence); + EncodeUInt16(Buffer+8 , Settings.LEDIndex); + EncodeUInt16(Buffer+10, Settings.NumLEDs); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.SequenceLength = Buffer[3]; + Settings.Sequence = DecodeUInt32(Buffer+4); + Settings.LEDIndex = DecodeUInt16(Buffer+8); + Settings.NumLEDs = DecodeUInt16(Buffer+10); + } +}; + +// Manufacturing feature report. +struct ManufacturingImpl +{ + enum { PacketSize = 16 }; + UByte Buffer[PacketSize]; + + ManufacturingReport Settings; + + ManufacturingImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 18; + } + + ManufacturingImpl(const ManufacturingReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + Buffer[0] = 18; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.NumStages; + Buffer[4] = Settings.Stage; + Buffer[5] = Settings.StageVersion; + EncodeUInt16(Buffer+6, Settings.StageLocation); + EncodeUInt32(Buffer+8, Settings.StageTime); + EncodeUInt32(Buffer+12, Settings.Result); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.NumStages = Buffer[3]; + Settings.Stage = Buffer[4]; + Settings.StageVersion = Buffer[5]; + Settings.StageLocation = DecodeUInt16(Buffer+6); + Settings.StageTime = DecodeUInt32(Buffer+8); + Settings.Result = DecodeUInt32(Buffer+12); + } +}; + +// UUID feature report. +struct UUIDImpl +{ + enum { PacketSize = 23 }; + UByte Buffer[PacketSize]; + + UUIDReport Settings; + + UUIDImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 19; + } + + UUIDImpl(const UUIDReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + Buffer[0] = 19; + EncodeUInt16(Buffer+1, Settings.CommandId); + for (int i = 0; i < 20; ++i) + Buffer[3 + i] = Settings.UUIDValue[i]; + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + for (int i = 0; i < 20; ++i) + Settings.UUIDValue[i] = Buffer[3 + i]; + } +}; + +// LensDistortion feature report. 
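+// Distortion parameters (K coefficients, MaxR, MetersPerTanAngleAtCenter, chromatic aberration terms)
+// travel as raw 16-bit values; this struct applies no scaling of its own.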
+struct LensDistortionImpl +{ + enum { PacketSize = 64 }; + UByte Buffer[PacketSize]; + + LensDistortionReport Settings; + + LensDistortionImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 22; + } + + LensDistortionImpl(const LensDistortionReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + Buffer[0] = 19; + EncodeUInt16(Buffer+1, Settings.CommandId); + + Buffer[3] = Settings.NumDistortions; + Buffer[4] = Settings.DistortionIndex; + Buffer[5] = Settings.Bitmask; + EncodeUInt16(Buffer+6, Settings.LensType); + EncodeUInt16(Buffer+8, Settings.Version); + EncodeUInt16(Buffer+10, Settings.EyeRelief); + + for (int i = 0; i < 11; ++i) + EncodeUInt16(Buffer+12+2*i, Settings.KCoefficients[i]); + + EncodeUInt16(Buffer+34, Settings.MaxR); + EncodeUInt16(Buffer+36, Settings.MetersPerTanAngleAtCenter); + + for (int i = 0; i < 4; ++i) + EncodeUInt16(Buffer+38+2*i, Settings.ChromaticAberration[i]); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + + Settings.NumDistortions = Buffer[3]; + Settings.DistortionIndex = Buffer[4]; + Settings.Bitmask = Buffer[5]; + Settings.LensType = DecodeUInt16(Buffer+6); + Settings.Version = DecodeUInt16(Buffer+8); + Settings.EyeRelief = DecodeUInt16(Buffer+10); + + for (int i = 0; i < 11; ++i) + Settings.KCoefficients[i] = DecodeUInt16(Buffer+12+2*i); + + Settings.MaxR = DecodeUInt16(Buffer+34); + Settings.MetersPerTanAngleAtCenter = DecodeUInt16(Buffer+36); + + for (int i = 0; i < 4; ++i) + Settings.ChromaticAberration[i] = DecodeUInt16(Buffer+38+2*i); + } +}; + +// KeepAliveMux feature report. +struct KeepAliveMuxImpl +{ + enum { PacketSize = 6 }; + UByte Buffer[PacketSize]; + + KeepAliveMuxReport Settings; + + KeepAliveMuxImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 17; + } + + KeepAliveMuxImpl(const KeepAliveMuxReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + Buffer[0] = 17; + EncodeUInt16(Buffer+1, Settings.CommandId); + Buffer[3] = Settings.INReport; + EncodeUInt16(Buffer+4, Settings.Interval); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer+1); + Settings.INReport = Buffer[3]; + Settings.Interval = DecodeUInt16(Buffer+4); + } +}; + +// Temperature feature report. 
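+// One report describes a single (Bin, Sample) slot of the device's gyro-offset-vs-temperature table;
+// temperatures are transported with 1e-2 resolution and the stored offset with 1e-4 resolution (see Pack/Unpack).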
+struct TemperatureImpl +{ + enum { PacketSize = 24 }; + UByte Buffer[PacketSize]; + + TemperatureReport Settings; + + TemperatureImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 20; + } + + TemperatureImpl(const TemperatureReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 20; + EncodeUInt16(Buffer + 1, Settings.CommandId); + Buffer[3] = Settings.Version; + + Buffer[4] = Settings.NumBins; + Buffer[5] = Settings.Bin; + Buffer[6] = Settings.NumSamples; + Buffer[7] = Settings.Sample; + + EncodeSInt16(Buffer + 8 , SInt16(Settings.TargetTemperature * 1e2)); + EncodeSInt16(Buffer + 10, SInt16(Settings.ActualTemperature * 1e2)); + + EncodeUInt32(Buffer + 12, Settings.Time); + + Vector3d offset = Settings.Offset * 1e4; + PackSensor(Buffer + 16, (SInt16) offset.x, (SInt16) offset.y, (SInt16) offset.z); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer + 1); + Settings.Version = Buffer[3]; + + Settings.NumBins = Buffer[4]; + Settings.Bin = Buffer[5]; + Settings.NumSamples = Buffer[6]; + Settings.Sample = Buffer[7]; + + Settings.TargetTemperature = DecodeSInt16(Buffer + 8) * 1e-2; + Settings.ActualTemperature = DecodeSInt16(Buffer + 10) * 1e-2; + + Settings.Time = DecodeUInt32(Buffer + 12); + + SInt32 x, y, z; + UnpackSensor(Buffer + 16, &x, &y, &z); + Settings.Offset = Vector3d(x, y, z) * 1e-4; + } +}; + +// GyroOffset feature report. +struct GyroOffsetImpl +{ + enum { PacketSize = 18 }; + UByte Buffer[PacketSize]; + + GyroOffsetReport Settings; + + GyroOffsetImpl() + { + memset(Buffer, 0, sizeof(Buffer)); + Buffer[0] = 21; + } + + GyroOffsetImpl(const GyroOffsetReport& settings) + : Settings(settings) + { + Pack(); + } + + void Pack() + { + + Buffer[0] = 21; + Buffer[1] = UByte(Settings.CommandId & 0xFF); + Buffer[2] = UByte(Settings.CommandId >> 8); + Buffer[3] = UByte(Settings.Version); + + Vector3d offset = Settings.Offset * 1e4; + PackSensor(Buffer + 4, (SInt32) offset.x, (SInt32) offset.y, (SInt32) offset.z); + + EncodeSInt16(Buffer + 16, SInt16(Settings.Temperature * 1e2)); + } + + void Unpack() + { + Settings.CommandId = DecodeUInt16(Buffer + 1); + Settings.Version = GyroOffsetReport::VersionEnum(Buffer[3]); + + SInt32 x, y, z; + UnpackSensor(Buffer + 4, &x, &y, &z); + Settings.Offset = Vector3d(x, y, z) * 1e-4f; + + Settings.Temperature = DecodeSInt16(Buffer + 16) * 1e-2; + } +}; + +} // namespace OVR + +#endif // OVR_Sensor2ImplUtil_h diff --git a/LibOVR/Src/OVR_SensorCalibration.cpp b/LibOVR/Src/OVR_SensorCalibration.cpp new file mode 100644 index 0000000..665898b --- /dev/null +++ b/LibOVR/Src/OVR_SensorCalibration.cpp @@ -0,0 +1,276 @@ +/************************************************************************************ + +Filename : OVR_SensorCalibration.cpp +Content : Calibration data implementation for the IMU messages +Created : January 28, 2014 +Authors : Max Katsev + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "OVR_SensorCalibration.h" +#include "Kernel/OVR_Log.h" +#include <time.h> + +namespace OVR { + +using namespace Alg; + +const UByte VERSION = 2; +const UByte MAX_COMPAT_VERSION = 15; + +SensorCalibration::SensorCalibration(SensorDevice* pSensor) + : MagCalibrated(false), GyroAutoTemperature(0), GyroFilter(6000) +{ + this->pSensor = pSensor; +}; + +void SensorCalibration::Initialize() +{ + // read factory calibration + pSensor->GetFactoryCalibration(&AccelOffset, &GyroAutoOffset, &AccelMatrix, &GyroMatrix, &GyroAutoTemperature); + + // if the headset has an autocalibrated offset, prefer it over the factory defaults + GyroOffsetReport gyroReport; + bool result = pSensor->GetGyroOffsetReport(&gyroReport); + if (result && gyroReport.Version != GyroOffsetReport::Version_NoOffset) + { + GyroAutoOffset = (Vector3f) gyroReport.Offset; + GyroAutoTemperature = (float) gyroReport.Temperature; + } + + // read the temperature tables and prepare the interpolation structures + result = pSensor->GetAllTemperatureReports(&temperatureReports); + OVR_ASSERT(result); + for (int i = 0; i < 3; i++) + Interpolators[i].Initialize(temperatureReports, i); + + // read the mag calibration + MagCalibrationReport report; + result = pSensor->GetMagCalibrationReport(&report); + MagCalibrated = result && report.Version > 0; + MagMatrix = report.Calibration; + if (!MagCalibrated) + { + // OVR_ASSERT(false); + LogError("Magnetometer calibration not found!\n"); + } +} + +void SensorCalibration::Apply(MessageBodyFrame& msg) +{ + AutocalibrateGyro(msg); + + // compute the interpolated offset + Vector3f gyroOffset; + for (int i = 0; i < 3; i++) + gyroOffset[i] = (float) Interpolators[i].GetOffset(msg.Temperature, GyroAutoTemperature, GyroAutoOffset[i]); + + // apply calibration + msg.RotationRate = GyroMatrix.Transform(msg.RotationRate - gyroOffset); + msg.Acceleration = AccelMatrix.Transform(msg.Acceleration - AccelOffset); + if (MagCalibrated) + msg.MagneticField = MagMatrix.Transform(msg.MagneticField); + // TBD: don't report mag calibration for now, since it is used to enable the yaw correction + msg.MagCalibrated = false; +} + +void SensorCalibration::AutocalibrateGyro(MessageBodyFrame const& msg) +{ + const float alpha = 0.4f; + // 1.25f is a scaling factor related to conversion from per-axis comparison to length comparison + const float absLimit = 1.25f * 0.349066f; + const float noiseLimit = 1.25f * 0.03f; + + Vector3f gyro = msg.RotationRate; + // do a moving average to reject short term noise + Vector3f avg = (GyroFilter.IsEmpty()) ? 
gyro : gyro * alpha + GyroFilter.PeekBack() * (1 - alpha); + + // Make sure the absolute value is below what is likely motion + // Make sure it is close enough to the current average that it is probably noise and not motion + if (avg.Length() >= absLimit || (avg - GyroFilter.Mean()).Length() >= noiseLimit) + GyroFilter.Clear(); + GyroFilter.PushBack(avg); + + // if had a reasonable number of samples already use it for the current offset + if (GyroFilter.GetSize() > GyroFilter.GetCapacity() / 2) + { + GyroAutoOffset = GyroFilter.Mean(); + GyroAutoTemperature = msg.Temperature; + // After ~6 seconds of no motion, use the average as the new zero rate offset + if (GyroFilter.IsFull()) + StoreAutoOffset(); + } +} + +void SensorCalibration::StoreAutoOffset() +{ + const double maxDeltaT = 2.5; + const double minExtraDeltaT = 0.5; + const UInt32 minDelay = 24 * 3600; // 1 day in seconds + + // find the best bin + UPInt binIdx = 0; + for (UPInt i = 1; i < temperatureReports.GetSize(); i++) + if (Abs(GyroAutoTemperature - temperatureReports[i][0].TargetTemperature) < + Abs(GyroAutoTemperature - temperatureReports[binIdx][0].TargetTemperature)) + binIdx = i; + + // find the oldest and newest samples + // NB: uninitialized samples have Time == 0, so they will get picked as the oldest + UPInt newestIdx = 0, oldestIdx = 0; + for (UPInt i = 1; i < temperatureReports[binIdx].GetSize(); i++) + { + // if the version is newer - do nothing + if (temperatureReports[binIdx][i].Version > VERSION) + return; + if (temperatureReports[binIdx][i].Time > temperatureReports[binIdx][newestIdx].Time) + newestIdx = i; + if (temperatureReports[binIdx][i].Time < temperatureReports[binIdx][oldestIdx].Time) + oldestIdx = i; + } + TemperatureReport& oldestReport = temperatureReports[binIdx][oldestIdx]; + TemperatureReport& newestReport = temperatureReports[binIdx][newestIdx]; + OVR_ASSERT((oldestReport.Sample == 0 && newestReport.Sample == 0 && newestReport.Version == 0) || + oldestReport.Sample == (newestReport.Sample + 1) % newestReport.NumSamples); + + bool writeSuccess = false; + UInt32 now = (UInt32) time(0); + if (now - newestReport.Time > minDelay) + { + // only write a new sample if the temperature is close enough + if (Abs(GyroAutoTemperature - oldestReport.TargetTemperature) < maxDeltaT) + { + oldestReport.Time = now; + oldestReport.ActualTemperature = GyroAutoTemperature; + oldestReport.Offset = (Vector3d) GyroAutoOffset; + oldestReport.Version = VERSION; + writeSuccess = pSensor->SetTemperatureReport(oldestReport); + OVR_ASSERT(writeSuccess); + } + } + else + { + // if the newest sample is too recent - _update_ it if significantly closer to the target temp + if (Abs(GyroAutoTemperature - newestReport.TargetTemperature) + minExtraDeltaT + < Abs(newestReport.ActualTemperature - newestReport.TargetTemperature)) + { + // (do not update the time!) 
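+            // Keeping the original timestamp means a refinement does not push back the once-per-day
+            // (minDelay) schedule for writing a fresh sample into the oldest slot.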
+ newestReport.ActualTemperature = GyroAutoTemperature; + newestReport.Offset = (Vector3d) GyroAutoOffset; + newestReport.Version = VERSION; + writeSuccess = pSensor->SetTemperatureReport(newestReport); + OVR_ASSERT(writeSuccess); + } + } + + // update the interpolators with the new data + // this is not particularly expensive call and would only happen rarely + // but if performance is a problem, it's possible to only recompute the data that has changed + if (writeSuccess) + for (int i = 0; i < 3; i++) + Interpolators[i].Initialize(temperatureReports, i); +} + +void OffsetInterpolator::Initialize(Array<Array<TemperatureReport> > const& temperatureReports, int coord) +{ + int bins = (int) temperatureReports.GetSize(); + Temperatures.Clear(); + Temperatures.Reserve(bins); + Values.Clear(); + Values.Reserve(bins); + + for (int bin = 0; bin < bins; bin++) + { + OVR_ASSERT(temperatureReports[bin].GetSize() == temperatureReports[0].GetSize()); + //const TemperatureReport& report = median(temperatureReports[bin], coord); + const TemperatureReport& report = temperatureReports[bin][0]; + if (report.Version > 0 && report.Version <= MAX_COMPAT_VERSION) + { + Temperatures.PushBack(report.ActualTemperature); + Values.PushBack(report.Offset[coord]); + } + } +} + +double OffsetInterpolator::GetOffset(double targetTemperature, double autoTemperature, double autoValue) +{ + const double autoRangeExtra = 1.0; + const double minInterpolationDist = 0.5; + + // difference between current and autocalibrated temperature adjusted for preference over historical data + const double adjustedDeltaT = Abs(autoTemperature - targetTemperature) - autoRangeExtra; + + int count = (int) Temperatures.GetSize(); + // handle special cases when we don't have enough data for proper interpolation + if (count == 0) + return autoValue; + if (count == 1) + { + if (adjustedDeltaT < Abs(Temperatures[0] - targetTemperature)) + return autoValue; + else + return Values[0]; + } + + // first, find the interval that contains targetTemperature + // if all points are on the same side of targetTemperature, find the adjacent interval + int l; + if (targetTemperature < Temperatures[1]) + l = 0; + else if (targetTemperature >= Temperatures[count - 2]) + l = count - 2; + else + for (l = 1; l < count - 2; l++) + if (Temperatures[l] <= targetTemperature && targetTemperature < Temperatures[l+1]) + break; + int u = l + 1; + + // extend the interval if it's too small and the interpolation is unreliable + if (Temperatures[u] - Temperatures[l] < minInterpolationDist) + { + if (l > 0 + && (u == count - 1 || Temperatures[u] - Temperatures[l - 1] < Temperatures[u + 1] - Temperatures[l])) + l--; + else if (u < count - 1) + u++; + } + + // verify correctness + OVR_ASSERT(l >= 0 && u < count); + OVR_ASSERT(l == 0 || Temperatures[l] <= targetTemperature); + OVR_ASSERT(u == count - 1 || targetTemperature < Temperatures[u]); + OVR_ASSERT((l == 0 && u == count - 1) || Temperatures[u] - Temperatures[l] > minInterpolationDist); + OVR_ASSERT(Temperatures[l] <= Temperatures[u]); + + // perform the interpolation + double slope; + if (Temperatures[u] - Temperatures[l] >= minInterpolationDist) + slope = (Values[u] - Values[l]) / (Temperatures[u] - Temperatures[l]); + else + // avoid a badly conditioned problem + slope = 0; + if (adjustedDeltaT < Abs(Temperatures[u] - targetTemperature)) + // use the autocalibrated value, if it's close + return autoValue + slope * (targetTemperature - autoTemperature); + else + return Values[u] + slope * (targetTemperature - 
Temperatures[u]); +} + +} // namespace OVR diff --git a/LibOVR/Src/OVR_SensorCalibration.h b/LibOVR/Src/OVR_SensorCalibration.h new file mode 100644 index 0000000..2c437b4 --- /dev/null +++ b/LibOVR/Src/OVR_SensorCalibration.h @@ -0,0 +1,77 @@ +/************************************************************************************ + +Filename : OVR_SensorCalibration.h +Content : Calibration data implementation for the IMU messages +Created : January 28, 2014 +Authors : Max Katsev + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_SensorCalibration_h +#define OVR_SensorCalibration_h + +#include "OVR_Device.h" +#include "OVR_SensorFilter.h" + +namespace OVR { + +class OffsetInterpolator +{ +public: + void Initialize(Array<Array<TemperatureReport> > const& temperatureReports, int coord); + double GetOffset(double targetTemperature, double autoTemperature, double autoValue); + + Array<double> Temperatures; + Array<double> Values; +}; + +class SensorCalibration : public NewOverrideBase +{ +public: + SensorCalibration(SensorDevice* pSensor); + + // Load data from the HW and perform the necessary preprocessing + void Initialize(); + // Apply the calibration + void Apply(MessageBodyFrame& msg); + +protected: + void StoreAutoOffset(); + void AutocalibrateGyro(MessageBodyFrame const& msg); + + SensorDevice* pSensor; + + // Factory calibration data + bool MagCalibrated; + Matrix4f AccelMatrix, GyroMatrix, MagMatrix; + Vector3f AccelOffset; + + // Temperature based data + Array<Array<TemperatureReport> > temperatureReports; + OffsetInterpolator Interpolators[3]; + + // Autocalibration data + SensorFilterf GyroFilter; + Vector3f GyroAutoOffset; + float GyroAutoTemperature; +}; + +} // namespace OVR +#endif //OVR_SensorCalibration_h diff --git a/LibOVR/Src/OVR_SensorFilter.cpp b/LibOVR/Src/OVR_SensorFilter.cpp index 38caa5e..2c660ae 100644 --- a/LibOVR/Src/OVR_SensorFilter.cpp +++ b/LibOVR/Src/OVR_SensorFilter.cpp @@ -2,20 +2,20 @@ PublicHeader: OVR.h Filename : OVR_SensorFilter.cpp -Content : Basic filtering of sensor data +Content : Basic filtering of sensor this->Data Created : March 7, 2013 Authors : Steve LaValle, Anna Yershova, Max Katsev -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -29,95 +29,66 @@ limitations under the License. namespace OVR { -Vector3f SensorFilter::Median() const +template <typename T> +Vector3<T> SensorFilter<T>::Median() const { - int half_window = Count / 2; - float* sortx = (float*) OVR_ALLOC(Count * sizeof(float)); - float* sorty = (float*) OVR_ALLOC(Count * sizeof(float)); - float* sortz = (float*) OVR_ALLOC(Count * sizeof(float)); - float resultx = 0.0f, resulty = 0.0f, resultz = 0.0f; + Vector3<T> result; + T* slice = (T*) OVR_ALLOC(this->ElemCount * sizeof(T)); - for (int i = 0; i < Count; i++) + for (int coord = 0; coord < 3; coord++) { - sortx[i] = Elements[i].x; - sorty[i] = Elements[i].y; - sortz[i] = Elements[i].z; + for (int i = 0; i < this->ElemCount; i++) + slice[i] = this->Data[i][coord]; + result[coord] = Alg::Median(ArrayAdaptor(slice, this->ElemCount)); } - for (int j = 0; j <= half_window; j++) - { - int minx = j; - int miny = j; - int minz = j; - for (int k = j + 1; k < Count; k++) - { - if (sortx[k] < sortx[minx]) minx = k; - if (sorty[k] < sorty[miny]) miny = k; - if (sortz[k] < sortz[minz]) minz = k; - } - const float tempx = sortx[j]; - const float tempy = sorty[j]; - const float tempz = sortz[j]; - sortx[j] = sortx[minx]; - sortx[minx] = tempx; - - sorty[j] = sorty[miny]; - sorty[miny] = tempy; - - sortz[j] = sortz[minz]; - sortz[minz] = tempz; - } - resultx = sortx[half_window]; - resulty = sorty[half_window]; - resultz = sortz[half_window]; - - OVR_FREE(sortx); - OVR_FREE(sorty); - OVR_FREE(sortz); - return Vector3f(resultx, resulty, resultz); + OVR_FREE(slice); + return result; } // Only the diagonal of the covariance matrix. 
-Vector3f SensorFilter::Variance() const +template <typename T> +Vector3<T> SensorFilter<T>::Variance() const { - Vector3f mean = Mean(); - Vector3f total = Vector3f(0.0f, 0.0f, 0.0f); - for (int i = 0; i < Count; i++) + Vector3<T> mean = this->Mean(); + Vector3<T> total; + for (int i = 0; i < this->ElemCount; i++) { - total.x += (Elements[i].x - mean.x) * (Elements[i].x - mean.x); - total.y += (Elements[i].y - mean.y) * (Elements[i].y - mean.y); - total.z += (Elements[i].z - mean.z) * (Elements[i].z - mean.z); + total.x += (this->Data[i].x - mean.x) * (this->Data[i].x - mean.x); + total.y += (this->Data[i].y - mean.y) * (this->Data[i].y - mean.y); + total.z += (this->Data[i].z - mean.z) * (this->Data[i].z - mean.z); } - return total / (float) Count; + return total / (float) this->ElemCount; } -// Should be a 3x3 matrix returned, but OVR_math.h doesn't have one -Matrix4f SensorFilter::Covariance() const +template <typename T> +Matrix3<T> SensorFilter<T>::Covariance() const { - Vector3f mean = Mean(); - Matrix4f total = Matrix4f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); - for (int i = 0; i < Count; i++) + Vector3<T> mean = this->Mean(); + Matrix3<T> total; + for (int i = 0; i < this->ElemCount; i++) { - total.M[0][0] += (Elements[i].x - mean.x) * (Elements[i].x - mean.x); - total.M[1][0] += (Elements[i].y - mean.y) * (Elements[i].x - mean.x); - total.M[2][0] += (Elements[i].z - mean.z) * (Elements[i].x - mean.x); - total.M[1][1] += (Elements[i].y - mean.y) * (Elements[i].y - mean.y); - total.M[2][1] += (Elements[i].z - mean.z) * (Elements[i].y - mean.y); - total.M[2][2] += (Elements[i].z - mean.z) * (Elements[i].z - mean.z); + total.M[0][0] += (this->Data[i].x - mean.x) * (this->Data[i].x - mean.x); + total.M[1][0] += (this->Data[i].y - mean.y) * (this->Data[i].x - mean.x); + total.M[2][0] += (this->Data[i].z - mean.z) * (this->Data[i].x - mean.x); + total.M[1][1] += (this->Data[i].y - mean.y) * (this->Data[i].y - mean.y); + total.M[2][1] += (this->Data[i].z - mean.z) * (this->Data[i].y - mean.y); + total.M[2][2] += (this->Data[i].z - mean.z) * (this->Data[i].z - mean.z); } total.M[0][1] = total.M[1][0]; total.M[0][2] = total.M[2][0]; total.M[1][2] = total.M[2][1]; for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) - total.M[i][j] *= 1.0f / Count; + total.M[i][j] /= (float) this->ElemCount; return total; } -Vector3f SensorFilter::PearsonCoefficient() const +template <typename T> +Vector3<T> SensorFilter<T>::PearsonCoefficient() const { - Matrix4f cov = Covariance(); - Vector3f pearson = Vector3f(); + Matrix3<T> cov = this->Covariance(); + Vector3<T> pearson; pearson.x = cov.M[0][1]/(sqrt(cov.M[0][0])*sqrt(cov.M[1][1])); pearson.y = cov.M[1][2]/(sqrt(cov.M[1][1])*sqrt(cov.M[2][2])); pearson.z = cov.M[2][0]/(sqrt(cov.M[2][2])*sqrt(cov.M[0][0])); diff --git a/LibOVR/Src/OVR_SensorFilter.h b/LibOVR/Src/OVR_SensorFilter.h index 5f9481a..edd4360 100644 --- a/LibOVR/Src/OVR_SensorFilter.h +++ b/LibOVR/Src/OVR_SensorFilter.h @@ -6,16 +6,16 @@ Content : Basic filtering of sensor data Created : March 7, 2013 Authors : Steve LaValle, Anna Yershova, Max Katsev -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
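PearsonCoefficient() above normalizes the off-diagonal covariance entries by the corresponding standard deviations, r_ab = cov(a,b) / (sigma_a * sigma_b). A minimal standalone sketch of that step for a row-major 3x3 covariance matrix:

#include <cmath>

// Sketch: Pearson correlation coefficients for the (x,y), (y,z) and (z,x) pairs,
// computed from a 3x3 covariance matrix as r_ab = cov(a,b) / (sigma_a * sigma_b).
// Mirrors SensorFilter<T>::PearsonCoefficient() above; cov is assumed row-major.
void PearsonFromCovariance(const double cov[3][3], double& rxy, double& ryz, double& rzx)
{
    rxy = cov[0][1] / (std::sqrt(cov[0][0]) * std::sqrt(cov[1][1]));
    ryz = cov[1][2] / (std::sqrt(cov[1][1]) * std::sqrt(cov[2][2]));
    rzx = cov[2][0] / (std::sqrt(cov[2][2]) * std::sqrt(cov[0][0]));
}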
-Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -29,67 +29,11 @@ limitations under the License. #define OVR_SensorFilter_h #include "Kernel/OVR_Math.h" - +#include "Kernel/OVR_Deque.h" +#include "Kernel/OVR_Alg.h" namespace OVR { -// A simple circular buffer data structure that stores last N elements in an array -template <typename T> -class CircularBuffer -{ -protected: - enum - { - DefaultFilterCapacity = 20 - }; - - int LastIdx; // The index of the last element that was added to the buffer - int Capacity; // The buffer size (maximum number of elements) - int Count; // Number of elements in the filter - T* Elements; - -public: - CircularBuffer(int capacity = DefaultFilterCapacity) - : LastIdx(-1), Capacity(capacity), Count(0) - { - Elements = (T*)OVR_ALLOC(capacity * sizeof(T)); - for (int i = 0; i < Capacity; i++) - Elements[i] = T(); - } - - ~CircularBuffer() { - OVR_FREE(Elements); - } - -private: - // Make the class non-copyable - CircularBuffer(const CircularBuffer& other); - CircularBuffer& operator=(const CircularBuffer& other); - -public: - // Add a new element to the filter - void AddElement (const T &e) - { - LastIdx = (LastIdx + 1) % Capacity; - Elements[LastIdx] = e; - if (Count < Capacity) - Count++; - } - - // Get element i. 0 is the most recent, 1 is one step ago, 2 is two steps ago, ... - T GetPrev(int i = 0) const - { - OVR_ASSERT(i >= 0); - if (i >= Count) // return 0 if the filter doesn't have enough elements - return T(); - int idx = (LastIdx - i); - if (idx < 0) // Fix the wraparound case - idx += Capacity; - OVR_ASSERT(idx >= 0); // Multiple wraparounds not allowed - return Elements[idx]; - } -}; - // A base class for filters that maintains a buffer of sensor data taken over time and implements // various simple filters, most of which are linear functions of the data history. 
// Maintains the running sum of its elements for better performance on large capacity values @@ -100,24 +44,59 @@ protected: T RunningTotal; // Cached sum of the elements public: - SensorFilterBase(int capacity = CircularBuffer<T>::DefaultFilterCapacity) : CircularBuffer<T>(capacity), RunningTotal() { }; + SensorFilterBase(int capacity = CircularBuffer<T>::DefaultCapacity) + : CircularBuffer<T>(capacity), RunningTotal() + { + this->Clear(); + }; - // Add a new element to the filter - // Updates the running sum value - void AddElement (const T &e) + // The following methods are augmented to update the cached running sum value + void PushBack(const T &e) { - int NextIdx = (this->LastIdx + 1) % this->Capacity; - RunningTotal += (e - this->Elements[NextIdx]); - CircularBuffer<T>::AddElement(e); - if (this->LastIdx == 0) + CircularBuffer<T>::PushBack(e); + RunningTotal += e; + if (this->End == 0) { // update the cached total to avoid error accumulation RunningTotal = T(); - for (int i = 0; i < this->Count; i++) - RunningTotal += this->Elements[i]; + for (int i = 0; i < this->ElemCount; i++) + RunningTotal += this->Data[i]; } } + void PushFront(const T &e) + { + CircularBuffer<T>::PushFront(e); + RunningTotal += e; + if (this->Beginning == 0) + { + // update the cached total to avoid error accumulation + RunningTotal = T(); + for (int i = 0; i < this->ElemCount; i++) + RunningTotal += this->Data[i]; + } + } + + T PopBack() + { + T e = CircularBuffer<T>::PopBack(); + RunningTotal -= e; + return e; + } + + T PopFront() + { + T e = CircularBuffer<T>::PopFront(); + RunningTotal -= e; + return e; + } + + void Clear() + { + CircularBuffer<T>::Clear(); + RunningTotal = T(); + } + // Simple statistics T Total() const { @@ -126,86 +105,201 @@ public: T Mean() const { - return (this->Count == 0) ? T() : (Total() / (float) this->Count); + return this->IsEmpty() ? 
T() : (Total() / (float) this->ElemCount); } + T MeanN(int n) const + { + OVR_ASSERT(n > 0); + OVR_ASSERT(this->Capacity >= n); + T total = T(); + for (int i = 0; i < n; i++) + { + total += this->PeekBack(i); + } + return total / n; + } + // A popular family of smoothing filters and smoothed derivatives + + T SavitzkyGolaySmooth4() + { + OVR_ASSERT(this->Capacity >= 4); + return this->PeekBack(0)*0.7f + + this->PeekBack(1)*0.4f + + this->PeekBack(2)*0.1f - + this->PeekBack(3)*0.2f; + } + T SavitzkyGolaySmooth8() const { OVR_ASSERT(this->Capacity >= 8); - return this->GetPrev(0)*0.41667f + - this->GetPrev(1)*0.33333f + - this->GetPrev(2)*0.25f + - this->GetPrev(3)*0.16667f + - this->GetPrev(4)*0.08333f - - this->GetPrev(6)*0.08333f - - this->GetPrev(7)*0.16667f; + return this->PeekBack(0)*0.41667f + + this->PeekBack(1)*0.33333f + + this->PeekBack(2)*0.25f + + this->PeekBack(3)*0.16667f + + this->PeekBack(4)*0.08333f - + this->PeekBack(6)*0.08333f - + this->PeekBack(7)*0.16667f; } T SavitzkyGolayDerivative4() const { OVR_ASSERT(this->Capacity >= 4); - return this->GetPrev(0)*0.3f + - this->GetPrev(1)*0.1f - - this->GetPrev(2)*0.1f - - this->GetPrev(3)*0.3f; + return this->PeekBack(0)*0.3f + + this->PeekBack(1)*0.1f - + this->PeekBack(2)*0.1f - + this->PeekBack(3)*0.3f; } T SavitzkyGolayDerivative5() const { OVR_ASSERT(this->Capacity >= 5); - return this->GetPrev(0)*0.2f + - this->GetPrev(1)*0.1f - - this->GetPrev(3)*0.1f - - this->GetPrev(4)*0.2f; - } + return this->PeekBack(0)*0.2f + + this->PeekBack(1)*0.1f - + this->PeekBack(3)*0.1f - + this->PeekBack(4)*0.2f; + } T SavitzkyGolayDerivative12() const { OVR_ASSERT(this->Capacity >= 12); - return this->GetPrev(0)*0.03846f + - this->GetPrev(1)*0.03147f + - this->GetPrev(2)*0.02448f + - this->GetPrev(3)*0.01748f + - this->GetPrev(4)*0.01049f + - this->GetPrev(5)*0.0035f - - this->GetPrev(6)*0.0035f - - this->GetPrev(7)*0.01049f - - this->GetPrev(8)*0.01748f - - this->GetPrev(9)*0.02448f - - this->GetPrev(10)*0.03147f - - this->GetPrev(11)*0.03846f; + return this->PeekBack(0)*0.03846f + + this->PeekBack(1)*0.03147f + + this->PeekBack(2)*0.02448f + + this->PeekBack(3)*0.01748f + + this->PeekBack(4)*0.01049f + + this->PeekBack(5)*0.0035f - + this->PeekBack(6)*0.0035f - + this->PeekBack(7)*0.01049f - + this->PeekBack(8)*0.01748f - + this->PeekBack(9)*0.02448f - + this->PeekBack(10)*0.03147f - + this->PeekBack(11)*0.03846f; } T SavitzkyGolayDerivativeN(int n) const { - OVR_ASSERT(this->Capacity >= n); + OVR_ASSERT(this->capacity >= n); int m = (n-1)/2; T result = T(); for (int k = 1; k <= m; k++) { int ind1 = m - k; int ind2 = n - m + k - 1; - result += (this->GetPrev(ind1) - this->GetPrev(ind2)) * (float) k; + result += (this->PeekBack(ind1) - this->PeekBack(ind2)) * (float) k; } float coef = 3.0f/(m*(m+1.0f)*(2.0f*m+1.0f)); result = result*coef; return result; } + + T Median() const + { + T* copy = (T*) OVR_ALLOC(this->ElemCount * sizeof(T)); + T result = Alg::Median(ArrayAdaptor(copy)); + OVR_FREE(copy); + return result; + } }; // This class maintains a buffer of sensor data taken over time and implements // various simple filters, most of which are linear functions of the data history. 
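SensorFilterBase above keeps Total() and Mean() cheap by caching RunningTotal as elements are pushed, and rebuilds the sum exactly whenever the circular buffer wraps (this->End == 0 in PushBack) so that floating-point rounding error cannot accumulate. A minimal sketch of the same bookkeeping over a fixed-size window of doubles (capacity must be greater than zero):

#include <vector>
#include <cstddef>

// Sketch of a fixed-window running mean with the same error-control idea as
// SensorFilterBase: maintain the cached sum incrementally, but rebuild it exactly
// once per wrap of the circular buffer so rounding error cannot accumulate.
class RunningMean
{
public:
    explicit RunningMean(size_t capacity)
        : Buffer(capacity, 0.0), Next(0), Count(0), Sum(0.0) {}

    void Push(double v)
    {
        if (Count == Buffer.size())
            Sum -= Buffer[Next];          // evict the oldest sample from the cached sum
        else
            ++Count;
        Buffer[Next] = v;
        Sum += v;
        Next = (Next + 1) % Buffer.size();

        if (Next == 0)                    // buffer wrapped: recompute the sum exactly
        {
            Sum = 0.0;
            for (size_t i = 0; i < Count; ++i)
                Sum += Buffer[i];
        }
    }

    double Mean() const { return Count ? Sum / (double)Count : 0.0; }

private:
    std::vector<double> Buffer;
    size_t Next, Count;
    double Sum;
};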
-class SensorFilter : public SensorFilterBase<Vector3f> +template <typename T> +class SensorFilter : public SensorFilterBase<Vector3<T> > { public: - SensorFilter(int capacity = DefaultFilterCapacity) : SensorFilterBase<Vector3f>(capacity) { }; + SensorFilter(int capacity = SensorFilterBase<Vector3<T> >::DefaultCapacity) : SensorFilterBase<Vector3<T> >(capacity) { }; // Simple statistics - Vector3f Median() const; - Vector3f Variance() const; // The diagonal of covariance matrix - Matrix4f Covariance() const; - Vector3f PearsonCoefficient() const; + Vector3<T> Median() const; + Vector3<T> Variance() const; // The diagonal of covariance matrix + Matrix3<T> Covariance() const; + Vector3<T> PearsonCoefficient() const; +}; + +typedef SensorFilter<float> SensorFilterf; +typedef SensorFilter<double> SensorFilterd; + +// This filter operates on the values that are measured in the body frame and rotate with the device +class SensorFilterBodyFrame : public SensorFilterBase<Vector3d> +{ +private: + // low pass filter gain + double gain; + // sum of squared norms of the values + double runningTotalLengthSq; + // cumulative rotation quaternion + Quatd Q; + // current low pass filter output + Vector3d output; + + // make private so it isn't used by accident + // in addition to the normal SensorFilterBase::PushBack, keeps track of running sum of LengthSq + // for the purpose of variance computations + void PushBack(const Vector3d &e) + { + runningTotalLengthSq += this->IsFull() ? (e.LengthSq() - this->PeekFront().LengthSq()) : e.LengthSq(); + SensorFilterBase<Vector3d>::PushBack(e); + if (this->End == 0) + { + // update the cached total to avoid error accumulation + runningTotalLengthSq = 0; + for (int i = 0; i < this->ElemCount; i++) + runningTotalLengthSq += this->Data[i].LengthSq(); + } + } + +public: + SensorFilterBodyFrame(int capacity = SensorFilterBase<Vector3d>::DefaultCapacity) + : SensorFilterBase<Vector3d>(capacity), gain(2.5), + output(), Q(), runningTotalLengthSq(0) { }; + + // return the scalar variance of the filter values (rotated to be in the same frame) + double Variance() const + { + return this->IsEmpty() ? 
0 : (runningTotalLengthSq / this->ElemCount - this->Mean().LengthSq()); + } + + // return the scalar standard deviation of the filter values (rotated to be in the same frame) + double StdDev() const + { + return sqrt(Variance()); + } + + // confidence value based on the stddev of the data (between 0.0 and 1.0, more is better) + double Confidence() const + { + return Alg::Clamp(0.48 - 0.1 * log(StdDev()), 0.0, 1.0) * this->ElemCount / this->Capacity; + } + + // add a new element to the filter + // takes rotation increment since the last update + // in order to rotate the previous value to the current body frame + void Update(Vector3d value, double deltaT, Quatd deltaQ = Quatd()) + { + if (this->IsEmpty()) + { + output = value; + } + else + { + // rotate by deltaQ + output = deltaQ.Inverted().Rotate(output); + // apply low-pass filter + output += (value - output) * gain * deltaT; + } + + // put the value into the fixed frame for the stddev computation + Q = Q * deltaQ; + PushBack(Q.Rotate(output)); + } + + // returns the filter average in the current body frame + Vector3d GetFilteredValue() const + { + return Q.Inverted().Rotate(this->Mean()); + } }; } //namespace OVR diff --git a/LibOVR/Src/OVR_SensorFusion.cpp b/LibOVR/Src/OVR_SensorFusion.cpp index eac0366..6cfe00c 100644 --- a/LibOVR/Src/OVR_SensorFusion.cpp +++ b/LibOVR/Src/OVR_SensorFusion.cpp @@ -3,18 +3,18 @@ Filename : OVR_SensorFusion.cpp Content : Methods that determine head orientation from sensor data over time Created : October 9, 2012 -Authors : Michael Antonov, Steve LaValle, Max Katsev +Authors : Michael Antonov, Steve LaValle, Dov Katz, Max Katsev -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -29,8 +29,15 @@ limitations under the License. #include "Kernel/OVR_System.h" #include "OVR_JSON.h" #include "OVR_Profile.h" +#include "OVR_Stereo.h" +#include "Recording/Recorder.h" + +// Temporary for debugging +bool Global_Flag_1 = true; + +//Convenient global variable to temporarily extract this data. 
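SensorFilterBodyFrame above keeps a running sum of squared norms so that a scalar variance can be formed as E[|v|^2] - |E[v]|^2, and it converts the resulting standard deviation into a confidence in [0, 1] via clamp(0.48 - 0.1 * ln(stddev), 0, 1), scaled by how full the buffer is. A standalone sketch of those two computations over a plain sample vector (the body-frame rotation bookkeeping with Q and deltaQ is omitted):

#include <vector>
#include <array>
#include <cmath>
#include <algorithm>

// Sketch of the scalar variance / confidence computation used by SensorFilterBodyFrame:
// Var = E[|v|^2] - |E[v]|^2, confidence = clamp(0.48 - 0.1 * ln(stddev), 0, 1) * fill ratio.
double ScalarVariance(const std::vector<std::array<double, 3> >& samples)
{
    if (samples.empty())
        return 0.0;
    double sumLenSq = 0.0;
    double mean[3] = { 0.0, 0.0, 0.0 };
    for (size_t i = 0; i < samples.size(); ++i)
    {
        for (int c = 0; c < 3; ++c)
            mean[c] += samples[i][c];
        sumLenSq += samples[i][0] * samples[i][0] +
                    samples[i][1] * samples[i][1] +
                    samples[i][2] * samples[i][2];
    }
    double n = (double)samples.size();
    double meanLenSq = 0.0;
    for (int c = 0; c < 3; ++c)
    {
        mean[c] /= n;
        meanLenSq += mean[c] * mean[c];
    }
    return sumLenSq / n - meanLenSq;
}

double Confidence(double variance, size_t samples, size_t capacity)
{
    double stddev = std::sqrt(variance);
    double c = 0.48 - 0.1 * std::log(stddev);
    return std::min(1.0, std::max(0.0, c)) * (double)samples / (double)capacity;
}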
+float TPH_CameraPoseOrientationWxyz[4]; -#define MAX_DEVICE_PROFILE_MAJOR_VERSION 1 namespace OVR { @@ -38,579 +45,788 @@ namespace OVR { // ***** Sensor Fusion SensorFusion::SensorFusion(SensorDevice* sensor) - : Stage(0), RunningTime(0), DeltaT(0.001f), - Handler(getThis()), pDelegate(0), - Gain(0.05f), EnableGravity(true), - EnablePrediction(true), PredictionDT(0.03f), PredictionTimeIncrement(0.001f), - FRawMag(10), FAngV(20), - GyroOffset(), TiltAngleFilter(1000), - EnableYawCorrection(false), MagCalibrated(false), MagNumReferences(0), MagRefIdx(-1), MagRefScore(0), - MotionTrackingEnabled(true) + : MotionTrackingEnabled(true), VisionPositionEnabled(true), + EnableGravity(true), EnableYawCorrection(true), MagCalibrated(true), EnableCameraTiltCorrection(true), + FAngV(20), FAccelHeadset(1000), FAccelCamera(1000), + ExposureRecordHistory(100), LastMessageExposureFrame(NULL), + VisionMaxIMUTrackTime(4.0/60.0), // Integrate IMU up to 4 frames + HeadModel(0, OVR_DEFAULT_NECK_TO_EYE_VERTICAL, -OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL), + DefaultCameraPosition(0, 0, -1) { + pHandler = new BodyFrameHandler(this); + + // And the clock is running... + LogText("*** SensorFusion Startup: TimeSeconds = %f\n", Timer::GetSeconds()); + if (sensor) AttachToSensor(sensor); - MagCalibrationMatrix.SetIdentity(); + + // MA: 1/25/2014 for DK2 + SetCenterPupilDepth(0.076f); + + Reset(); } SensorFusion::~SensorFusion() -{ +{ + delete(pHandler); } - bool SensorFusion::AttachToSensor(SensorDevice* sensor) { - // clear the cached device information - CachedSensorInfo.SerialNumber[0] = 0; - CachedSensorInfo.VendorId = 0; - CachedSensorInfo.ProductId = 0; - if (sensor != NULL) { - // Cache the sensor device so we can access this information during - // mag saving and loading (avoid holding a reference to sensor to prevent - // deadlock on shutdown) - sensor->GetDeviceInfo(&CachedSensorInfo); // save the device information - MessageHandler* pCurrentHandler = sensor->GetMessageHandler(); + // Load IMU position + Array<PositionCalibrationReport> reports; - if (pCurrentHandler == &Handler) + bool result = sensor->GetAllPositionCalibrationReports(&reports); + if(result) { - Reset(); - return true; - } + PositionCalibrationReport const& imu = reports[reports.GetSize() - 1]; + OVR_ASSERT(imu.PositionType == PositionCalibrationReport::PositionType_IMU); + IMUPosition = imu.Position; + + Recorder::Buffer(imu); + Recorder::Buffer(reports); - if (pCurrentHandler != NULL) + // convert from vision to the world frame + IMUPosition.x *= -1.0; + IMUPosition.z *= -1.0; + } + else { - OVR_DEBUG_LOG( - ("SensorFusion::AttachToSensor failed - sensor %p already has handler", sensor)); - return false; + // TODO: set up IMUPosition for devices that don't have this report. } - - // Automatically load the default mag calibration for this sensor - LoadMagCalibration(); + // Repopulate CPFOrigin + SetCenterPupilDepth(CenterPupilDepth); } - if (Handler.IsHandlerInstalled()) - { - Handler.RemoveHandlerFromDevices(); - } + pHandler->RemoveHandlerFromDevices(); if (sensor != NULL) { - sensor->SetMessageHandler(&Handler); + sensor->AddMessageHandler(pHandler); } Reset(); + + // Initialize the sensor state + // TBD: This is a hack to avoid a race condition if sensor status is checked immediately + // after sensor creation but before any data has flowed through. 
We should probably + // not depend strictly on data flow to determine capabilites like orientation and position + // tracking, or else use some sort of synchronous method to wait for data + LocklessState init; + init.StatusFlags = Status_OrientationTracked; + UpdatedState.SetState(init); + return true; } - - // Resets the current orientation +// Resets the current orientation void SensorFusion::Reset() { - Lock::Locker lockScope(Handler.GetHandlerLock()); - Q = Quatf(); - QUncorrected = Quatf(); - Stage = 0; - RunningTime = 0; - MagNumReferences = 0; - MagRefIdx = -1; - GyroOffset = Vector3f(); + Lock::Locker lockScope(pHandler->GetHandlerLock()); + + UpdatedState.SetState(LocklessState()); + State = PoseState<double>(); + State.Transform.Position = -CPFPositionInIMUFrame; // place CPF at the origin, not the IMU + VisionState = PoseState<double>(); + VisionError = PoseState<double>(); + CurrentExposureIMUDelta = PoseState<double>(); + CameraPose = Pose<double>(Quatd(), DefaultCameraPosition); + CameraPoseConfidence = -1; + + ExposureRecordHistory.Clear(); + LastMessageExposureFrame = MessageExposureFrame(NULL); + LastVisionAbsoluteTime = 0; + FullVisionCorrectionExposureCounter = 0; + Stage = 0; + + MagNumReferences = 0; + MagRefIdx = -1; + MagRefScore = 0; + MagCorrectionIntegralTerm = Quatd(); + AccelOffset = Vector3d(); + + FAccelCamera.Clear(); + FAccelHeadset.Clear(); + FAngV.Clear(); + + setNeckPivotFromPose ( State.Transform ); } -// Compute a rotation required to transform "estimated" into "measured" -// Returns an approximation of the goal rotation in the Simultaneous Orthogonal Rotations Angle representation -// (vector direction is the axis of rotation, norm is the angle) -Vector3f SensorFusion_ComputeCorrection(Vector3f measured, Vector3f estimated) +//------------------------------------------------------------------------------------- +// Vision & message processing + +void SensorFusion::OnVisionFailure() { - measured.Normalize(); - estimated.Normalize(); - Vector3f correction = measured.Cross(estimated); - float cosError = measured.Dot(estimated); - // from the def. of cross product, correction.Length() = sin(error) - // therefore sin(error) * sqrt(2 / (1 + cos(error))) = 2 * sin(error / 2) ~= error in [-pi, pi] - // Mathf::Tolerance is used to avoid div by 0 if cos(error) = -1 - return correction * sqrt(2 / (1 + cosError + Mathf::Tolerance)); + // do nothing } -void SensorFusion::handleMessage(const MessageBodyFrame& msg) +void SensorFusion::OnVisionPreviousFrame(const Pose<double>& pose) { - if (msg.Type != Message_BodyFrame || !IsMotionTrackingEnabled()) - return; + // simply save the observation for use in the next OnVisionSuccess call; + // this should not have unintended side-effects for position filtering, + // since the vision time is not updated and the system keeps thinking we don't have vision yet + VisionState.Transform = pose; +} - // Put the sensor readings into convenient local variables - Vector3f gyro = msg.RotationRate; - Vector3f accel = msg.Acceleration; - Vector3f mag = msg.MagneticField; +void SensorFusion::OnVisionSuccess(const Pose<double>& pose, UInt32 exposureCounter) +{ + Lock::Locker lockScope(pHandler->GetHandlerLock()); - // Insert current sensor data into filter history - FRawMag.AddElement(mag); - FAngV.AddElement(gyro); + LastVisionAbsoluteTime = GetTime(); - // Apply the calibration parameters to raw mag - Vector3f calMag = MagCalibrated ? 
GetCalibratedMagValue(FRawMag.Mean()) : FRawMag.Mean(); + // ********* LastVisionExposureRecord ********* - // Set variables accessible through the class API - DeltaT = msg.TimeDelta; - AngV = gyro; - A = accel; - RawMag = mag; - CalMag = calMag; + // Skip old data and use the record that matches the exposure counter + while (!ExposureRecordHistory.IsEmpty() && + (ExposureRecordHistory.PeekFront().ExposureCounter <= exposureCounter)) + { + LastVisionExposureRecord = ExposureRecordHistory.PopFront(); + } - // Keep track of time - Stage++; - RunningTime += DeltaT; + // Use current values if we don't have historical data + // Right now, this will happen if we get first frame after prediction failure, + // and this exposure wasn't in the buffer. (TBD: Unlikely.. unless IMU message wasn't sent?) + if (LastVisionExposureRecord.ExposureCounter != exposureCounter) + LastVisionExposureRecord = ExposureRecord(exposureCounter, GetTime(), State, PoseState<double>()); - // Small preprocessing - Quatf Qinv = Q.Inverted(); - Vector3f up = Qinv.Rotate(Vector3f(0, 1, 0)); + // ********* VisionState ********* + + // This is stored in the camera frame, so need to be careful when combining with the IMU data, + // which is in the world frame - Vector3f gyroCorrected = gyro; + // convert to the world frame + Vector3d positionChangeW = CameraPose.Orientation.Rotate(pose.Position - VisionState.Transform.Position); - // Apply integral term - // All the corrections are stored in the Simultaneous Orthogonal Rotations Angle representation, - // which allows to combine and scale them by just addition and multiplication - if (EnableGravity || EnableYawCorrection) - gyroCorrected -= GyroOffset; + VisionState.TimeInSeconds = LastVisionExposureRecord.ExposureTime; + VisionState.Transform = pose; - if (EnableGravity) + // Check LastVisionExposureRecord.Delta.TimeInSeconds to avoid divide by zero, which we could (rarely) + // get if we didn't have exposures delta for history (skipped exposure counters + // due to video mode change that stalls USB, etc). 
+ if (LastVisionExposureRecord.Delta.TimeInSeconds > 0.001) + { + // Use the accel data to estimate the velocity at the exposure time + // (as opposed to the average velocity between exposures) + Vector3d velocityW = LastVisionExposureRecord.Delta.LinearVelocity + + (positionChangeW - LastVisionExposureRecord.Delta.Transform.Position) / + LastVisionExposureRecord.Delta.TimeInSeconds; + VisionState.LinearVelocity = CameraPose.Orientation.Inverted().Rotate(velocityW); + } + else { - const float spikeThreshold = 0.01f; - const float gravityThreshold = 0.1f; - float proportionalGain = 5 * Gain; // Gain parameter should be removed in a future release - float integralGain = 0.0125f; + VisionState.LinearVelocity = Vector3d(0,0,0); + } - Vector3f tiltCorrection = SensorFusion_ComputeCorrection(accel, up); + // ********* VisionError ********* - if (Stage > 5) - { - // Spike detection - float tiltAngle = up.Angle(accel); - TiltAngleFilter.AddElement(tiltAngle); - if (tiltAngle > TiltAngleFilter.Mean() + spikeThreshold) - proportionalGain = integralGain = 0; - // Acceleration detection - const float gravity = 9.8f; - if (fabs(accel.Length() / gravity - 1) > gravityThreshold) - integralGain = 0; - } - else // Apply full correction at the startup - { - proportionalGain = 1 / DeltaT; - integralGain = 0; - } + // This is in the world frame, so transform the vision data appropriately - gyroCorrected += (tiltCorrection * proportionalGain); - GyroOffset -= (tiltCorrection * integralGain * DeltaT); - } + VisionError.Transform.Position = CameraPose.Orientation.Rotate(VisionState.Transform.Position) + CameraPose.Position - + LastVisionExposureRecord.State.Transform.Position; + VisionError.LinearVelocity = CameraPose.Orientation.Rotate(VisionState.LinearVelocity) - + LastVisionExposureRecord.State.LinearVelocity; + VisionError.Transform.Orientation = CameraPose.Orientation * VisionState.Transform.Orientation * + LastVisionExposureRecord.State.Transform.Orientation.Inverted(); +} - if (EnableYawCorrection && MagCalibrated && RunningTime > 2.0f) +Pose<double> SensorFusion::GetVisionPrediction(UInt32 exposureCounter) +{ + Lock::Locker lockScope(pHandler->GetHandlerLock()); + + // Combine the small deltas together + // Should only be one iteration, unless we are skipping camera frames + ExposureRecord record; + PoseState<double> delta = PoseState<double>(); + while (!ExposureRecordHistory.IsEmpty() && + (ExposureRecordHistory.PeekFront().ExposureCounter <= exposureCounter)) { - const float maxMagRefDist = 0.1f; - const float maxTiltError = 0.05f; - float proportionalGain = 0.01f; - float integralGain = 0.0005f; + record = ExposureRecordHistory.PopFront(); + delta.AdvanceByDelta(record.Delta); + } + // Put the combine exposure record back in the history, for use in HandleVisionSuccess(...) 
+ record.Delta = delta; + ExposureRecordHistory.PushFront(record); - // Update the reference point if needed - if (MagRefIdx < 0 || calMag.Distance(MagRefsInBodyFrame[MagRefIdx]) > maxMagRefDist) - { - // Delete a bad point - if (MagRefIdx >= 0 && MagRefScore < 0) - { - MagNumReferences--; - MagRefsInBodyFrame[MagRefIdx] = MagRefsInBodyFrame[MagNumReferences]; - MagRefsInWorldFrame[MagRefIdx] = MagRefsInWorldFrame[MagNumReferences]; - } - // Find a new one - MagRefIdx = -1; - MagRefScore = 1000; - float bestDist = maxMagRefDist; - for (int i = 0; i < MagNumReferences; i++) - { - float dist = calMag.Distance(MagRefsInBodyFrame[i]); - if (bestDist > dist) - { - bestDist = dist; - MagRefIdx = i; - } - } - // Create one if needed - if (MagRefIdx < 0 && MagNumReferences < MagMaxReferences) - { - MagRefIdx = MagNumReferences; - MagRefsInBodyFrame[MagRefIdx] = calMag; - MagRefsInWorldFrame[MagRefIdx] = Q.Rotate(calMag).Normalized(); - MagNumReferences++; - } - } + // Add the effect of initial pose and velocity from vision. + // Don't forget to transform IMU to the camera frame + Pose<double> c(VisionState.Transform.Orientation * delta.Transform.Orientation, + VisionState.Transform.Position + VisionState.LinearVelocity * delta.TimeInSeconds + + CameraPose.Orientation.Inverted().Rotate(delta.Transform.Position)); - if (MagRefIdx >= 0) - { - Vector3f magEstimated = Qinv.Rotate(MagRefsInWorldFrame[MagRefIdx]); - Vector3f magMeasured = calMag.Normalized(); + return c; +} - // Correction is computed in the horizontal plane (in the world frame) - Vector3f yawCorrection = SensorFusion_ComputeCorrection(magMeasured.ProjectToPlane(up), - magEstimated.ProjectToPlane(up)); +void SensorFusion::handleMessage(const MessageBodyFrame& msg) +{ + if (msg.Type != Message_BodyFrame || !IsMotionTrackingEnabled()) + return; - if (fabs(up.Dot(magEstimated - magMeasured)) < maxTiltError) - { - MagRefScore += 2; - } - else // If the vertical angle is wrong, decrease the score - { - MagRefScore -= 1; - proportionalGain = integralGain = 0; - } - gyroCorrected += (yawCorrection * proportionalGain); - GyroOffset -= (yawCorrection * integralGain * DeltaT); - } - } + // Put the sensor readings into convenient local variables + Vector3d gyro(msg.RotationRate); + Vector3d accel(msg.Acceleration); + Vector3d mag(msg.MagneticField); + double DeltaT = msg.TimeDelta; + MagCalibrated = msg.MagCalibrated; + + // Keep track of time + State.TimeInSeconds = msg.AbsoluteTimeSeconds; + // We got an update in the last 60ms and the data is not very old + bool visionIsRecent = (GetTime() - LastVisionAbsoluteTime < VisionMaxIMUTrackTime) && (GetVisionLatency() < 0.25); + Stage++; + + // Insert current sensor data into filter history + FAngV.PushBack(gyro); + FAccelHeadset.Update(accel, DeltaT, Quatd(gyro, gyro.Length() * DeltaT)); - // Update the orientation quaternion based on the corrected angular velocity vector - float angle = gyroCorrected.Length() * DeltaT; - if (angle > 0.0f) - Q = Q * Quatf(gyroCorrected, angle); + // Process raw inputs + // in the future the gravity offset can be calibrated using vision feedback + Vector3d accelW = State.Transform.Orientation.Rotate(accel) - Vector3d(0, 9.8, 0); + + // Update headset orientation + State.StoreAndIntegrateGyro(gyro, DeltaT); + // Tilt correction based on accelerometer + if (EnableGravity) + applyTiltCorrection(DeltaT); + // Yaw correction based on camera + if (EnableYawCorrection && visionIsRecent) + applyVisionYawCorrection(DeltaT); + // Yaw correction based on magnetometer + if 
(EnableYawCorrection && MagCalibrated) // MagCalibrated is always false for DK2 for now + applyMagYawCorrection(mag, DeltaT); + + // Update camera orientation + if (EnableCameraTiltCorrection && visionIsRecent) + applyCameraTiltCorrection(accel, DeltaT); // The quaternion magnitude may slowly drift due to numerical error, // so it is periodically normalized. - if (Stage % 500 == 0) - Q.Normalize(); + if ((Stage & 0xFF) == 0) + { + State.Transform.Orientation.Normalize(); + CameraPose.Orientation.Normalize(); + } + + // Update headset position + if (VisionPositionEnabled && visionIsRecent) + { + // Integrate UMI and velocity here up to a fixed amount of time after vision. + State.StoreAndIntegrateAccelerometer(accelW + AccelOffset, DeltaT); + // Position correction based on camera + applyPositionCorrection(DeltaT); + // Compute where the neck pivot would be. + setNeckPivotFromPose(State.Transform); + } + else + { + // Fall back onto internal head model + // Use the last-known neck pivot position to figure out the expected IMU position. + // (should be the opposite of SensorFusion::setNeckPivotFromPose) + Vector3d imuInNeckPivotFrame = HeadModel - CPFPositionInIMUFrame; + State.Transform.Position = NeckPivotPosition + State.Transform.Orientation.Rotate(imuInNeckPivotFrame); + + // We can't trust velocity past this point. + State.LinearVelocity = Vector3d(0,0,0); + State.LinearAcceleration = accelW; + } + + // Compute the angular acceleration + State.AngularAcceleration = (FAngV.GetSize() >= 12 && DeltaT > 0) ? + (FAngV.SavitzkyGolayDerivative12() / DeltaT) : Vector3d(); + + // Update the dead reckoning state used for incremental vision tracking + CurrentExposureIMUDelta.StoreAndIntegrateGyro(gyro, DeltaT); + CurrentExposureIMUDelta.StoreAndIntegrateAccelerometer(accelW, DeltaT); + + // If we only compiled the stub version of Recorder, then branch prediction shouldn't + // have any problem with this if statement. Actually, it should be optimized out, but need to verify. + if(Recorder::GetRecorder()) + { + Posed savePose = static_cast<Posed>(GetPoseAtTime(GetTime())); + Recorder::LogData("sfTimeSeconds", State.TimeInSeconds); + Recorder::LogData("sfPose", savePose); + } + + // Store the lockless state. 
+ LocklessState lstate; + lstate.StatusFlags = Status_OrientationTracked; + if (VisionPositionEnabled) + lstate.StatusFlags |= Status_PositionConnected; + if (VisionPositionEnabled && visionIsRecent) + lstate.StatusFlags |= Status_PositionTracked; + lstate.State = State; + lstate.Temperature = msg.Temperature; + lstate.Magnetometer = mag; + UpdatedState.SetState(lstate); } -// A predictive filter based on extrapolating the smoothed, current angular velocity -Quatf SensorFusion::GetPredictedOrientation(float pdt) -{ - Lock::Locker lockScope(Handler.GetHandlerLock()); - Quatf qP = Q; - - if (EnablePrediction) +void SensorFusion::handleExposure(const MessageExposureFrame& msg) +{ + if (msg.CameraFrameCount > LastMessageExposureFrame.CameraFrameCount + 1) { - // This method assumes a constant angular velocity - Vector3f angVelF = FAngV.SavitzkyGolaySmooth8(); - float angVelFL = angVelF.Length(); - - // Force back to raw measurement - angVelF = AngV; - angVelFL = AngV.Length(); - - // Dynamic prediction interval: Based on angular velocity to reduce vibration - const float minPdt = 0.001f; - const float slopePdt = 0.1f; - float newpdt = pdt; - float tpdt = minPdt + slopePdt * angVelFL; - if (tpdt < pdt) - newpdt = tpdt; - //LogText("PredictonDTs: %d\n",(int)(newpdt / PredictionTimeIncrement + 0.5f)); - - if (angVelFL > 0.001f) + LogText("Skipped %d tracker exposure counters\n", + msg.CameraFrameCount - (LastMessageExposureFrame.CameraFrameCount + 1)); + } + else + { + // MA: Check timing deltas + // Is seems repetitive tracking loss occurs when timing gets out of sync + // Could be caused by some bug in HW timing + time filter? + if (fabs(State.TimeInSeconds - msg.CameraTimeSeconds) > 0.1f) { - Vector3f rotAxisP = angVelF / angVelFL; - float halfRotAngleP = angVelFL * newpdt * 0.5f; - float sinaHRAP = sin(halfRotAngleP); - Quatf deltaQP(rotAxisP.x*sinaHRAP, rotAxisP.y*sinaHRAP, - rotAxisP.z*sinaHRAP, cos(halfRotAngleP)); - qP = Q * deltaQP; + static int logLimiter = 0; + if ((logLimiter & 0x3F) == 0) + { + LogText("Timing out of sync: State.T=%f, ExposureT=%f, delta=%f, Time()=%f\n", + State.TimeInSeconds, msg.CameraTimeSeconds, + State.TimeInSeconds - msg.CameraTimeSeconds, GetTime()); + } + logLimiter++; } + } - return qP; -} + CurrentExposureIMUDelta.TimeInSeconds = msg.CameraTimeSeconds - LastMessageExposureFrame.CameraTimeSeconds; + ExposureRecordHistory.PushBack(ExposureRecord( + msg.CameraFrameCount, msg.CameraTimeSeconds, State, CurrentExposureIMUDelta)); -Vector3f SensorFusion::GetCalibratedMagValue(const Vector3f& rawMag) const + // Every new exposure starts from zero + CurrentExposureIMUDelta = PoseState<double>(); + LastMessageExposureFrame = msg; +} + +// If you have a known-good pose, this sets the neck pivot position. +void SensorFusion::setNeckPivotFromPose(Posed const &pose) { - OVR_ASSERT(HasMagCalibration()); - return MagCalibrationMatrix.Transform(rawMag); - } + Vector3d imuInNeckPivotFrame = HeadModel - CPFPositionInIMUFrame; + NeckPivotPosition = pose.Position - pose.Orientation.Rotate(imuInNeckPivotFrame); +} -SensorFusion::BodyFrameHandler::~BodyFrameHandler() +// These two functions need to be moved into Quat class +// Compute a rotation required to transform "from" into "to". 
+Quatd vectorAlignmentRotation(const Vector3d &from, const Vector3d &to) { - RemoveHandlerFromDevices(); + Vector3d axis = from.Cross(to); + if (axis.LengthSq() == 0) + // this handles both collinear and zero-length input cases + return Quatd(); + double angle = from.Angle(to); + return Quatd(axis, angle); } -void SensorFusion::BodyFrameHandler::OnMessage(const Message& msg) +// Compute the part of the quaternion that rotates around Y axis +Quatd extractYawRotation(const Quatd &error) { - if (msg.Type == Message_BodyFrame) - pFusion->handleMessage(static_cast<const MessageBodyFrame&>(msg)); - if (pFusion->pDelegate) - pFusion->pDelegate->OnMessage(msg); + if (error.y == 0) + return Quatd(); + double phi = atan2(error.w, error.y); + double alpha = Mathd::Pi - 2 * phi; + return Quatd(Axis_Y, alpha); +} + +void SensorFusion::applyPositionCorrection(double deltaT) +{ + // Each component of gainPos is equivalent to a Kalman gain of (sigma_process / sigma_observation) + const Vector3d gainPos = Vector3d(10, 10, 8); + const Vector3d gainVel = gainPos.EntrywiseMultiply(gainPos) * 0.5; + const double snapThreshold = 0.1; // Large value (previously 0.01, which caused frequent jumping) + + if (LastVisionExposureRecord.ExposureCounter <= FullVisionCorrectionExposureCounter) + return; + + if (VisionError.Transform.Position.LengthSq() > (snapThreshold * snapThreshold) || + !(UpdatedState.GetState().StatusFlags & Status_PositionTracked)) + { + // high error or just reacquired position from vision - apply full correction + State.Transform.Position += VisionError.Transform.Position; + State.LinearVelocity += VisionError.LinearVelocity; + // record the frame counter to avoid additional correction until we see the new data + FullVisionCorrectionExposureCounter = LastMessageExposureFrame.CameraFrameCount; + } + else + { + State.Transform.Position += VisionError.Transform.Position.EntrywiseMultiply(gainPos) * deltaT; + State.LinearVelocity += VisionError.Transform.Position.EntrywiseMultiply(gainVel) * deltaT; + // Uncomment the line below to try acclerometer bias estimation in filter + //AccelOffset += VisionError.Pose.Position * gainAccel * deltaT; + } } -bool SensorFusion::BodyFrameHandler::SupportsMessageType(MessageType type) const +void SensorFusion::applyVisionYawCorrection(double deltaT) { - return (type == Message_BodyFrame); + const double gain = 0.25; + const double snapThreshold = 0.1; + + if (LastVisionExposureRecord.ExposureCounter <= FullVisionCorrectionExposureCounter) + return; + + Quatd yawError = extractYawRotation(VisionError.Transform.Orientation); + + Quatd correction; + if (Alg::Abs(yawError.w) < cos(snapThreshold / 2)) // angle(yawError) > snapThreshold + { + // high error, jump to the vision position + correction = yawError; + // record the frame counter to avoid additional correction until we see the new data + FullVisionCorrectionExposureCounter = LastMessageExposureFrame.CameraFrameCount; + } + else + correction = yawError.Nlerp(Quatd(), gain * deltaT); + + State.Transform.Orientation = correction * State.Transform.Orientation; } -// Writes the current calibration for a particular device to a device profile file -// sensor - the sensor that was calibrated -// cal_name - an optional name for the calibration or default if cal_name == NULL -bool SensorFusion::SaveMagCalibration(const char* calibrationName) const +void SensorFusion::applyMagYawCorrection(Vector3d mag, double deltaT) { - if (CachedSensorInfo.SerialNumber[0] == 0 || !HasMagCalibration()) - return false; - - // A named 
calibration may be specified for calibration in different - // environments, otherwise the default calibration is used - if (calibrationName == NULL) - calibrationName = "default"; - - // Generate a mag calibration event - JSON* calibration = JSON::CreateObject(); - // (hardcoded for now) the measurement and representation method - calibration->AddStringItem("Version", "2.0"); - calibration->AddStringItem("Name", "default"); - - // time stamp the calibration - char time_str[64]; - -#ifdef OVR_OS_WIN32 - struct tm caltime; - localtime_s(&caltime, &MagCalibrationTime); - strftime(time_str, 64, "%Y-%m-%d %H:%M:%S", &caltime); -#else - struct tm* caltime; - caltime = localtime(&MagCalibrationTime); - strftime(time_str, 64, "%Y-%m-%d %H:%M:%S", caltime); -#endif - - calibration->AddStringItem("Time", time_str); - - // write the full calibration matrix - char matrix[256]; - Matrix4f calmat = GetMagCalibration(); - calmat.ToString(matrix, 256); - calibration->AddStringItem("CalibrationMatrix", matrix); - // save just the offset, for backwards compatibility - // this can be removed when we don't want to support 0.2.4 anymore - Vector3f center(calmat.M[0][3], calmat.M[1][3], calmat.M[2][3]); - Matrix4f tmp = calmat; tmp.M[0][3] = tmp.M[1][3] = tmp.M[2][3] = 0; tmp.M[3][3] = 1; - center = tmp.Inverted().Transform(center); - Matrix4f oldcalmat; oldcalmat.M[0][3] = center.x; oldcalmat.M[1][3] = center.y; oldcalmat.M[2][3] = center.z; - oldcalmat.ToString(matrix, 256); - calibration->AddStringItem("Calibration", matrix); - + const double minMagLengthSq = Mathd::Tolerance; // need to use a real value to discard very weak fields + const double maxMagRefDist = 0.1; + const double maxTiltError = 0.05; + const double proportionalGain = 0.01; + const double integralGain = 0.0005; + + Vector3d magW = State.Transform.Orientation.Rotate(mag); + // verify that the horizontal component is sufficient + if (magW.x * magW.x + magW.z * magW.z < minMagLengthSq) + return; + magW.Normalize(); - String path = GetBaseOVRPath(true); - path += "/Devices.json"; - - // Look for a prexisting device file to edit - Ptr<JSON> root = *JSON::Load(path); - if (root) - { // Quick sanity check of the file type and format before we parse it - JSON* version = root->GetFirstItem(); - if (version && version->Name == "Oculus Device Profile Version") - { - int major = atoi(version->Value.ToCStr()); - if (major > MAX_DEVICE_PROFILE_MAJOR_VERSION) + // Update the reference point if needed + if (MagRefScore < 0 || MagRefIdx < 0 || + mag.Distance(MagRefsInBodyFrame[MagRefIdx]) > maxMagRefDist) + { + // Delete a bad point + if (MagRefIdx >= 0 && MagRefScore < 0) + { + MagNumReferences--; + MagRefsInBodyFrame[MagRefIdx] = MagRefsInBodyFrame[MagNumReferences]; + MagRefsInWorldFrame[MagRefIdx] = MagRefsInWorldFrame[MagNumReferences]; + MagRefsPoses[MagRefIdx] = MagRefsPoses[MagRefIdx]; + } + // Find a new one + MagRefIdx = -1; + MagRefScore = 1000; + double bestDist = maxMagRefDist; + for (int i = 0; i < MagNumReferences; i++) + { + double dist = mag.Distance(MagRefsInBodyFrame[i]); + if (bestDist > dist) { - // don't use the file on unsupported major version number - root->Release(); - root = NULL; + bestDist = dist; + MagRefIdx = i; } } - else + // Create one if needed + if (MagRefIdx < 0 && MagNumReferences < MagMaxReferences) { - root->Release(); - root = NULL; + MagRefIdx = MagNumReferences; + MagRefsInBodyFrame[MagRefIdx] = mag; + MagRefsInWorldFrame[MagRefIdx] = magW; + MagRefsPoses[MagRefIdx] = State.Transform.Orientation; + 
MagNumReferences++; } } - JSON* device = NULL; - if (root) + if (MagRefIdx >= 0) { - device = root->GetFirstItem(); // skip the header - device = root->GetNextItem(device); - while (device) - { // Search for a previous calibration with the same name for this device - // and remove it before adding the new one - if (device->Name == "Device") - { - JSON* item = device->GetItemByName("Serial"); - if (item && item->Value == CachedSensorInfo.SerialNumber) - { // found an entry for this device - item = device->GetNextItem(item); - while (item) - { - if (item->Name == "MagCalibration") - { - JSON* name = item->GetItemByName("Name"); - if (name && name->Value == calibrationName) - { // found a calibration of the same name - item->RemoveNode(); - item->Release(); - break; - } - } - item = device->GetNextItem(item); - } - - // update the auto-mag flag - item = device->GetItemByName("EnableYawCorrection"); - if (item) - item->dValue = (double)EnableYawCorrection; - else - device->AddBoolItem("EnableYawCorrection", EnableYawCorrection); - - break; - } - } + Vector3d magRefW = MagRefsInWorldFrame[MagRefIdx]; - device = root->GetNextItem(device); + // If the vertical angle is wrong, decrease the score and do nothing + if (Alg::Abs(magRefW.y - magW.y) > maxTiltError) + { + MagRefScore -= 1; + return; } + + MagRefScore += 2; +#if 0 + // this doesn't seem to work properly, need to investigate + Quatd error = vectorAlignmentRotation(magW, magRefW); + Quatd yawError = extractYawRotation(error); +#else + // Correction is computed in the horizontal plane + magW.y = magRefW.y = 0; + Quatd yawError = vectorAlignmentRotation(magW, magRefW); +#endif + Quatd correction = yawError.Nlerp(Quatd(), proportionalGain * deltaT) * + MagCorrectionIntegralTerm.Nlerp(Quatd(), deltaT); + MagCorrectionIntegralTerm = MagCorrectionIntegralTerm * yawError.Nlerp(Quatd(), integralGain * deltaT); + + State.Transform.Orientation = correction * State.Transform.Orientation; } +} + +void SensorFusion::applyTiltCorrection(double deltaT) +{ + const double gain = 0.25; + const double snapThreshold = 0.1; + const Vector3d up(0, 1, 0); + + Vector3d accelW = State.Transform.Orientation.Rotate(FAccelHeadset.GetFilteredValue()); + Quatd error = vectorAlignmentRotation(accelW, up); + + Quatd correction; + if (FAccelHeadset.GetSize() == 1 || + ((Alg::Abs(error.w) < cos(snapThreshold / 2) && FAccelHeadset.Confidence() > 0.75))) + // full correction for start-up + // or large error with high confidence + correction = error; + else if (FAccelHeadset.Confidence() > 0.5) + correction = error.Nlerp(Quatd(), gain * deltaT); else - { // Create a new device root - root = *JSON::CreateObject(); - root->AddStringItem("Oculus Device Profile Version", "1.0"); - } + // accelerometer is unreliable due to movement + return; - if (device == NULL) - { - device = JSON::CreateObject(); - device->AddStringItem("Product", CachedSensorInfo.ProductName); - device->AddNumberItem("ProductID", CachedSensorInfo.ProductId); - device->AddStringItem("Serial", CachedSensorInfo.SerialNumber); - device->AddBoolItem("EnableYawCorrection", EnableYawCorrection); + State.Transform.Orientation = correction * State.Transform.Orientation; +} + +void SensorFusion::applyCameraTiltCorrection(Vector3d accel, double deltaT) +{ + const double snapThreshold = 0.02; // in radians + const double maxCameraPositionOffset = 0.2; + const Vector3d up(0, 1, 0), forward(0, 0, -1); + + if (LastVisionExposureRecord.ExposureCounter <= FullVisionCorrectionExposureCounter) + return; + + // for startup use 
filtered value instead of instantaneous for stability + if (FAccelCamera.IsEmpty()) + accel = FAccelHeadset.GetFilteredValue(); - root->AddItem("Device", device); + Quatd headsetToCamera = CameraPose.Orientation.Inverted() * VisionError.Transform.Orientation * State.Transform.Orientation; + // this is what the hypothetical camera-mounted accelerometer would show + Vector3d accelCamera = headsetToCamera.Rotate(accel); + FAccelCamera.Update(accelCamera, deltaT); + Vector3d accelCameraW = CameraPose.Orientation.Rotate(FAccelCamera.GetFilteredValue()); + + Quatd error1 = vectorAlignmentRotation(accelCameraW, up); + // cancel out yaw rotation + Vector3d forwardCamera = (error1 * CameraPose.Orientation).Rotate(forward); + forwardCamera.y = 0; + Quatd error2 = vectorAlignmentRotation(forwardCamera, forward); + // combined error + Quatd error = error2 * error1; + + double confidence = FAccelCamera.Confidence(); + // penalize the confidence if looking away from the camera + // TODO: smooth fall-off + if (VisionState.Transform.Orientation.Rotate(forward).Angle(forward) > 1) + confidence *= 0.5; + + Quatd correction; + if (FAccelCamera.GetSize() == 1 || + confidence > CameraPoseConfidence + 0.2 || + // disabled due to false positives when moving side to side +// (Alg::Abs(error.w) < cos(5 * snapThreshold / 2) && confidence > 0.55) || + (Alg::Abs(error.w) < cos(snapThreshold / 2) && confidence > 0.75)) + { + // large error with high confidence + correction = error; + // update the confidence level + CameraPoseConfidence = confidence; + // record the frame counter to avoid additional correction until we see the new data + FullVisionCorrectionExposureCounter = LastMessageExposureFrame.CameraFrameCount; + + LogText("adjust camera tilt confidence %f angle %f\n", + CameraPoseConfidence, RadToDegree(correction.Angle(Quatd()))); } + else + { + // accelerometer is unreliable due to movement + return; + } + + Quatd newOrientation = correction * CameraPose.Orientation; + // compute a camera position change that together with the camera rotation would result in zero player movement + Vector3d newPosition = CameraPose.Orientation.Rotate(VisionState.Transform.Position) + CameraPose.Position - + newOrientation.Rotate(VisionState.Transform.Position); + // if the new position is too far, reset to default + // (can't hide the rotation, might as well use it to reset the position) + if (newPosition.DistanceSq(DefaultCameraPosition) > maxCameraPositionOffset * maxCameraPositionOffset) + newPosition = DefaultCameraPosition; - // Create and the add the new calibration event to the device - device->AddItem("MagCalibration", calibration); + CameraPose.Orientation = newOrientation; + CameraPose.Position = newPosition; - return root->Save(path); + //Convenient global variable to temporarily extract this data. 
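applyPositionCorrection above acts as a fixed-gain blend of the vision position error into the IMU-integrated state: small errors are fed back proportionally into position (gains 10, 10, 8) and velocity (gain gainPos^2 / 2), while errors beyond the 0.1 m snap threshold, or a freshly reacquired position track, are applied in full. A sketch of that update on plain 3-vectors, with the exposure-counter gating left out:

#include <cmath>

// Sketch of the fixed-gain position correction above. position/velocity and the errors
// are 3-vectors stored as double[3]; gains and threshold follow the structure shown in
// applyPositionCorrection.
void CorrectPosition(double position[3], double velocity[3],
                     const double positionError[3], const double velocityError[3],
                     double deltaT, bool justReacquired)
{
    const double gainPos[3] = { 10.0, 10.0, 8.0 };
    const double snapThreshold = 0.1; // meters

    double errLenSq = positionError[0] * positionError[0] +
                      positionError[1] * positionError[1] +
                      positionError[2] * positionError[2];

    if (errLenSq > snapThreshold * snapThreshold || justReacquired)
    {
        // Large error or fresh reacquisition: jump straight to the vision estimate.
        for (int i = 0; i < 3; ++i)
        {
            position[i] += positionError[i];
            velocity[i] += velocityError[i];
        }
    }
    else
    {
        // Small error: bleed it in proportionally; the velocity gain is gainPos^2 / 2.
        for (int i = 0; i < 3; ++i)
        {
            position[i] += positionError[i] * gainPos[i] * deltaT;
            velocity[i] += positionError[i] * (gainPos[i] * gainPos[i] * 0.5) * deltaT;
        }
    }
}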
+ TPH_CameraPoseOrientationWxyz[0] = (float) newOrientation.w; + TPH_CameraPoseOrientationWxyz[1] = (float) newOrientation.x; + TPH_CameraPoseOrientationWxyz[2] = (float) newOrientation.y; + TPH_CameraPoseOrientationWxyz[3] = (float) newOrientation.z; + + + LogText("adjust camera position %f %f %f\n", newPosition.x, newPosition.y, newPosition.z); } -// Loads a saved calibration for the specified device from the device profile file -// sensor - the sensor that the calibration was saved for -// cal_name - an optional name for the calibration or the default if cal_name == NULL -bool SensorFusion::LoadMagCalibration(const char* calibrationName) +//------------------------------------------------------------------------------------- +// Head model functions. + +// Sets up head-and-neck model and device-to-pupil dimensions from the user's profile. +void SensorFusion::SetUserHeadDimensions(Profile const *profile, HmdRenderInfo const &hmdRenderInfo) { - if (CachedSensorInfo.SerialNumber[0] == 0) - return false; - - // A named calibration may be specified for calibration in different - // environments, otherwise the default calibration is used - if (calibrationName == NULL) - calibrationName = "default"; - - String path = GetBaseOVRPath(true); - path += "/Devices.json"; - - // Load the device profiles - Ptr<JSON> root = *JSON::Load(path); - if (root == NULL) - return false; - - // Quick sanity check of the file type and format before we parse it - JSON* version = root->GetFirstItem(); - if (version && version->Name == "Oculus Device Profile Version") - { - int major = atoi(version->Value.ToCStr()); - if (major > MAX_DEVICE_PROFILE_MAJOR_VERSION) - return false; // don't parse the file on unsupported major version number + float neckEyeHori = OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL; + float neckEyeVert = OVR_DEFAULT_NECK_TO_EYE_VERTICAL; + if ( profile != NULL ) + { + float neckeye[2]; + if (profile->GetFloatValues(OVR_KEY_NECK_TO_EYE_DISTANCE, neckeye, 2) == 2) + { + neckEyeHori = neckeye[0]; + neckEyeVert = neckeye[1]; + } } - else + // Make sure these are vaguely sensible values. + OVR_ASSERT ( ( neckEyeHori > 0.05f ) && ( neckEyeHori < 0.5f ) ); + OVR_ASSERT ( ( neckEyeVert > 0.05f ) && ( neckEyeVert < 0.5f ) ); + SetHeadModel ( Vector3f ( 0.0, neckEyeVert, -neckEyeHori ) ); + + // Find the distance from the center of the screen to the "center eye" + // This center eye is used by systems like rendering & audio to represent the player, + // and they will handle the offsets needed from there to each actual eye. + + // HACK HACK HACK + // We know for DK1 the screen->lens surface distance is roughly 0.049f, and that the faceplate->lens is 0.02357f. + // We're going to assume(!!!!) that all HMDs have the same screen->faceplate distance. + // Crystal Cove was measured to be roughly 0.025 screen->faceplate which agrees with this assumption. + // TODO: do this properly! 
Update: Measured this at 0.02733 with a CC prototype, CES era (PT7), on 2/19/14 -Steve + float screenCenterToMidplate = 0.02733f; + + float centerEyeRelief = hmdRenderInfo.GetEyeCenter().ReliefInMeters; + if ( profile == NULL ) { - return false; + // No valid profile, so the eye-relief won't be correct either, so fill in a default that feels good + centerEyeRelief = 0.020f; } + float centerPupilDepth = screenCenterToMidplate + hmdRenderInfo.LensSurfaceToMidplateInMeters + centerEyeRelief; + SetCenterPupilDepth ( centerPupilDepth ); +} - bool autoEnableCorrection = false; - - JSON* device = root->GetNextItem(version); - while (device) - { // Search for a previous calibration with the same name for this device - // and remove it before adding the new one - if (device->Name == "Device") - { - JSON* item = device->GetItemByName("Serial"); - if (item && item->Value == CachedSensorInfo.SerialNumber) - { // found an entry for this device - - JSON* autoyaw = device->GetItemByName("EnableYawCorrection"); - if (autoyaw) - autoEnableCorrection = (autoyaw->dValue != 0); - - int maxCalibrationVersion = 0; - item = device->GetNextItem(item); - while (item) - { - if (item->Name == "MagCalibration") - { - JSON* calibration = item; - JSON* name = calibration->GetItemByName("Name"); - if (name && name->Value == calibrationName) - { // found a calibration with this name - - int major = 0; - JSON* version = calibration->GetItemByName("Version"); - if (version) - major = atoi(version->Value.ToCStr()); - - if (major > maxCalibrationVersion && major <= 2) - { - time_t now; - time(&now); - - // parse the calibration time - time_t calibration_time = now; - JSON* caltime = calibration->GetItemByName("Time"); - if (caltime) - { - const char* caltime_str = caltime->Value.ToCStr(); - - tm ct; - memset(&ct, 0, sizeof(tm)); - -#ifdef OVR_OS_WIN32 - struct tm nowtime; - localtime_s(&nowtime, &now); - ct.tm_isdst = nowtime.tm_isdst; - sscanf_s(caltime_str, "%d-%d-%d %d:%d:%d", - &ct.tm_year, &ct.tm_mon, &ct.tm_mday, - &ct.tm_hour, &ct.tm_min, &ct.tm_sec); -#else - struct tm* nowtime = localtime(&now); - ct.tm_isdst = nowtime->tm_isdst; - sscanf(caltime_str, "%d-%d-%d %d:%d:%d", - &ct.tm_year, &ct.tm_mon, &ct.tm_mday, - &ct.tm_hour, &ct.tm_min, &ct.tm_sec); -#endif - ct.tm_year -= 1900; - ct.tm_mon--; - calibration_time = mktime(&ct); - } - - // parse the calibration matrix - JSON* cal = calibration->GetItemByName("CalibrationMatrix"); - if (cal == NULL) - cal = calibration->GetItemByName("Calibration"); - - if (cal) - { - Matrix4f calmat = Matrix4f::FromString(cal->Value.ToCStr()); - SetMagCalibration(calmat); - MagCalibrationTime = calibration_time; - EnableYawCorrection = autoEnableCorrection; - - maxCalibrationVersion = major; - } - } - } - } - item = device->GetNextItem(item); - } - - return (maxCalibrationVersion > 0); - } - } +Vector3f SensorFusion::GetHeadModel() const +{ + return (Vector3f)HeadModel; +} - device = root->GetNextItem(device); +void SensorFusion::SetHeadModel(const Vector3f &headModel, bool resetNeckPivot /*= true*/ ) +{ + Lock::Locker lockScope(pHandler->GetHandlerLock()); + // The head model should look something like (0, 0.12, -0.12), so + // these asserts are to try to prevent sign problems, as + // they can be subtle but nauseating! 
+ OVR_ASSERT ( headModel.y > 0.0f ); + OVR_ASSERT ( headModel.z < 0.0f ); + HeadModel = (Vector3d)headModel; + if ( resetNeckPivot ) + { + setNeckPivotFromPose ( State.Transform ); } +} + +float SensorFusion::GetCenterPupilDepth() const +{ + return CenterPupilDepth; +} + + +void SensorFusion::SetCenterPupilDepth(float centerPupilDepth) +{ + CenterPupilDepth = centerPupilDepth; + + CPFPositionInIMUFrame = -IMUPosition; + CPFPositionInIMUFrame.z += CenterPupilDepth; + + setNeckPivotFromPose ( State.Transform ); +} + +//------------------------------------------------------------------------------------- + +// This is a "perceptually tuned predictive filter", which means that it is optimized +// for improvements in the VR experience, rather than pure error. In particular, +// jitter is more perceptible at lower speeds whereas latency is more perceptable +// after a high-speed motion. Therefore, the prediction interval is dynamically +// adjusted based on speed. Significant more research is needed to further improve +// this family of filters. +static Pose<double> calcPredictedPose(const PoseState<double>& poseState, double predictionDt) +{ + Pose<double> pose = poseState.Transform; + const double linearCoef = 1.0; + Vector3d angularVelocity = poseState.AngularVelocity; + double angularSpeed = angularVelocity.Length(); + + // This could be tuned so that linear and angular are combined with different coefficients + double speed = angularSpeed + linearCoef * poseState.LinearVelocity.Length(); + + const double slope = 0.2; // The rate at which the dynamic prediction interval varies + double candidateDt = slope * speed; // TODO: Replace with smoothstep function + + double dynamicDt = predictionDt; + + // Choose the candidate if it is shorter, to improve stability + if (candidateDt < predictionDt) + dynamicDt = candidateDt; + + if (angularSpeed > 0.001) + pose.Orientation = pose.Orientation * Quatd(angularVelocity, angularSpeed * dynamicDt); + + pose.Position += poseState.LinearVelocity * dynamicDt; + + return pose; +} + + +Posef SensorFusion::GetPoseAtTime(double absoluteTime) const +{ + SensorState ss = GetSensorStateAtTime ( absoluteTime ); + return ss.Predicted.Transform; +} + + +SensorState SensorFusion::GetSensorStateAtTime(double absoluteTime) const +{ + const LocklessState lstate = UpdatedState.GetState(); + // Delta time from the last processed message + const double pdt = absoluteTime - lstate.State.TimeInSeconds; + + SensorState ss; + ss.Recorded = PoseStatef(lstate.State); + ss.Temperature = lstate.Temperature; + ss.Magnetometer = Vector3f(lstate.Magnetometer); + ss.StatusFlags = lstate.StatusFlags; + + // Do prediction logic + ss.Predicted = ss.Recorded; + ss.Predicted.TimeInSeconds = absoluteTime; + ss.Predicted.Transform = Posef(calcPredictedPose(lstate.State, pdt)); - return false; + // CPFOriginInIMUFrame transformation + const Vector3f cpfOriginInIMUFrame(CPFPositionInIMUFrame); + ss.Recorded.Transform.Position += ss.Recorded.Transform.Orientation.Rotate(cpfOriginInIMUFrame); + ss.Predicted.Transform.Position += ss.Predicted.Transform.Orientation.Rotate(cpfOriginInIMUFrame); + return ss; +} + +unsigned SensorFusion::GetStatus() const +{ + return UpdatedState.GetState().StatusFlags; } +//------------------------------------------------------------------------------------- +void SensorFusion::OnMessage(const MessageBodyFrame& msg) +{ + OVR_ASSERT(!IsAttachedToSensor()); + handleMessage(msg); +} -} // namespace OVR 
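calcPredictedPose above shortens the prediction interval at low speeds to trade perceived latency for reduced jitter: the interval is min(requestedDt, slope * (|angular velocity| + |linear velocity|)), after which position is extrapolated by the linear velocity and orientation by an axis-angle step of angular velocity times the interval. A sketch of the interval selection and the linear part (the quaternion orientation step is omitted):

#include <cmath>

// Sketch of the "perceptually tuned" prediction interval from calcPredictedPose:
// at low speeds the interval shrinks (less jitter), at high speeds it approaches
// the requested lookahead (less perceived latency).
double DynamicPredictionDt(double requestedDt,
                           double angularSpeed,    // |angular velocity|, rad/s
                           double linearSpeed)     // |linear velocity|, m/s
{
    const double linearCoef = 1.0;
    const double slope = 0.2;                       // growth rate of the interval with speed
    double candidate = slope * (angularSpeed + linearCoef * linearSpeed);
    return (candidate < requestedDt) ? candidate : requestedDt;
}

// Linear part of the extrapolation; orientation would additionally be advanced by
// an axis-angle quaternion step of angularVelocity * dynamicDt.
void PredictPosition(double position[3], const double linearVelocity[3], double dynamicDt)
{
    for (int i = 0; i < 3; ++i)
        position[i] += linearVelocity[i] * dynamicDt;
}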
+//------------------------------------------------------------------------------------- +void SensorFusion::BodyFrameHandler::OnMessage(const Message& msg) +{ + Recorder::Buffer(msg); + if (msg.Type == Message_BodyFrame) + pFusion->handleMessage(static_cast<const MessageBodyFrame&>(msg)); + if (msg.Type == Message_ExposureFrame) + pFusion->handleExposure(static_cast<const MessageExposureFrame&>(msg)); +} + +} // namespace OVR diff --git a/LibOVR/Src/OVR_SensorFusion.h b/LibOVR/Src/OVR_SensorFusion.h index 6f2ee4c..3d10c3d 100644 --- a/LibOVR/Src/OVR_SensorFusion.h +++ b/LibOVR/Src/OVR_SensorFusion.h @@ -4,18 +4,18 @@ PublicHeader: OVR.h Filename : OVR_SensorFusion.h Content : Methods that determine head orientation from sensor data over time Created : October 9, 2012 -Authors : Michael Antonov, Steve LaValle, Max Katsev +Authors : Michael Antonov, Steve LaValle, Dov Katz, Max Katsev -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -31,9 +31,146 @@ limitations under the License. #include "OVR_Device.h" #include "OVR_SensorFilter.h" #include <time.h> +#include "Kernel/OVR_Timer.h" +#include "Kernel/OVR_Threads.h" +#include "Kernel/OVR_Lockless.h" + +// CAPI forward declarations. +typedef struct ovrPoseStatef_ ovrPoseStatef; +typedef struct ovrSensorState_ ovrSensorState; namespace OVR { +struct HmdRenderInfo; + +//------------------------------------------------------------------------------------- +// ***** Sensor State + +// These values are reported as compatible with C API. + + +// PoseState describes the complete pose, or a rigid body configuration, at a +// point in time, including first and second derivatives. It is used to specify +// instantaneous location and movement of the headset. +// SensorState is returned as a part of the sensor state. + +template<class T> +class PoseState +{ +public: + typedef typename CompatibleTypes<Pose<T> >::Type CompatibleType; + + PoseState() : TimeInSeconds(0.0) { } + // float <-> double conversion constructor. 
+ explicit PoseState(const PoseState<typename Math<T>::OtherFloatType> &src) + : Transform(src.Transform), + AngularVelocity(src.AngularVelocity), LinearVelocity(src.LinearVelocity), + AngularAcceleration(src.AngularAcceleration), LinearAcceleration(src.LinearAcceleration), + TimeInSeconds(src.TimeInSeconds) + { } + + // C-interop support: PoseStatef <-> ovrPoseStatef + PoseState(const typename CompatibleTypes<PoseState<T> >::Type& src) + : Transform(src.Pose), + AngularVelocity(src.AngularVelocity), LinearVelocity(src.LinearVelocity), + AngularAcceleration(src.AngularAcceleration), LinearAcceleration(src.LinearAcceleration), + TimeInSeconds(src.TimeInSeconds) + { } + + operator const typename CompatibleTypes<PoseState<T> >::Type () const + { + typename CompatibleTypes<PoseState<T> >::Type result; + result.Pose = Transform; + result.AngularVelocity = AngularVelocity; + result.LinearVelocity = LinearVelocity; + result.AngularAcceleration = AngularAcceleration; + result.LinearAcceleration = LinearAcceleration; + result.TimeInSeconds = TimeInSeconds; + return result; + } + + + Pose<T> Transform; + Vector3<T> AngularVelocity; + Vector3<T> LinearVelocity; + Vector3<T> AngularAcceleration; + Vector3<T> LinearAcceleration; + // Absolute time of this state sample; always a double measured in seconds. + double TimeInSeconds; + + + // ***** Helpers for Pose integration + + // Stores and integrates gyro angular velocity reading for a given time step. + void StoreAndIntegrateGyro(Vector3d angVel, double dt); + // Stores and integrates position/velocity from accelerometer reading for a given time step. + void StoreAndIntegrateAccelerometer(Vector3d linearAccel, double dt); + + // Performs integration of state by adding next state delta to it + // to produce a combined state change + void AdvanceByDelta(const PoseState<T>& delta); +}; + + + +// External API returns pose as float, but uses doubles internally for quaternion precision. +typedef PoseState<float> PoseStatef; +typedef PoseState<double> PoseStated; + + +//------------------------------------------------------------------------------------- +// ***** Sensor State + + +// Bit flags describing the current status of sensor tracking. +enum StatusBits +{ + Status_OrientationTracked = 0x0001, // Orientation is currently tracked (connected and in use). + Status_PositionTracked = 0x0002, // Position is currently tracked (false if out of range). + Status_PositionConnected = 0x0020, // Position tracking HW is conceded. + // Status_HMDConnected = 0x0080 // HMD Display is available & connected. +}; + + +// Full state of of the sensor reported by GetSensorState() at a given absolute time. +class SensorState +{ +public: + SensorState() : Temperature(0), StatusFlags(0) { } + + // C-interop support + SensorState(const ovrSensorState& s); + operator const ovrSensorState () const; + + // Pose state at the time that SensorState was requested. + PoseStatef Predicted; + // Actual recorded pose configuration based on sensor sample at a + // moment closest to the requested time. + PoseStatef Recorded; + + // Calibrated magnetometer reading, in Gauss, at sample time. + Vector3f Magnetometer; + // Sensor temperature reading, in degrees Celsius, at sample time. + float Temperature; + // Sensor status described by ovrStatusBits. 
+ unsigned int StatusFlags; +}; + + + +//------------------------------------------------------------------------------------- + +class VisionHandler +{ +public: + virtual void OnVisionSuccess(const Pose<double>& pose, UInt32 exposureCounter) = 0; + virtual void OnVisionPreviousFrame(const Pose<double>& pose) = 0; + virtual void OnVisionFailure() = 0; + + // Get a configuration that represents the change over a short time interval + virtual Pose<double> GetVisionPrediction(UInt32 exposureCounter) = 0; +}; + //------------------------------------------------------------------------------------- // ***** SensorFusion @@ -50,7 +187,7 @@ namespace OVR { // automatically handle notifications from that device. -class SensorFusion : public NewOverrideBase +class SensorFusion : public NewOverrideBase, public VisionHandler { enum { @@ -58,207 +195,374 @@ class SensorFusion : public NewOverrideBase }; public: + + // ------------------------------------------------------------------------------- + // Critical components for tiny API + SensorFusion(SensorDevice* sensor = 0); ~SensorFusion(); - - // *** Setup - - // Attaches this SensorFusion to a sensor device, from which it will receive + // Attaches this SensorFusion to the IMU sensor device, from which it will receive // notification messages. If a sensor is attached, manual message notification // is not necessary. Calling this function also resets SensorFusion state. - bool AttachToSensor(SensorDevice* sensor); - - // Returns true if this Sensor fusion object is attached to a sensor. - bool IsAttachedToSensor() const { return Handler.IsHandlerInstalled(); } - + bool AttachToSensor(SensorDevice* sensor); + // Returns true if this Sensor fusion object is attached to the IMU. + bool IsAttachedToSensor() const; - // *** State Query + // Sets up head-and-neck model and device-to-pupil dimensions from the user's profile and the HMD stats. + // This copes elegantly if profile is NULL. + void SetUserHeadDimensions(Profile const *profile, HmdRenderInfo const &hmdRenderInfo); - // Obtain the current accumulated orientation. Many apps will want to use GetPredictedOrientation - // instead to reduce latency. - Quatf GetOrientation() const { return lockedGet(&Q); } + // Get the predicted pose (orientation, position) of the center pupil frame (CPF) at a specific point in time. + Posef GetPoseAtTime(double absoluteTime) const; - // Get predicted orientaion in the near future; predictDt is lookahead amount in seconds. - Quatf GetPredictedOrientation(float predictDt); - Quatf GetPredictedOrientation() { return GetPredictedOrientation(PredictionDT); } + // Get the full dynamical system state of the CPF, which includes velocities and accelerations, + // predicted at a specified absolute point in time. + SensorState GetSensorStateAtTime(double absoluteTime) const; - // Obtain the last absolute acceleration reading, in m/s^2. - Vector3f GetAcceleration() const { return lockedGet(&A); } - // Obtain the last angular velocity reading, in rad/s. 
- Vector3f GetAngularVelocity() const { return lockedGet(&AngV); } - - // Obtain the last raw magnetometer reading, in Gauss - Vector3f GetMagnetometer() const { return lockedGet(&RawMag); } - // Obtain the calibrated magnetometer reading (direction and field strength) - Vector3f GetCalibratedMagnetometer() const { OVR_ASSERT(MagCalibrated); return lockedGet(&CalMag); } + // Get the sensor status (same as GetSensorStateAtTime(...).Status) + unsigned int GetStatus() const; + // End tiny API components + // ------------------------------------------------------------------------------- // Resets the current orientation. - void Reset(); + void Reset (); + // Configuration + void EnableMotionTracking(bool enable = true) { MotionTrackingEnabled = enable; } + bool IsMotionTrackingEnabled() const { return MotionTrackingEnabled; } + + // Accelerometer/Gravity Correction Control + // Enables/disables gravity correction (on by default). + void SetGravityEnabled (bool enableGravity); + bool IsGravityEnabled () const; + // Vision Position and Orientation Configuration + // ----------------------------------------------- + bool IsVisionPositionEnabled () const; + void SetVisionPositionEnabled (bool enableVisionPosition); + + // compensates for a tilted camera + void SetCameraTiltCorrectionEnabled(bool enable); + bool IsCameraTiltCorrectionEnabled () const; + + // Message Handling Logic + // ----------------------------------------------- + // Notifies SensorFusion object about a new BodyFrame + // message from a sensor. + // Should be called by user if not attached to sensor. + void OnMessage (const MessageBodyFrame& msg); + + + // Interaction with vision + // ----------------------------------------------- + // Handle observation from vision system (orientation, position, time) + virtual void OnVisionSuccess(const Pose<double>& pose, UInt32 exposureCounter); + virtual void OnVisionPreviousFrame(const Pose<double>& pose); + virtual void OnVisionFailure(); + // Get a configuration that represents the change over a short time interval + virtual Pose<double> GetVisionPrediction(UInt32 exposureCounter); + + double GetTime () const; + double GetVisionLatency () const; + + + // Detailed head dimension control + // ----------------------------------------------- + // These are now deprecated in favour of SetUserHeadDimensions() + Vector3f GetHeadModel() const; + void SetHeadModel(const Vector3f &headModel, bool resetNeckPivot = true ); + float GetCenterPupilDepth() const; + void SetCenterPupilDepth(float centerPupilDepth); + + + // Magnetometer and Yaw Drift Section: + // --------------------------------------- + + // Enables/disables magnetometer based yaw drift correction. + // Must also have mag calibration data for this correction to work. + void SetYawCorrectionEnabled(bool enable); + // Determines if yaw correction is enabled. 
+ bool IsYawCorrectionEnabled () const; - // *** Configuration + // True if mag has calibration values stored + bool HasMagCalibration () const; + // Clear the reference points associating + // mag readings with orientations + void ClearMagReferences (); - void EnableMotionTracking(bool enable = true) { MotionTrackingEnabled = enable; } - bool IsMotionTrackingEnabled() const { return MotionTrackingEnabled; } +private: + + // ----------------------------------------------- + + class BodyFrameHandler : public NewOverrideBase, public MessageHandler + { + SensorFusion* pFusion; + public: + BodyFrameHandler(SensorFusion* fusion) + : pFusion(fusion) {} + ~BodyFrameHandler(); + virtual void OnMessage(const Message& msg); + virtual bool SupportsMessageType(MessageType type) const; + }; - // *** Prediction Control - // Prediction functions. - // Prediction delta specifes how much prediction should be applied in seconds; it should in - // general be under the average rendering latency. Call GetPredictedOrientation() to get - // predicted orientation. - float GetPredictionDelta() const { return PredictionDT; } - void SetPrediction(float dt, bool enable = true) { PredictionDT = dt; EnablePrediction = enable; } - void SetPredictionEnabled(bool enable = true) { EnablePrediction = enable; } - bool IsPredictionEnabled() { return EnablePrediction; } + // ----------------------------------------------- + // State version stored in lockless updater "queue" and used for + // prediction by GetPoseAtTime/GetSensorStateAtTime + struct LocklessState + { + PoseState<double> State; + float Temperature; + Vector3d Magnetometer; + unsigned int StatusFlags; - // *** Accelerometer/Gravity Correction Control + LocklessState() : Temperature(0.0), StatusFlags(0) { }; + }; - // Enables/disables gravity correction (on by default). - void SetGravityEnabled(bool enableGravity) { EnableGravity = enableGravity; } - bool IsGravityEnabled() const { return EnableGravity;} - // Gain used to correct gyro with accel. Default value is appropriate for typical use. - float GetAccelGain() const { return Gain; } - void SetAccelGain(float ag) { Gain = ag; } + // ----------------------------------------------- + // Entry describing the state of the headset at the time of an exposure as reported by the DK2 board. + // This is used in combination with the vision data for + // incremental tracking based on IMU change and for drift correction + struct ExposureRecord + { + UInt32 ExposureCounter; + double ExposureTime; + PoseState<double> State; // State of the headset at the time of exposure. + PoseState<double> Delta; // IMU Delta between previous exposure (or a vision frame) and this one. + + ExposureRecord() : ExposureCounter(0), ExposureTime(0.0) { } + ExposureRecord(UInt32 exposureCounter, double exposureTime, + const PoseState<double>& state, + const PoseState<double>& stateDelta) + : ExposureCounter(exposureCounter), ExposureTime(exposureTime), + State(state), Delta(stateDelta) { } + }; + + // ----------------------------------------------- + + // The phase of the head as estimated by sensor fusion + PoseState<double> State; + + // State that can be read without any locks, so that high priority rendering thread + // doesn't have to worry about being blocked by a sensor/vision threads that got preempted. 
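+ // (GetPoseAtTime/GetSensorStateAtTime read this snapshot and extrapolate it forward
+ // with calcPredictedPose, so render-thread reads never block the fusion thread.)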
+ LocklessUpdater<LocklessState> UpdatedState; + + // The pose we got from Vision, augmented with velocity information from numerical derivatives + // This is the only state that is stored in the camera reference frame; the rest are in the world frame + PoseState<double> VisionState; + // Difference between the state from vision and the main State at the time of exposure + PoseState<double> VisionError; + // ExposureRecord that corresponds to the same exposure/frame as VisionState + ExposureRecord LastVisionExposureRecord; + // Change in state since the last exposure based on IMU data only + // (used for incremental tracking predictions) + PoseState<double> CurrentExposureIMUDelta; + // Past exposure records between the last update from vision and now + // (should only be one record unless vision latency is high) + CircularBuffer<ExposureRecord> ExposureRecordHistory; + // Timings of the previous exposure, used to populate ExposureRecordHistory + MessageExposureFrame LastMessageExposureFrame; + // Time of the last vision update + double LastVisionAbsoluteTime; + // Used by the head model. + Vector3d NeckPivotPosition; + + bool EnableCameraTiltCorrection; + // Pose of the camera in the world coordinate system + Pose<double> CameraPose; + double CameraPoseConfidence; + Vector3d DefaultCameraPosition; + + UInt32 FullVisionCorrectionExposureCounter; + + // This is a signed distance, but positive because Z increases looking inward. + // This is expressed relative to the IMU in the HMD and corresponds to the location + // of the cyclopean virtual camera focal point if both the physical and virtual + // worlds are isometrically mapped onto each other. -Steve + float CenterPupilDepth; + Vector3d CPFPositionInIMUFrame; + // Position of the IMU relative to the center of the screen (loaded from the headset firmware) + Vector3d IMUPosition; + // Origin of the positional coordinate system in the real world relative to the camera. + Vector3d PositionOrigin; + + double VisionMaxIMUTrackTime; + + unsigned int Stage; + BodyFrameHandler *pHandler; + volatile bool EnableGravity; + + SensorFilterBodyFrame FAccelHeadset, FAccelCamera; + SensorFilterd FAngV; + + Vector3d AccelOffset; + + bool EnableYawCorrection; + bool MagCalibrated; +public: // The below made public for access during rendering for debugging + int MagNumReferences; + Vector3d MagRefsInBodyFrame[MagMaxReferences]; + Vector3d MagRefsInWorldFrame[MagMaxReferences]; + Quatd MagRefsPoses[MagMaxReferences]; + int MagRefIdx; +private: + int MagRefScore; + Quatd MagCorrectionIntegralTerm; - // *** Magnetometer and Yaw Drift Correction Control + bool MotionTrackingEnabled; + bool VisionPositionEnabled; - // Methods to load and save a mag calibration. Calibrations can optionally - // be specified by name to differentiate multiple calibrations under different conditions - // If LoadMagCalibration succeeds, it will override YawCorrectionEnabled based on - // saved calibration setting. - bool SaveMagCalibration(const char* calibrationName = NULL) const; - bool LoadMagCalibration(const char* calibrationName = NULL); + // Built-in head model for faking + // position using orientation only + Vector3d HeadModel; - // Enables/disables magnetometer based yaw drift correction. Must also have mag calibration - // data for this correction to work. - void SetYawCorrectionEnabled(bool enable) { EnableYawCorrection = enable; } - // Determines if yaw correction is enabled. 
- bool IsYawCorrectionEnabled() const { return EnableYawCorrection;} + //--------------------------------------------- - // Store the calibration matrix for the magnetometer - void SetMagCalibration(const Matrix4f& m) - { - MagCalibrationMatrix = m; - time(&MagCalibrationTime); // time stamp the calibration - MagCalibrated = true; - } + // Internal handler for messages + // bypasses error checking. + void handleMessage(const MessageBodyFrame& msg); + void handleExposure(const MessageExposureFrame& msg); + + // Returns new gyroCorrection + Vector3d calcMagYawCorrectionForMessage(Vector3d gyroCorrection, + Quatd q, Quatd qInv, + Vector3d calMag, Vector3d up, double deltaT); + // Apply headset yaw correction from magnetometer + // for models without camera or when camera isn't available + void applyMagYawCorrection(Vector3d mag, double deltaT); + // Apply headset tilt correction from the accelerometer + void applyTiltCorrection(double deltaT); + // Apply headset yaw correction from the camera + void applyVisionYawCorrection(double deltaT); + // Apply headset position correction from the camera + void applyPositionCorrection(double deltaT); + // Apply camera tilt correction from the accelerometer + void applyCameraTiltCorrection(Vector3d accel, double deltaT); + + // If you have a known-good pose, this sets the neck pivot position. + void setNeckPivotFromPose ( Posed const &pose ); +}; - // Retrieves the magnetometer calibration matrix - Matrix4f GetMagCalibration() const { return MagCalibrationMatrix; } - // Retrieve the time of the calibration - time_t GetMagCalibrationTime() const { return MagCalibrationTime; } - // True only if the mag has calibration values stored - bool HasMagCalibration() const { return MagCalibrated;} - // Force the mag into the uncalibrated state - void ClearMagCalibration() { MagCalibrated = false; } - // These refer to reference points that associate mag readings with orientations - void ClearMagReferences() { MagNumReferences = 0; } +//------------------------------------------------------------------------------------- +// ***** SensorFusion - Inlines +inline bool SensorFusion::IsAttachedToSensor() const +{ + return pHandler->IsHandlerInstalled(); +} - Vector3f GetCalibratedMagValue(const Vector3f& rawMag) const; +inline void SensorFusion::SetGravityEnabled(bool enableGravity) +{ + EnableGravity = enableGravity; +} +inline bool SensorFusion::IsGravityEnabled() const +{ + return EnableGravity; +} +inline void SensorFusion::SetYawCorrectionEnabled(bool enable) +{ + EnableYawCorrection = enable; +} - // *** Message Handler Logic +inline bool SensorFusion::IsYawCorrectionEnabled() const +{ + return EnableYawCorrection; +} - // Notifies SensorFusion object about a new BodyFrame message from a sensor. - // Should be called by user if not attaching to a sensor. 
- void OnMessage(const MessageBodyFrame& msg) - { - OVR_ASSERT(!IsAttachedToSensor()); - handleMessage(msg); - } +inline bool SensorFusion::HasMagCalibration() const +{ + return MagCalibrated; +} - void SetDelegateMessageHandler(MessageHandler* handler) - { pDelegate = handler; } +inline void SensorFusion::ClearMagReferences() +{ + MagNumReferences = 0; +} +inline bool SensorFusion::IsVisionPositionEnabled() const +{ + return VisionPositionEnabled; +} +inline void SensorFusion::SetVisionPositionEnabled(bool enableVisionPosition) +{ + VisionPositionEnabled = enableVisionPosition; +} -private: +inline void SensorFusion::SetCameraTiltCorrectionEnabled(bool enable) +{ + EnableCameraTiltCorrection = enable; +} - SensorFusion* getThis() { return this; } +inline bool SensorFusion::IsCameraTiltCorrectionEnabled() const +{ + return EnableCameraTiltCorrection; +} - // Helper used to read and return value within a Lock. - template<class C> - C lockedGet(const C* p) const - { - Lock::Locker lockScope(Handler.GetHandlerLock()); - return *p; - } +inline double SensorFusion::GetVisionLatency() const +{ + return LastVisionAbsoluteTime - VisionState.TimeInSeconds; +} - // Internal handler for messages; bypasses error checking. - void handleMessage(const MessageBodyFrame& msg); +inline double SensorFusion::GetTime() const +{ + return Timer::GetSeconds(); +} - // Set the magnetometer's reference orientation for use in yaw correction - // The supplied mag is an uncalibrated value - void setMagReference(const Quatf& q, const Vector3f& rawMag); - // Default to current HMD orientation - void setMagReference() { setMagReference(Q, RawMag); } +inline SensorFusion::BodyFrameHandler::~BodyFrameHandler() +{ + RemoveHandlerFromDevices(); +} - class BodyFrameHandler : public MessageHandler - { - SensorFusion* pFusion; - public: - BodyFrameHandler(SensorFusion* fusion) : pFusion(fusion) { } - ~BodyFrameHandler(); +inline bool SensorFusion::BodyFrameHandler::SupportsMessageType(MessageType type) const +{ + return (type == Message_BodyFrame || type == Message_ExposureFrame); +} - virtual void OnMessage(const Message& msg); - virtual bool SupportsMessageType(MessageType type) const; - }; - SensorInfo CachedSensorInfo; - - Quatf Q; - Quatf QUncorrected; - Vector3f A; - Vector3f AngV; - Vector3f CalMag; - Vector3f RawMag; - unsigned int Stage; - float RunningTime; - float DeltaT; - BodyFrameHandler Handler; - MessageHandler* pDelegate; - float Gain; - volatile bool EnableGravity; - - bool EnablePrediction; - float PredictionDT; - float PredictionTimeIncrement; - - SensorFilter FRawMag; - SensorFilter FAngV; - - Vector3f GyroOffset; - SensorFilterBase<float> TiltAngleFilter; - - - bool EnableYawCorrection; - bool MagCalibrated; - Matrix4f MagCalibrationMatrix; - time_t MagCalibrationTime; - int MagNumReferences; - Vector3f MagRefsInBodyFrame[MagMaxReferences]; - Vector3f MagRefsInWorldFrame[MagMaxReferences]; - int MagRefIdx; - int MagRefScore; - - bool MotionTrackingEnabled; -}; +//------------------------------------------------------------------------------------- +// ***** PoseState - Inlines +// Stores and integrates gyro angular velocity reading for a given time step. 
+template<class T> +void PoseState<T>::StoreAndIntegrateGyro(Vector3d angVel, double dt) +{ + AngularVelocity = angVel; + double angle = angVel.Length() * dt; + if (angle > 0) + Transform.Orientation = Transform.Orientation * Quatd(angVel, angle); +} + +template<class T> +void PoseState<T>::StoreAndIntegrateAccelerometer(Vector3d linearAccel, double dt) +{ + LinearAcceleration = linearAccel; + Transform.Position += LinearVelocity * dt + LinearAcceleration * (dt * dt * 0.5); + LinearVelocity += LinearAcceleration * dt; +} + +// Performs integration of state by adding next state delta to it +// to produce a combined state change +template<class T> +void PoseState<T>::AdvanceByDelta(const PoseState<T>& delta) +{ + Transform.Orientation = Transform.Orientation * delta.Transform.Orientation; + Transform.Position += delta.Transform.Position + LinearVelocity * delta.TimeInSeconds; + LinearVelocity += delta.LinearVelocity; + TimeInSeconds += delta.TimeInSeconds; +} } // namespace OVR - #endif diff --git a/LibOVR/Src/OVR_SensorImpl.cpp b/LibOVR/Src/OVR_SensorImpl.cpp index 1646ee3..43f3b67 100644 --- a/LibOVR/Src/OVR_SensorImpl.cpp +++ b/LibOVR/Src/OVR_SensorImpl.cpp @@ -3,18 +3,18 @@ Filename : OVR_SensorImpl.cpp Content : Oculus Sensor device implementation. Created : March 7, 2013 -Authors : Lee Cooper +Authors : Lee Cooper, Dov Katz -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -25,23 +25,31 @@ limitations under the License. *************************************************************************************/ #include "OVR_SensorImpl.h" +#include "OVR_Sensor2Impl.h" +#include "OVR_SensorImpl_Common.h" +#include "OVR_JSON.h" +#include "OVR_Profile.h" +#include "Kernel/OVR_Alg.h" +#include <time.h> // HMDDeviceDesc can be created/updated through Sensor carrying DisplayInfo. 
#include "Kernel/OVR_Timer.h" +//extern FILE *SF_LOG_fp; + namespace OVR { - + +using namespace Alg; + //------------------------------------------------------------------------------------- // ***** Oculus Sensor-specific packet data structures enum { Sensor_VendorId = Oculus_VendorId, - Sensor_ProductId = 0x0001, - - // ST's VID used originally; should be removed in the future - Sensor_OldVendorId = 0x0483, - Sensor_OldProductId = 0x5750, + Sensor_Tracker_ProductId = Device_Tracker_ProductId, + Sensor_Tracker2_ProductId = Device_Tracker2_ProductId, + Sensor_KTracker_ProductId = Device_KTracker_ProductId, Sensor_BootLoader = 0x1001, @@ -49,45 +57,6 @@ enum { Sensor_MaxReportRate = 1000 // Hz }; -// Reported data is little-endian now -static UInt16 DecodeUInt16(const UByte* buffer) -{ - return (UInt16(buffer[1]) << 8) | UInt16(buffer[0]); -} - -static SInt16 DecodeSInt16(const UByte* buffer) -{ - return (SInt16(buffer[1]) << 8) | SInt16(buffer[0]); -} - -static UInt32 DecodeUInt32(const UByte* buffer) -{ - return (buffer[0]) | UInt32(buffer[1] << 8) | UInt32(buffer[2] << 16) | UInt32(buffer[3] << 24); -} - -static float DecodeFloat(const UByte* buffer) -{ - union { - UInt32 U; - float F; - }; - - U = DecodeUInt32(buffer); - return F; -} - - -static void UnpackSensor(const UByte* buffer, SInt32* x, SInt32* y, SInt32* z) -{ - // Sign extending trick - // from http://graphics.stanford.edu/~seander/bithacks.html#FixedSignExtend - struct {SInt32 x:21;} s; - - *x = s.x = (buffer[0] << 13) | (buffer[1] << 5) | ((buffer[2] & 0xF8) >> 3); - *y = s.x = ((buffer[2] & 0x07) << 18) | (buffer[3] << 10) | (buffer[4] << 2) | - ((buffer[5] & 0xC0) >> 6); - *z = s.x = ((buffer[5] & 0x3F) << 15) | (buffer[6] << 7) | (buffer[7] >> 1); -} // Messages we care for enum TrackerMessageType @@ -98,12 +67,6 @@ enum TrackerMessageType TrackerMessage_SizeError = 0x101, }; -struct TrackerSample -{ - SInt32 AccelX, AccelY, AccelZ; - SInt32 GyroX, GyroY, GyroZ; -}; - struct TrackerSensors { @@ -130,9 +93,9 @@ struct TrackerSensors // OVR_DEBUG_LOG_TEXT(("TackerSensor::Decode SampleCount=%d\n", SampleCount)); // Only unpack as many samples as there actually are - UByte iterationCount = (SampleCount > 2) ? 3 : SampleCount; + int iterationCount = (SampleCount > 2) ? 3 : SampleCount; - for (UByte i = 0; i < iterationCount; i++) + for (int i = 0; i < iterationCount; i++) { UnpackSensor(buffer + 8 + 16 * i, &Samples[i].AccelX, &Samples[i].AccelY, &Samples[i].AccelZ); UnpackSensor(buffer + 16 + 16 * i, &Samples[i].GyroX, &Samples[i].GyroY, &Samples[i].GyroZ); @@ -152,210 +115,6 @@ struct TrackerMessage TrackerSensors Sensors; }; -bool DecodeTrackerMessage(TrackerMessage* message, UByte* buffer, int size) -{ - memset(message, 0, sizeof(TrackerMessage)); - - if (size < 4) - { - message->Type = TrackerMessage_SizeError; - return false; - } - - switch (buffer[0]) - { - case TrackerMessage_Sensors: - message->Type = message->Sensors.Decode(buffer, size); - break; - - default: - message->Type = TrackerMessage_Unknown; - break; - } - - return (message->Type < TrackerMessage_Unknown) && (message->Type != TrackerMessage_None); -} - - -// ***** SensorRangeImpl Implementation - -// Sensor HW only accepts specific maximum range values, used to maximize -// the 16-bit sensor outputs. Use these ramps to specify and report appropriate values. 
-static const UInt16 AccelRangeRamp[] = { 2, 4, 8, 16 }; -static const UInt16 GyroRangeRamp[] = { 250, 500, 1000, 2000 }; -static const UInt16 MagRangeRamp[] = { 880, 1300, 1900, 2500 }; - -static UInt16 SelectSensorRampValue(const UInt16* ramp, unsigned count, - float val, float factor, const char* label) -{ - UInt16 threshold = (UInt16)(val * factor); - - for (unsigned i = 0; i<count; i++) - { - if (ramp[i] >= threshold) - return ramp[i]; - } - OVR_DEBUG_LOG(("SensorDevice::SetRange - %s clamped to %0.4f", - label, float(ramp[count-1]) / factor)); - OVR_UNUSED2(factor, label); - return ramp[count-1]; -} - -// SensorScaleImpl provides buffer packing logic for the Sensor Range -// record that can be applied to DK1 sensor through Get/SetFeature. We expose this -// through SensorRange class, which has different units. -struct SensorRangeImpl -{ - enum { PacketSize = 8 }; - UByte Buffer[PacketSize]; - - UInt16 CommandId; - UInt16 AccelScale; - UInt16 GyroScale; - UInt16 MagScale; - - SensorRangeImpl(const SensorRange& r, UInt16 commandId = 0) - { - SetSensorRange(r, commandId); - } - - void SetSensorRange(const SensorRange& r, UInt16 commandId = 0) - { - CommandId = commandId; - AccelScale = SelectSensorRampValue(AccelRangeRamp, sizeof(AccelRangeRamp)/sizeof(AccelRangeRamp[0]), - r.MaxAcceleration, (1.0f / 9.81f), "MaxAcceleration"); - GyroScale = SelectSensorRampValue(GyroRangeRamp, sizeof(GyroRangeRamp)/sizeof(GyroRangeRamp[0]), - r.MaxRotationRate, Math<float>::RadToDegreeFactor, "MaxRotationRate"); - MagScale = SelectSensorRampValue(MagRangeRamp, sizeof(MagRangeRamp)/sizeof(MagRangeRamp[0]), - r.MaxMagneticField, 1000.0f, "MaxMagneticField"); - Pack(); - } - - void GetSensorRange(SensorRange* r) - { - r->MaxAcceleration = AccelScale * 9.81f; - r->MaxRotationRate = DegreeToRad((float)GyroScale); - r->MaxMagneticField= MagScale * 0.001f; - } - - static SensorRange GetMaxSensorRange() - { - return SensorRange(AccelRangeRamp[sizeof(AccelRangeRamp)/sizeof(AccelRangeRamp[0]) - 1] * 9.81f, - GyroRangeRamp[sizeof(GyroRangeRamp)/sizeof(GyroRangeRamp[0]) - 1] * - Math<float>::DegreeToRadFactor, - MagRangeRamp[sizeof(MagRangeRamp)/sizeof(MagRangeRamp[0]) - 1] * 0.001f); - } - - void Pack() - { - Buffer[0] = 4; - Buffer[1] = UByte(CommandId & 0xFF); - Buffer[2] = UByte(CommandId >> 8); - Buffer[3] = UByte(AccelScale); - Buffer[4] = UByte(GyroScale & 0xFF); - Buffer[5] = UByte(GyroScale >> 8); - Buffer[6] = UByte(MagScale & 0xFF); - Buffer[7] = UByte(MagScale >> 8); - } - - void Unpack() - { - CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); - AccelScale= Buffer[3]; - GyroScale = Buffer[4] | (UInt16(Buffer[5]) << 8); - MagScale = Buffer[6] | (UInt16(Buffer[7]) << 8); - } -}; - - -// Sensor configuration command, ReportId == 2. - -struct SensorConfigImpl -{ - enum { PacketSize = 7 }; - UByte Buffer[PacketSize]; - - // Flag values for Flags. - enum { - Flag_RawMode = 0x01, - Flag_CallibrationTest = 0x02, // Internal test mode - Flag_UseCallibration = 0x04, - Flag_AutoCallibration = 0x08, - Flag_MotionKeepAlive = 0x10, - Flag_CommandKeepAlive = 0x20, - Flag_SensorCoordinates = 0x40 - }; - - UInt16 CommandId; - UByte Flags; - UInt16 PacketInterval; - UInt16 KeepAliveIntervalMs; - - SensorConfigImpl() : CommandId(0), Flags(0), PacketInterval(0), KeepAliveIntervalMs(0) - { - memset(Buffer, 0, PacketSize); - Buffer[0] = 2; - } - - void SetSensorCoordinates(bool sensorCoordinates) - { Flags = (Flags & ~Flag_SensorCoordinates) | (sensorCoordinates ? 
Flag_SensorCoordinates : 0); } - bool IsUsingSensorCoordinates() const - { return (Flags & Flag_SensorCoordinates) != 0; } - - void Pack() - { - Buffer[0] = 2; - Buffer[1] = UByte(CommandId & 0xFF); - Buffer[2] = UByte(CommandId >> 8); - Buffer[3] = Flags; - Buffer[4] = UByte(PacketInterval); - Buffer[5] = UByte(KeepAliveIntervalMs & 0xFF); - Buffer[6] = UByte(KeepAliveIntervalMs >> 8); - } - - void Unpack() - { - CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); - Flags = Buffer[3]; - PacketInterval = Buffer[4]; - KeepAliveIntervalMs= Buffer[5] | (UInt16(Buffer[6]) << 8); - } - -}; - - -// SensorKeepAlive - feature report that needs to be sent at regular intervals for sensor -// to receive commands. -struct SensorKeepAliveImpl -{ - enum { PacketSize = 5 }; - UByte Buffer[PacketSize]; - - UInt16 CommandId; - UInt16 KeepAliveIntervalMs; - - SensorKeepAliveImpl(UInt16 interval = 0, UInt16 commandId = 0) - : CommandId(commandId), KeepAliveIntervalMs(interval) - { - Pack(); - } - - void Pack() - { - Buffer[0] = 8; - Buffer[1] = UByte(CommandId & 0xFF); - Buffer[2] = UByte(CommandId >> 8); - Buffer[3] = UByte(KeepAliveIntervalMs & 0xFF); - Buffer[4] = UByte(KeepAliveIntervalMs >> 8); - } - - void Unpack() - { - CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); - KeepAliveIntervalMs= Buffer[3] | (UInt16(Buffer[4]) << 8); - } -}; - //------------------------------------------------------------------------------------- // ***** SensorDisplayInfoImpl @@ -368,22 +127,39 @@ SensorDisplayInfoImpl::SensorDisplayInfoImpl() void SensorDisplayInfoImpl::Unpack() { - CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); - DistortionType = Buffer[3]; - HResolution = DecodeUInt16(Buffer+4); - VResolution = DecodeUInt16(Buffer+6); - HScreenSize = DecodeUInt32(Buffer+8) * (1/1000000.f); - VScreenSize = DecodeUInt32(Buffer+12) * (1/1000000.f); - VCenter = DecodeUInt32(Buffer+16) * (1/1000000.f); - LensSeparation = DecodeUInt32(Buffer+20) * (1/1000000.f); - EyeToScreenDistance[0] = DecodeUInt32(Buffer+24) * (1/1000000.f); - EyeToScreenDistance[1] = DecodeUInt32(Buffer+28) * (1/1000000.f); - DistortionK[0] = DecodeFloat(Buffer+32); - DistortionK[1] = DecodeFloat(Buffer+36); - DistortionK[2] = DecodeFloat(Buffer+40); - DistortionK[3] = DecodeFloat(Buffer+44); - DistortionK[4] = DecodeFloat(Buffer+48); - DistortionK[5] = DecodeFloat(Buffer+52); + CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); + DistortionType = Buffer[3]; + HResolution = DecodeUInt16(Buffer+4); + VResolution = DecodeUInt16(Buffer+6); + HScreenSize = DecodeUInt32(Buffer+8) * (1/1000000.f); + VScreenSize = DecodeUInt32(Buffer+12) * (1/1000000.f); + VCenter = DecodeUInt32(Buffer+16) * (1/1000000.f); + LensSeparation = DecodeUInt32(Buffer+20) * (1/1000000.f); + +#if 0 + // These are not well-measured on most devices - probably best to ignore them. + OutsideLensSurfaceToScreen[0] = DecodeUInt32(Buffer+24) * (1/1000000.f); + OutsideLensSurfaceToScreen[1] = DecodeUInt32(Buffer+28) * (1/1000000.f); + // TODO: add spline-based distortion. + // TODO: currently these values are all zeros in the HMD itself. + DistortionK[0] = DecodeFloat(Buffer+32); + DistortionK[1] = DecodeFloat(Buffer+36); + DistortionK[2] = DecodeFloat(Buffer+40); + DistortionK[3] = DecodeFloat(Buffer+44); + DistortionK[4] = DecodeFloat(Buffer+48); + DistortionK[5] = DecodeFloat(Buffer+52); +#else + // The above are either measured poorly, or don't have values at all. + // To remove the temptation to use them, set them to junk. 
+ OutsideLensSurfaceToScreen[0] = -1.0f; + OutsideLensSurfaceToScreen[1] = -1.0f; + DistortionK[0] = -1.0f; + DistortionK[1] = -1.0f; + DistortionK[2] = -1.0f; + DistortionK[3] = -1.0f; + DistortionK[4] = -1.0f; + DistortionK[5] = -1.0f; +#endif } @@ -456,10 +232,9 @@ void SensorDeviceFactory::EnumerateDevices(EnumerateVisitor& visitor) bool SensorDeviceFactory::MatchVendorProduct(UInt16 vendorId, UInt16 productId) const { - // search for a tracker sensor or a tracker boot loader device - return ((vendorId == Sensor_VendorId) && (productId == Sensor_ProductId)) || - ((vendorId == Sensor_OldVendorId) && (productId == Sensor_OldProductId)) || - ((vendorId == Sensor_VendorId) && (productId == Sensor_BootLoader)); + return ((vendorId == Sensor_VendorId) && (productId == Sensor_Tracker_ProductId)) || + ((vendorId == Sensor_VendorId) && (productId == Sensor_Tracker2_ProductId)) || + ((vendorId == Sensor_VendorId) && (productId == Sensor_KTracker_ProductId)); } bool SensorDeviceFactory::DetectHIDDevice(DeviceManager* pdevMgr, const HIDDeviceDesc& desc) @@ -488,6 +263,11 @@ bool SensorDeviceFactory::DetectHIDDevice(DeviceManager* pdevMgr, const HIDDevic DeviceBase* SensorDeviceCreateDesc::NewDeviceInstance() { + if (HIDDesc.ProductId == Sensor_Tracker2_ProductId) + { + return new Sensor2DeviceImpl(this); + } + return new SensorDeviceImpl(this); } @@ -497,18 +277,18 @@ bool SensorDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const (info->InfoClassType != Device_None)) return false; - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, HIDDesc.Product.ToCStr()); - OVR_strcpy(info->Manufacturer, DeviceInfo::MaxNameLength, HIDDesc.Manufacturer.ToCStr()); - info->Type = Device_Sensor; + info->Type = Device_Sensor; + info->ProductName = HIDDesc.Product; + info->Manufacturer = HIDDesc.Manufacturer; + info->Version = HIDDesc.VersionNumber; if (info->InfoClassType == Device_Sensor) { SensorInfo* sinfo = (SensorInfo*)info; sinfo->VendorId = HIDDesc.VendorId; sinfo->ProductId = HIDDesc.ProductId; - sinfo->Version = HIDDesc.VersionNumber; sinfo->MaxRanges = SensorRangeImpl::GetMaxSensorRange(); - OVR_strcpy(sinfo->SerialNumber, sizeof(sinfo->SerialNumber),HIDDesc.SerialNumber.ToCStr()); + sinfo->SerialNumber = HIDDesc.SerialNumber; } return true; } @@ -520,14 +300,22 @@ SensorDeviceImpl::SensorDeviceImpl(SensorDeviceCreateDesc* createDesc) : OVR::HIDDeviceImpl<OVR::SensorDevice>(createDesc, 0), Coordinates(SensorDevice::Coord_Sensor), HWCoordinates(SensorDevice::Coord_HMD), // HW reports HMD coordinates by default. - NextKeepAliveTicks(0), - MaxValidRange(SensorRangeImpl::GetMaxSensorRange()) + NextKeepAliveTickSeconds(0), + FullTimestamp(0), + MaxValidRange(SensorRangeImpl::GetMaxSensorRange()), + magCalibrated(false) { - SequenceValid = false; - LastSampleCount= 0; + SequenceValid = false; + LastSampleCount = 0; LastTimestamp = 0; OldCommandId = 0; + + PrevAbsoluteTime = 0.0; + +#ifdef OVR_OS_ANDROID + pPhoneSensors = PhoneSensors::Create(); +#endif } SensorDeviceImpl::~SensorDeviceImpl() @@ -536,15 +324,13 @@ SensorDeviceImpl::~SensorDeviceImpl() OVR_ASSERT(!pCreateDesc->pDevice); } + // Internal creation APIs. bool SensorDeviceImpl::Initialize(DeviceBase* parent) { if (HIDDeviceImpl<OVR::SensorDevice>::Initialize(parent)) { openDevice(); - - LogText("OVR::SensorDevice initialized.\n"); - return true; } @@ -566,6 +352,17 @@ void SensorDeviceImpl::openDevice() setRange(CurrentRange); } + // Read the currently configured calibration from sensor. 
+ SensorFactoryCalibrationImpl sc; + if (GetInternalDevice()->GetFeatureReport(sc.Buffer, SensorFactoryCalibrationImpl::PacketSize)) + { + sc.Unpack(); + AccelCalibrationOffset = sc.AccelOffset; + GyroCalibrationOffset = sc.GyroOffset; + AccelCalibrationMatrix = sc.AccelMatrix; + GyroCalibrationMatrix = sc.GyroMatrix; + CalibrationTemperature = sc.Temperature; + } // If the sensor has "DisplayInfo" data, use HMD coordinate frame by default. SensorDisplayInfoImpl displayInfo; @@ -583,12 +380,21 @@ void SensorDeviceImpl::openDevice() // Set Keep-alive at 10 seconds. SensorKeepAliveImpl skeepAlive(10 * 1000); GetInternalDevice()->SetFeatureReport(skeepAlive.Buffer, SensorKeepAliveImpl::PacketSize); + + // Load mag calibration + MagCalibrationReport report; + bool res = GetMagCalibrationReport(&report); + if (res && report.Version > 0) + { + magCalibration = report.Calibration; + magCalibrated = true; + } } void SensorDeviceImpl::closeDeviceOnError() { LogText("OVR::SensorDevice - Lost connection to '%s'\n", getHIDDesc()->Path.ToCStr()); - NextKeepAliveTicks = 0; + NextKeepAliveTickSeconds = 0; } void SensorDeviceImpl::Shutdown() @@ -598,16 +404,14 @@ void SensorDeviceImpl::Shutdown() LogText("OVR::SensorDevice - Closed '%s'\n", getHIDDesc()->Path.ToCStr()); } - void SensorDeviceImpl::OnInputReport(UByte* pData, UInt32 length) { - bool processed = false; + bool processed = false; if (!processed) { - TrackerMessage message; - if (DecodeTrackerMessage(&message, pData, length)) + if (decodeTrackerMessage(&message, pData, length)) { processed = true; onTrackerMessage(&message); @@ -615,13 +419,12 @@ void SensorDeviceImpl::OnInputReport(UByte* pData, UInt32 length) } } -UInt64 SensorDeviceImpl::OnTicks(UInt64 ticksMks) +double SensorDeviceImpl::OnTicks(double tickSeconds) { - - if (ticksMks >= NextKeepAliveTicks) + if (tickSeconds >= NextKeepAliveTickSeconds) { // Use 3-seconds keep alive by default. - UInt64 keepAliveDelta = Timer::MksPerSecond * 3; + double keepAliveDelta = 3.0; // Set Keep-alive at 10 seconds. SensorKeepAliveImpl skeepAlive(10 * 1000); @@ -629,9 +432,9 @@ UInt64 SensorDeviceImpl::OnTicks(UInt64 ticksMks) GetInternalDevice()->SetFeatureReport(skeepAlive.Buffer, SensorKeepAliveImpl::PacketSize); // Emit keep-alive every few seconds. - NextKeepAliveTicks = ticksMks + keepAliveDelta; + NextKeepAliveTickSeconds = tickSeconds + keepAliveDelta; } - return NextKeepAliveTicks - ticksMks; + return NextKeepAliveTickSeconds - tickSeconds; } bool SensorDeviceImpl::SetRange(const SensorRange& range, bool waitFlag) @@ -758,17 +561,49 @@ Void SensorDeviceImpl::setReportRate(unsigned rateHz) return 0; } -void SensorDeviceImpl::SetMessageHandler(MessageHandler* handler) +void SensorDeviceImpl::GetFactoryCalibration(Vector3f* AccelOffset, Vector3f* GyroOffset, + Matrix4f* AccelMatrix, Matrix4f* GyroMatrix, + float* Temperature) { - if (handler) + *AccelOffset = AccelCalibrationOffset; + *GyroOffset = GyroCalibrationOffset; + *AccelMatrix = AccelCalibrationMatrix; + *GyroMatrix = GyroCalibrationMatrix; + *Temperature = CalibrationTemperature; +} + +void SensorDeviceImpl::SetOnboardCalibrationEnabled(bool enabled) +{ + // Push call with wait. 
+ GetManagerImpl()->GetThreadQueue()-> + PushCall(this, &SensorDeviceImpl::setOnboardCalibrationEnabled, enabled, true); +} + +Void SensorDeviceImpl::setOnboardCalibrationEnabled(bool enabled) +{ + // Read the original configuration + SensorConfigImpl scfg; + if (GetInternalDevice()->GetFeatureReport(scfg.Buffer, SensorConfigImpl::PacketSize)) { - SequenceValid = false; - DeviceBase::SetMessageHandler(handler); + scfg.Unpack(); } + + if (enabled) + scfg.Flags |= (SensorConfigImpl::Flag_AutoCalibration | SensorConfigImpl::Flag_UseCalibration); else - { - DeviceBase::SetMessageHandler(handler); - } + scfg.Flags &= ~(SensorConfigImpl::Flag_AutoCalibration | SensorConfigImpl::Flag_UseCalibration); + + scfg.Pack(); + + GetInternalDevice()->SetFeatureReport(scfg.Buffer, SensorConfigImpl::PacketSize); + return 0; +} + +void SensorDeviceImpl::AddMessageHandler(MessageHandler* handler) +{ + if (handler) + SequenceValid = false; + DeviceBase::AddMessageHandler(handler); } // Sensor reports data in the following coordinate system: @@ -793,20 +628,18 @@ Vector3f AccelFromBodyFrameUpdate(const TrackerSensors& update, UByte sampleNumb Vector3f MagFromBodyFrameUpdate(const TrackerSensors& update, + Matrix4f magCalibration, bool convertHMDToSensor = false) { + float mx = (float)update.MagX; + float my = (float)update.MagY; + float mz = (float)update.MagZ; // Note: Y and Z are swapped in comparison to the Accel. // This accounts for DK1 sensor firmware axis swap, which should be undone in future releases. - if (!convertHMDToSensor) - { - return Vector3f( (float)update.MagX, - (float)update.MagZ, - (float)update.MagY) * 0.0001f; - } - - return Vector3f( (float)update.MagX, - (float)update.MagY, - -(float)update.MagZ) * 0.0001f; + Vector3f mag = convertHMDToSensor ? Vector3f(mx, my, -mz) : Vector3f(mx, mz, my); + mag *= 0.0001f; + // Apply calibration + return magCalibration.Transform(mag); } Vector3f EulerFromBodyFrameUpdate(const TrackerSensors& update, UByte sampleNumber, @@ -821,42 +654,86 @@ Vector3f EulerFromBodyFrameUpdate(const TrackerSensors& update, UByte sampleNumb return val * 0.0001f; } +bool SensorDeviceImpl::decodeTrackerMessage(TrackerMessage* message, UByte* buffer, int size) +{ + memset(message, 0, sizeof(TrackerMessage)); + + if (size < 4) + { + message->Type = TrackerMessage_SizeError; + return false; + } + + switch (buffer[0]) + { + case TrackerMessage_Sensors: + message->Type = message->Sensors.Decode(buffer, size); + break; + + default: + message->Type = TrackerMessage_Unknown; + break; + } + + return (message->Type < TrackerMessage_Unknown) && (message->Type != TrackerMessage_None); +} void SensorDeviceImpl::onTrackerMessage(TrackerMessage* message) { if (message->Type != TrackerMessage_Sensors) return; - const float timeUnit = (1.0f / 1000.f); - TrackerSensors& s = message->Sensors; + const double timeUnit = (1.0 / 1000.0); + double scaledTimeUnit = timeUnit; + TrackerSensors& s = message->Sensors; + // DK1 timestamps the first sample, so the actual device time will be later + // by the time we get the message if there are multiple samples. + int timestampAdjust = (s.SampleCount > 0) ? s.SampleCount-1 : 0; + + const double now = Timer::GetSeconds(); + double absoluteTimeSeconds = 0.0; - // Call OnMessage() within a lock to avoid conflicts with handlers. 
- Lock::Locker scopeLock(HandlerRef.GetLock()); - - if (SequenceValid) { unsigned timestampDelta; if (s.Timestamp < LastTimestamp) + { + // The timestamp rolled around the 16 bit counter, so FullTimeStamp + // needs a high word increment. + FullTimestamp += 0x10000; timestampDelta = ((((int)s.Timestamp) + 0x10000) - (int)LastTimestamp); + } else + { timestampDelta = (s.Timestamp - LastTimestamp); + } + // Update the low word of FullTimeStamp + FullTimestamp = ( FullTimestamp & ~0xffff ) | s.Timestamp; - // If we missed a small number of samples, replicate the last sample. + double deviceTime = (FullTimestamp + timestampAdjust) * timeUnit; + absoluteTimeSeconds = TimeFilter.SampleToSystemTime(deviceTime, now, PrevAbsoluteTime); + scaledTimeUnit = TimeFilter.ScaleTimeUnit(timeUnit); + PrevAbsoluteTime = absoluteTimeSeconds; + + // If we missed a small number of samples, generate the sample that would have immediately + // proceeded the current one. Re-use the IMU values from the last processed sample. if ((timestampDelta > LastSampleCount) && (timestampDelta <= 254)) { - if (HandlerRef.GetHandler()) + if (HandlerRef.HasHandlers()) { MessageBodyFrame sensors(this); - sensors.TimeDelta = (timestampDelta - LastSampleCount) * timeUnit; - sensors.Acceleration = LastAcceleration; - sensors.RotationRate = LastRotationRate; - sensors.MagneticField = LastMagneticField; - sensors.Temperature = LastTemperature; - HandlerRef.GetHandler()->OnMessage(sensors); + sensors.AbsoluteTimeSeconds = absoluteTimeSeconds - s.SampleCount * scaledTimeUnit; + sensors.TimeDelta = (float)((timestampDelta - LastSampleCount) * scaledTimeUnit); + sensors.Acceleration = LastAcceleration; + sensors.RotationRate = LastRotationRate; + sensors.MagneticField = LastMagneticField; + sensors.Temperature = LastTemperature; + sensors.MagCalibrated = magCalibrated; + + HandlerRef.Call(sensors); } } } @@ -867,37 +744,58 @@ void SensorDeviceImpl::onTrackerMessage(TrackerMessage* message) LastMagneticField= Vector3f(0); LastTemperature = 0; SequenceValid = true; + + // This is our baseline sensor to host time delta, + // it will be adjusted with each new message. + FullTimestamp = s.Timestamp; + + double deviceTime = (FullTimestamp + timestampAdjust) * timeUnit; + absoluteTimeSeconds = TimeFilter.SampleToSystemTime(deviceTime, now, PrevAbsoluteTime); + scaledTimeUnit = TimeFilter.ScaleTimeUnit(timeUnit); + PrevAbsoluteTime = absoluteTimeSeconds; } LastSampleCount = s.SampleCount; LastTimestamp = s.Timestamp; bool convertHMDToSensor = (Coordinates == Coord_Sensor) && (HWCoordinates == Coord_HMD); - - if (HandlerRef.GetHandler()) + +#ifdef OVR_OS_ANDROID + // LDC - Normally we get the coordinate system from the tracker. + // Since KTracker doesn't store it we'll always assume HMD coordinate system. 
+ convertHMDToSensor = false; +#endif + + if (HandlerRef.HasHandlers()) { - MessageBodyFrame sensors(this); + MessageBodyFrame sensors(this); + sensors.MagCalibrated = magCalibrated; UByte iterations = s.SampleCount; if (s.SampleCount > 3) { iterations = 3; - sensors.TimeDelta = (s.SampleCount - 2) * timeUnit; + sensors.TimeDelta = (float)((s.SampleCount - 2) * scaledTimeUnit); } else { - sensors.TimeDelta = timeUnit; + sensors.TimeDelta = (float)scaledTimeUnit; } for (UByte i = 0; i < iterations; i++) - { - sensors.Acceleration = AccelFromBodyFrameUpdate(s, i, convertHMDToSensor); - sensors.RotationRate = EulerFromBodyFrameUpdate(s, i, convertHMDToSensor); - sensors.MagneticField= MagFromBodyFrameUpdate(s, convertHMDToSensor); - sensors.Temperature = s.Temperature * 0.01f; - HandlerRef.GetHandler()->OnMessage(sensors); + { + sensors.AbsoluteTimeSeconds = absoluteTimeSeconds - ( iterations - 1 - i ) * scaledTimeUnit; + sensors.Acceleration = AccelFromBodyFrameUpdate(s, i, convertHMDToSensor); + sensors.RotationRate = EulerFromBodyFrameUpdate(s, i, convertHMDToSensor); + sensors.MagneticField = MagFromBodyFrameUpdate(s, magCalibration, convertHMDToSensor); + +#ifdef OVR_OS_ANDROID + replaceWithPhoneMag(&(sensors.MagneticField)); +#endif + sensors.Temperature = s.Temperature * 0.01f; + HandlerRef.Call(sensors); // TimeDelta for the last two sample is always fixed. - sensors.TimeDelta = timeUnit; + sensors.TimeDelta = (float)scaledTimeUnit; } LastAcceleration = sensors.Acceleration; @@ -910,11 +808,309 @@ void SensorDeviceImpl::onTrackerMessage(TrackerMessage* message) UByte i = (s.SampleCount > 3) ? 2 : (s.SampleCount - 1); LastAcceleration = AccelFromBodyFrameUpdate(s, i, convertHMDToSensor); LastRotationRate = EulerFromBodyFrameUpdate(s, i, convertHMDToSensor); - LastMagneticField = MagFromBodyFrameUpdate(s, convertHMDToSensor); + LastMagneticField = MagFromBodyFrameUpdate(s, magCalibration, convertHMDToSensor); + +#ifdef OVR_OS_ANDROID + replaceWithPhoneMag(&LastMagneticField); +#endif LastTemperature = s.Temperature * 0.01f; } } + +#ifdef OVR_OS_ANDROID + +void SensorDeviceImpl::replaceWithPhoneMag(Vector3f* val) +{ + + // Native calibrated. + pPhoneSensors->SetMagSource(PhoneSensors::MagnetometerSource_Native); + + Vector3f magPhone; + pPhoneSensors->GetLatestMagValue(&magPhone); + + // Phone value is in micro-Tesla. Convert it to Gauss and flip axes. 
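+ // (1 gauss = 100 microtesla, so the 10000/1000000 factor below is just 0.01.)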
+ magPhone *= 10000.0f/1000000.0f; + + Vector3f res; + res.x = -magPhone.y; + res.y = magPhone.x; + res.z = magPhone.z; + + *val = res; +} +#endif + +const int MAX_DEVICE_PROFILE_MAJOR_VERSION = 1; + +// Writes the current calibration for a particular device to a device profile file +bool SensorDeviceImpl::SetMagCalibrationReport(const MagCalibrationReport &data) +{ + // Get device info + SensorInfo sinfo; + GetDeviceInfo(&sinfo); + + // A named calibration may be specified for calibration in different + // environments, otherwise the default calibration is used + const char* calibrationName = "default"; + + // Generate a mag calibration event + JSON* calibration = JSON::CreateObject(); + // (hardcoded for now) the measurement and representation method + calibration->AddStringItem("Version", "2.0"); + calibration->AddStringItem("Name", "default"); + + // time stamp the calibration + char time_str[64]; + +#ifdef OVR_OS_WIN32 + struct tm caltime; + time_t now = time(0); + localtime_s(&caltime, &now); + strftime(time_str, 64, "%Y-%m-%d %H:%M:%S", &caltime); +#else + struct tm* caltime; + time_t now = time(0); + caltime = localtime(&now); + strftime(time_str, 64, "%Y-%m-%d %H:%M:%S", caltime); +#endif + + calibration->AddStringItem("Time", time_str); + + // write the full calibration matrix + char matrix[256]; + data.Calibration.ToString(matrix, 256); + calibration->AddStringItem("CalibrationMatrix", matrix); + // save just the offset, for backwards compatibility + // this can be removed when we don't want to support 0.2.4 anymore + Vector3f center(data.Calibration.M[0][3], data.Calibration.M[1][3], data.Calibration.M[2][3]); + Matrix4f tmp = data.Calibration; tmp.M[0][3] = tmp.M[1][3] = tmp.M[2][3] = 0; tmp.M[3][3] = 1; + center = tmp.Inverted().Transform(center); + Matrix4f oldcalmat; oldcalmat.M[0][3] = center.x; oldcalmat.M[1][3] = center.y; oldcalmat.M[2][3] = center.z; + oldcalmat.ToString(matrix, 256); + calibration->AddStringItem("Calibration", matrix); + + String path = GetBaseOVRPath(true); + path += "/Devices.json"; + + // Look for a preexisting device file to edit + Ptr<JSON> root = *JSON::Load(path); + if (root) + { // Quick sanity check of the file type and format before we parse it + JSON* version = root->GetFirstItem(); + if (version && version->Name == "Oculus Device Profile Version") + { + int major = atoi(version->Value.ToCStr()); + if (major > MAX_DEVICE_PROFILE_MAJOR_VERSION) + { + // don't use the file on unsupported major version number + root->Release(); + root = NULL; + } + } + else + { + root->Release(); + root = NULL; + } + } + + JSON* device = NULL; + if (root) + { + device = root->GetFirstItem(); // skip the header + device = root->GetNextItem(device); + while (device) + { // Search for a previous calibration with the same name for this device + // and remove it before adding the new one + if (device->Name == "Device") + { + JSON* item = device->GetItemByName("Serial"); + if (item && item->Value == sinfo.SerialNumber) + { // found an entry for this device + item = device->GetNextItem(item); + while (item) + { + if (item->Name == "MagCalibration") + { + JSON* name = item->GetItemByName("Name"); + if (name && name->Value == calibrationName) + { // found a calibration of the same name + item->RemoveNode(); + item->Release(); + break; + } + } + item = device->GetNextItem(item); + } + + + /* + this is removed temporarily, since this is a sensor fusion setting, not sensor itself + should be moved to the correct place when Brant has finished the user profile 
implementation + // update the auto-mag flag + item = device->GetItemByName("EnableYawCorrection"); + if (item) + item->dValue = (double)EnableYawCorrection; + else + device->AddBoolItem("EnableYawCorrection", EnableYawCorrection);*/ + + break; + } + } + + device = root->GetNextItem(device); + } + } + else + { // Create a new device root + root = *JSON::CreateObject(); + root->AddStringItem("Oculus Device Profile Version", "1.0"); + } + + if (device == NULL) + { + device = JSON::CreateObject(); + device->AddStringItem("Product", sinfo.ProductName); + device->AddNumberItem("ProductID", sinfo.ProductId); + device->AddStringItem("Serial", sinfo.SerialNumber); + // removed temporarily, see above + //device->AddBoolItem("EnableYawCorrection", EnableYawCorrection); + + root->AddItem("Device", device); + } + + // Create and the add the new calibration event to the device + device->AddItem("MagCalibration", calibration); + return root->Save(path); +} + +// Loads a saved calibration for the specified device from the device profile file +bool SensorDeviceImpl::GetMagCalibrationReport(MagCalibrationReport* data) +{ + data->Version = 0; + data->Calibration.SetIdentity(); + + // Get device info + SensorInfo sinfo; + GetDeviceInfo(&sinfo); + + // A named calibration may be specified for calibration in different + // environments, otherwise the default calibration is used + const char* calibrationName = "default"; + + String path = GetBaseOVRPath(true); + path += "/Devices.json"; + + // Load the device profiles + Ptr<JSON> root = *JSON::Load(path); + if (root == NULL) + return false; + + // Quick sanity check of the file type and format before we parse it + JSON* version = root->GetFirstItem(); + if (version && version->Name == "Oculus Device Profile Version") + { + int major = atoi(version->Value.ToCStr()); + if (major > MAX_DEVICE_PROFILE_MAJOR_VERSION) + return false; // don't parse the file on unsupported major version number + } + else + { + return false; + } + + JSON* device = root->GetNextItem(version); + while (device) + { // Search for a previous calibration with the same name for this device + // and remove it before adding the new one + if (device->Name == "Device") + { + JSON* item = device->GetItemByName("Serial"); + if (item && item->Value == sinfo.SerialNumber) + { // found an entry for this device + + JSON* autoyaw = device->GetItemByName("EnableYawCorrection"); + // as a temporary HACK, return no calibration if EnableYawCorrection is off + // this will force disable yaw correction in SensorFusion + // proper solution would load the value in the Profile, which SensorFusion can access + if (autoyaw && autoyaw->dValue == 0) + return true; + + item = device->GetNextItem(item); + while (item) + { + if (item->Name == "MagCalibration") + { + JSON* calibration = item; + JSON* name = calibration->GetItemByName("Name"); + if (name && name->Value == calibrationName) + { // found a calibration with this name + + int major = 0; + JSON* version = calibration->GetItemByName("Version"); + if (version) + major = atoi(version->Value.ToCStr()); + + if (major > data->Version && major <= 2) + { + time_t now; + time(&now); + + // parse the calibration time + time_t calibration_time = now; + JSON* caltime = calibration->GetItemByName("Time"); + if (caltime) + { + const char* caltime_str = caltime->Value.ToCStr(); + + tm ct; + memset(&ct, 0, sizeof(tm)); + +#ifdef OVR_OS_WIN32 + struct tm nowtime; + localtime_s(&nowtime, &now); + ct.tm_isdst = nowtime.tm_isdst; + sscanf_s(caltime_str, "%d-%d-%d %d:%d:%d", + 
&ct.tm_year, &ct.tm_mon, &ct.tm_mday, + &ct.tm_hour, &ct.tm_min, &ct.tm_sec); +#else + struct tm* nowtime = localtime(&now); + ct.tm_isdst = nowtime->tm_isdst; + sscanf(caltime_str, "%d-%d-%d %d:%d:%d", + &ct.tm_year, &ct.tm_mon, &ct.tm_mday, + &ct.tm_hour, &ct.tm_min, &ct.tm_sec); +#endif + ct.tm_year -= 1900; + ct.tm_mon--; + calibration_time = mktime(&ct); + } + + // parse the calibration matrix + JSON* cal = calibration->GetItemByName("CalibrationMatrix"); + if (!cal) + cal = calibration->GetItemByName("Calibration"); + if (cal) + { + data->Calibration = Matrix4f::FromString(cal->Value.ToCStr()); + data->Version = (UByte)major; + } + } + } + } + item = device->GetNextItem(item); + } + + return true; + } + } + + device = root->GetNextItem(device); + } + + return true; +} + } // namespace OVR diff --git a/LibOVR/Src/OVR_SensorImpl.h b/LibOVR/Src/OVR_SensorImpl.h index 8b9eefb..70e05f8 100644 --- a/LibOVR/Src/OVR_SensorImpl.h +++ b/LibOVR/Src/OVR_SensorImpl.h @@ -5,16 +5,16 @@ Content : Sensor device specific implementation. Created : March 7, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -28,9 +28,14 @@ limitations under the License. #define OVR_SensorImpl_h #include "OVR_HIDDeviceImpl.h" +#include "OVR_SensorTimeFilter.h" + +#ifdef OVR_OS_ANDROID +#include "OVR_PhoneSensors.h" +#endif namespace OVR { - + struct TrackerMessage; class ExternalVisitor; @@ -150,18 +155,23 @@ struct SensorDisplayInfoImpl { Mask_BaseFmt = 0x0f, Mask_OptionFmts = 0xf0, - Base_None = 0x00, - Base_Screen = 0x01, - Base_Distortion = 0x02, + Base_None = 0, + Base_ScreenOnly = 1, + Base_Distortion = 2, }; UInt16 CommandId; + UByte DistortionType; UInt16 HResolution, VResolution; float HScreenSize, VScreenSize; float VCenter; float LensSeparation; - float EyeToScreenDistance[2]; + // Currently these values are not well-measured. + float OutsideLensSurfaceToScreen[2]; + // TODO: add DistortionEqn + // TODO: currently these values are all zeros and the + // distortion is hard-coded in HMDDeviceCreateDesc::GetDeviceInfo() float DistortionK[6]; SensorDisplayInfoImpl(); @@ -169,7 +179,6 @@ struct SensorDisplayInfoImpl void Unpack(); }; - //------------------------------------------------------------------------------------- // ***** OVR::SensorDeviceImpl @@ -186,11 +195,11 @@ public: virtual bool Initialize(DeviceBase* parent); virtual void Shutdown(); - virtual void SetMessageHandler(MessageHandler* handler); + virtual void AddMessageHandler(MessageHandler* handler); // HIDDevice::Notifier interface. virtual void OnInputReport(UByte* pData, UInt32 length); - virtual UInt64 OnTicks(UInt64 ticksMks); + virtual double OnTicks(double tickSeconds); // HMD-Mounted sensor has a different coordinate frame. 
virtual void SetCoordinateFrame(CoordinateFrame coordframe); @@ -200,6 +209,11 @@ public: virtual bool SetRange(const SensorRange& range, bool waitFlag); virtual void GetRange(SensorRange* range) const; + virtual void GetFactoryCalibration(Vector3f* AccelOffset, Vector3f* GyroOffset, + Matrix4f* AccelMatrix, Matrix4f* GyroMatrix, + float* Temperature); + virtual void SetOnboardCalibrationEnabled(bool enabled); + // Sets report rate (in Hz) of MessageBodyFrame messages (delivered through MessageHandler::OnMessage call). // Currently supported maximum rate is 1000Hz. If the rate is set to 500 or 333 Hz then OnMessage will be // called twice or thrice at the same 'tick'. @@ -213,20 +227,28 @@ public: virtual unsigned GetReportRate() const; // Hack to create HMD device from sensor display info. - static void EnumerateHMDFromSensorDisplayInfo(const SensorDisplayInfoImpl& displayInfo, - DeviceFactory::EnumerateVisitor& visitor); + static void EnumerateHMDFromSensorDisplayInfo(const SensorDisplayInfoImpl& displayInfo, + DeviceFactory::EnumerateVisitor& visitor); + + // These methods actually store data in a JSON file + virtual bool SetMagCalibrationReport(const MagCalibrationReport& data); + virtual bool GetMagCalibrationReport(MagCalibrationReport* data); + protected: - void openDevice(); - void closeDeviceOnError(); + virtual void openDevice(); + void closeDeviceOnError(); - Void setCoordinateFrame(CoordinateFrame coordframe); - bool setRange(const SensorRange& range); + Void setCoordinateFrame(CoordinateFrame coordframe); + bool setRange(const SensorRange& range); - Void setReportRate(unsigned rateHz); + Void setReportRate(unsigned rateHz); + + Void setOnboardCalibrationEnabled(bool enabled); // Called for decoded messages - void onTrackerMessage(TrackerMessage* message); + void onTrackerMessage(TrackerMessage* message); + bool decodeTrackerMessage(TrackerMessage* message, UByte* buffer, int size); // Helpers to reduce casting. /* @@ -242,23 +264,45 @@ protected: // so we track its state. CoordinateFrame Coordinates; CoordinateFrame HWCoordinates; - UInt64 NextKeepAliveTicks; + double NextKeepAliveTickSeconds; bool SequenceValid; - SInt16 LastTimestamp; + UInt16 LastTimestamp; UByte LastSampleCount; float LastTemperature; Vector3f LastAcceleration; Vector3f LastRotationRate; Vector3f LastMagneticField; + // This tracks wrap around, and should be monotonically increasing. + UInt32 FullTimestamp; + // Current sensor range obtained from device. SensorRange MaxValidRange; SensorRange CurrentRange; + + // IMU calibration obtained from device. + Vector3f AccelCalibrationOffset; + Vector3f GyroCalibrationOffset; + Matrix4f AccelCalibrationMatrix; + Matrix4f GyroCalibrationMatrix; + float CalibrationTemperature; UInt16 OldCommandId; -}; + SensorTimeFilter TimeFilter; + double PrevAbsoluteTime; + +#ifdef OVR_OS_ANDROID + void replaceWithPhoneMag(Vector3f* val); + + PhoneSensors* pPhoneSensors; +#endif + +private: + Matrix4f magCalibration; + bool magCalibrated; +}; } // namespace OVR diff --git a/LibOVR/Src/OVR_SensorImpl_Common.cpp b/LibOVR/Src/OVR_SensorImpl_Common.cpp new file mode 100644 index 0000000..a84d50a --- /dev/null +++ b/LibOVR/Src/OVR_SensorImpl_Common.cpp @@ -0,0 +1,245 @@ +/************************************************************************************ + +Filename : OVR_SensorImpl_Common.cpp +Content : Source common to SensorImpl and Sensor2Impl. +Created : January 21, 2014 +Authors : Lee Cooper + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. 
+ +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "OVR_SensorImpl_Common.h" +#include "Kernel/OVR_Alg.h" + +namespace OVR +{ + +void UnpackSensor(const UByte* buffer, SInt32* x, SInt32* y, SInt32* z) +{ + // Sign extending trick + // from http://graphics.stanford.edu/~seander/bithacks.html#FixedSignExtend + struct {SInt32 x:21;} s; + + *x = s.x = (buffer[0] << 13) | (buffer[1] << 5) | ((buffer[2] & 0xF8) >> 3); + *y = s.x = ((buffer[2] & 0x07) << 18) | (buffer[3] << 10) | (buffer[4] << 2) | + ((buffer[5] & 0xC0) >> 6); + *z = s.x = ((buffer[5] & 0x3F) << 15) | (buffer[6] << 7) | (buffer[7] >> 1); +} + +void PackSensor(UByte* buffer, SInt32 x, SInt32 y, SInt32 z) +{ + // Pack 3 32 bit integers into 8 bytes + buffer[0] = UByte(x >> 13); + buffer[1] = UByte(x >> 5); + buffer[2] = UByte((x << 3) | ((y >> 18) & 0x07)); + buffer[3] = UByte(y >> 10); + buffer[4] = UByte(y >> 2); + buffer[5] = UByte((y << 6) | ((z >> 15) & 0x3F)); + buffer[6] = UByte(z >> 7); + buffer[7] = UByte(z << 1); +} + +UInt16 SelectSensorRampValue(const UInt16* ramp, unsigned count, + float val, float factor, const char* label) +{ + UInt16 threshold = (UInt16)(val * factor); + + for (unsigned i = 0; i<count; i++) + { + if (ramp[i] >= threshold) + return ramp[i]; + } + OVR_DEBUG_LOG(("SensorDevice::SetRange - %s clamped to %0.4f", + label, float(ramp[count-1]) / factor)); + OVR_UNUSED2(factor, label); + return ramp[count-1]; +} + +SensorRangeImpl::SensorRangeImpl(const SensorRange& r, UInt16 commandId) +{ + SetSensorRange(r, commandId); +} + +void SensorRangeImpl::SetSensorRange(const SensorRange& r, UInt16 commandId) +{ + CommandId = commandId; + AccelScale = SelectSensorRampValue(AccelRangeRamp, sizeof(AccelRangeRamp)/sizeof(AccelRangeRamp[0]), + r.MaxAcceleration, (1.0f / 9.81f), "MaxAcceleration"); + GyroScale = SelectSensorRampValue(GyroRangeRamp, sizeof(GyroRangeRamp)/sizeof(GyroRangeRamp[0]), + r.MaxRotationRate, Math<float>::RadToDegreeFactor, "MaxRotationRate"); + MagScale = SelectSensorRampValue(MagRangeRamp, sizeof(MagRangeRamp)/sizeof(MagRangeRamp[0]), + r.MaxMagneticField, 1000.0f, "MaxMagneticField"); + Pack(); +} + +void SensorRangeImpl::GetSensorRange(SensorRange* r) +{ + r->MaxAcceleration = AccelScale * 9.81f; + r->MaxRotationRate = DegreeToRad((float)GyroScale); + r->MaxMagneticField= MagScale * 0.001f; +} + +SensorRange SensorRangeImpl::GetMaxSensorRange() +{ + return SensorRange(AccelRangeRamp[sizeof(AccelRangeRamp)/sizeof(AccelRangeRamp[0]) - 1] * 9.81f, + GyroRangeRamp[sizeof(GyroRangeRamp)/sizeof(GyroRangeRamp[0]) - 1] * + Math<float>::DegreeToRadFactor, + MagRangeRamp[sizeof(MagRangeRamp)/sizeof(MagRangeRamp[0]) - 1] * 0.001f); +} + +void SensorRangeImpl::Pack() +{ + Buffer[0] = 4; + Buffer[1] = UByte(CommandId & 0xFF); + Buffer[2] = 
UByte(CommandId >> 8); + Buffer[3] = UByte(AccelScale); + Buffer[4] = UByte(GyroScale & 0xFF); + Buffer[5] = UByte(GyroScale >> 8); + Buffer[6] = UByte(MagScale & 0xFF); + Buffer[7] = UByte(MagScale >> 8); +} + +void SensorRangeImpl::Unpack() +{ + CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); + AccelScale= Buffer[3]; + GyroScale = Buffer[4] | (UInt16(Buffer[5]) << 8); + MagScale = Buffer[6] | (UInt16(Buffer[7]) << 8); +} + +SensorConfigImpl::SensorConfigImpl() + : CommandId(0), Flags(0), PacketInterval(0), KeepAliveIntervalMs(0) +{ + memset(Buffer, 0, PacketSize); + Buffer[0] = 2; +} + +void SensorConfigImpl::SetSensorCoordinates(bool sensorCoordinates) +{ + Flags = (Flags & ~Flag_SensorCoordinates) | (sensorCoordinates ? Flag_SensorCoordinates : 0); +} + +bool SensorConfigImpl::IsUsingSensorCoordinates() const +{ + return (Flags & Flag_SensorCoordinates) != 0; +} + +void SensorConfigImpl::Pack() +{ + Buffer[0] = 2; + Buffer[1] = UByte(CommandId & 0xFF); + Buffer[2] = UByte(CommandId >> 8); + Buffer[3] = Flags; + Buffer[4] = UByte(PacketInterval); + Buffer[5] = UByte(KeepAliveIntervalMs & 0xFF); + Buffer[6] = UByte(KeepAliveIntervalMs >> 8); +} + +void SensorConfigImpl::Unpack() +{ + CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); + Flags = Buffer[3]; + PacketInterval = Buffer[4]; + KeepAliveIntervalMs= Buffer[5] | (UInt16(Buffer[6]) << 8); +} + +SensorFactoryCalibrationImpl::SensorFactoryCalibrationImpl() + : AccelOffset(), GyroOffset(), AccelMatrix(), GyroMatrix(), Temperature(0) +{ + memset(Buffer, 0, PacketSize); + Buffer[0] = 3; +} + +void SensorFactoryCalibrationImpl::Pack() +{ + SInt32 x, y, z; + + Buffer[0] = 3; + + x = SInt32(AccelOffset.x * 1e4f); + y = SInt32(AccelOffset.y * 1e4f); + z = SInt32(AccelOffset.z * 1e4f); + PackSensor(Buffer + 3, x, y, z); + + x = SInt32(GyroOffset.x * 1e4f); + y = SInt32(GyroOffset.y * 1e4f); + z = SInt32(GyroOffset.z * 1e4f); + PackSensor(Buffer + 11, x, y, z); + + // ignore the scale matrices for now +} + +void SensorFactoryCalibrationImpl::Unpack() +{ + static const float sensorMax = (1 << 20) - 1; + SInt32 x, y, z; + + UnpackSensor(Buffer + 3, &x, &y, &z); + AccelOffset.y = (float) y * 1e-4f; + AccelOffset.z = (float) z * 1e-4f; + AccelOffset.x = (float) x * 1e-4f; + + UnpackSensor(Buffer + 11, &x, &y, &z); + GyroOffset.x = (float) x * 1e-4f; + GyroOffset.y = (float) y * 1e-4f; + GyroOffset.z = (float) z * 1e-4f; + + for (int i = 0; i < 3; i++) + { + UnpackSensor(Buffer + 19 + 8 * i, &x, &y, &z); + AccelMatrix.M[i][0] = (float) x / sensorMax; + AccelMatrix.M[i][1] = (float) y / sensorMax; + AccelMatrix.M[i][2] = (float) z / sensorMax; + AccelMatrix.M[i][i] += 1.0f; + } + + for (int i = 0; i < 3; i++) + { + UnpackSensor(Buffer + 43 + 8 * i, &x, &y, &z); + GyroMatrix.M[i][0] = (float) x / sensorMax; + GyroMatrix.M[i][1] = (float) y / sensorMax; + GyroMatrix.M[i][2] = (float) z / sensorMax; + GyroMatrix.M[i][i] += 1.0f; + } + + Temperature = (float) Alg::DecodeSInt16(Buffer + 67) / 100.0f; +} + +SensorKeepAliveImpl::SensorKeepAliveImpl(UInt16 interval, UInt16 commandId) + : CommandId(commandId), KeepAliveIntervalMs(interval) +{ + Pack(); +} + +void SensorKeepAliveImpl::Pack() +{ + Buffer[0] = 8; + Buffer[1] = UByte(CommandId & 0xFF); + Buffer[2] = UByte(CommandId >> 8); + Buffer[3] = UByte(KeepAliveIntervalMs & 0xFF); + Buffer[4] = UByte(KeepAliveIntervalMs >> 8); +} + +void SensorKeepAliveImpl::Unpack() +{ + CommandId = Buffer[1] | (UInt16(Buffer[2]) << 8); + KeepAliveIntervalMs= Buffer[3] | (UInt16(Buffer[4]) << 8); +} + +} // namespace OVR 
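Editor's note: OVR_SensorImpl_Common.cpp above packs each accelerometer/gyro/magnetometer sample as three 21-bit two's-complement integers in 8 bytes and recovers the sign on unpack with a 21-bit bit-field. The snippet below is a minimal, self-contained sketch of that round trip; the local packSample/unpackSample helpers and the standalone main() are illustrative stand-ins that copy the logic of the SDK's PackSensor/UnpackSensor so the sketch compiles without the SDK headers, and they are not part of this commit.

// Illustrative round trip for the 3 x 21-bit sample packing shown above.
#include <cassert>
#include <cstdint>

static void packSample(uint8_t* b, int32_t x, int32_t y, int32_t z)
{
    // Three 21-bit signed values occupy 63 of the 64 bits; the last bit is unused.
    b[0] = uint8_t(x >> 13);
    b[1] = uint8_t(x >> 5);
    b[2] = uint8_t((x << 3) | ((y >> 18) & 0x07));
    b[3] = uint8_t(y >> 10);
    b[4] = uint8_t(y >> 2);
    b[5] = uint8_t((y << 6) | ((z >> 15) & 0x3F));
    b[6] = uint8_t(z >> 7);
    b[7] = uint8_t(z << 1);
}

static void unpackSample(const uint8_t* b, int32_t* x, int32_t* y, int32_t* z)
{
    // Assigning through a 21-bit bit-field sign-extends the reassembled value.
    struct { int32_t v : 21; } s;
    *x = s.v = (b[0] << 13) | (b[1] << 5) | ((b[2] & 0xF8) >> 3);
    *y = s.v = ((b[2] & 0x07) << 18) | (b[3] << 10) | (b[4] << 2) | ((b[5] & 0xC0) >> 6);
    *z = s.v = ((b[5] & 0x3F) << 15) | (b[6] << 7) | (b[7] >> 1);
}

int main()
{
    // Any values in the signed 21-bit range [-2^20, 2^20 - 1] survive the trip.
    uint8_t buf[8];
    int32_t x, y, z;
    packSample(buf, -123456, 654321, -1);
    unpackSample(buf, &x, &y, &z);
    assert(x == -123456 && y == 654321 && z == -1);
    return 0;
}

Only values inside the signed 21-bit range round-trip exactly; larger magnitudes are truncated by the byte packing, so callers convert their floats to fixed point (for example the 1e4 scaling in SensorFactoryCalibrationImpl above) before packing.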
diff --git a/LibOVR/Src/OVR_SensorImpl_Common.h b/LibOVR/Src/OVR_SensorImpl_Common.h new file mode 100644 index 0000000..293330c --- /dev/null +++ b/LibOVR/Src/OVR_SensorImpl_Common.h @@ -0,0 +1,150 @@ +/************************************************************************************ + +Filename : OVR_SensorImpl_Common.h +Content : Source common to SensorImpl and Sensor2Impl. +Created : January 21, 2014 +Authors : Lee Cooper + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_SensorImpl_Common_h +#define OVR_SensorImpl_Common_h + +#include "Kernel/OVR_System.h" +#include "OVR_Device.h" + +namespace OVR +{ + +void UnpackSensor(const UByte* buffer, SInt32* x, SInt32* y, SInt32* z); +void PackSensor(UByte* buffer, SInt32 x, SInt32 y, SInt32 z); + +// Sensor HW only accepts specific maximum range values, used to maximize +// the 16-bit sensor outputs. Use these ramps to specify and report appropriate values. +const UInt16 AccelRangeRamp[] = { 2, 4, 8, 16 }; +const UInt16 GyroRangeRamp[] = { 250, 500, 1000, 2000 }; +const UInt16 MagRangeRamp[] = { 880, 1300, 1900, 2500 }; + +UInt16 SelectSensorRampValue(const UInt16* ramp, unsigned count, + float val, float factor, const char* label); + +// SensorScaleImpl provides buffer packing logic for the Sensor Range +// record that can be applied to DK1 sensor through Get/SetFeature. We expose this +// through SensorRange class, which has different units. +struct SensorRangeImpl +{ + enum { PacketSize = 8 }; + UByte Buffer[PacketSize]; + + UInt16 CommandId; + UInt16 AccelScale; + UInt16 GyroScale; + UInt16 MagScale; + + SensorRangeImpl(const SensorRange& r, UInt16 commandId = 0); + + void SetSensorRange(const SensorRange& r, UInt16 commandId = 0); + void GetSensorRange(SensorRange* r); + + static SensorRange GetMaxSensorRange(); + + void Pack(); + void Unpack(); +}; + +struct SensorConfigImpl +{ + enum { PacketSize = 7 }; + UByte Buffer[PacketSize]; + + // Flag values for Flags. 
+ enum { + Flag_RawMode = 0x01, + Flag_CalibrationTest = 0x02, // Internal test mode + Flag_UseCalibration = 0x04, + Flag_AutoCalibration = 0x08, + Flag_MotionKeepAlive = 0x10, + Flag_CommandKeepAlive = 0x20, + Flag_SensorCoordinates = 0x40 + }; + + UInt16 CommandId; + UByte Flags; + UInt16 PacketInterval; + UInt16 KeepAliveIntervalMs; + + SensorConfigImpl(); + + void SetSensorCoordinates(bool sensorCoordinates); + bool IsUsingSensorCoordinates() const; + + void Pack(); + void Unpack(); +}; + +struct SensorFactoryCalibrationImpl +{ + enum { PacketSize = 69 }; + UByte Buffer[PacketSize]; + + Vector3f AccelOffset; + Vector3f GyroOffset; + Matrix4f AccelMatrix; + Matrix4f GyroMatrix; + float Temperature; + + SensorFactoryCalibrationImpl(); + + void Pack(); // Not yet implemented. + void Unpack(); +}; + + +// SensorKeepAlive - feature report that needs to be sent at regular intervals for sensor +// to receive commands. +struct SensorKeepAliveImpl +{ + enum { PacketSize = 5 }; + UByte Buffer[PacketSize]; + + UInt16 CommandId; + UInt16 KeepAliveIntervalMs; + + SensorKeepAliveImpl(UInt16 interval = 0, UInt16 commandId = 0); + + void Pack(); + void Unpack(); +}; + +struct TrackerSample +{ + SInt32 AccelX, AccelY, AccelZ; + SInt32 GyroX, GyroY, GyroZ; +}; + +enum LastCommandIdFlags +{ + LastCommandId_Shutter = 1, + LastCommandId_LEDs = 2 +}; + +} // namespace OVR + +#endif // OVR_SensorImpl_Common_h diff --git a/LibOVR/Src/OVR_SensorTimeFilter.cpp b/LibOVR/Src/OVR_SensorTimeFilter.cpp new file mode 100644 index 0000000..ee0c385 --- /dev/null +++ b/LibOVR/Src/OVR_SensorTimeFilter.cpp @@ -0,0 +1,385 @@ +/************************************************************************************ + +PublicHeader: None +Filename : OVR_SensorTimeFilter.cpp +Content : Class to filter HMD time and convert it to system time +Created : December 20, 2013 +Author : Michael Antonov +Notes : + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +************************************************************************************/ + +#include "OVR_SensorTimeFilter.h" +#include "Kernel/OVR_Log.h" + + +#include <stdio.h> +#include <math.h> + +namespace OVR { + +// Comment out for debug logging to file +//#define OVR_TIMEFILTER_LOG_CODE( code ) code +#define OVR_TIMEFILTER_LOG_CODE( code ) + +#if defined(OVR_OS_ANDROID) + #define OVR_TIMEFILTER_LOG_FILENAME "/sdcard/TimeFilterLog.txt" +#elif defined(OVR_OS_WIN32) + #define OVR_TIMEFILTER_LOG_FILENAME "C:\\TimeFilterLog.txt" +#else + #define OVR_TIMEFILTER_LOG_FILENAME "TimeFilterLog.txt" +#endif + +OVR_TIMEFILTER_LOG_CODE( FILE* pTFLogFile = 0; ) + + +// Ideally, the following would always be true: +// - NewSampleTime > PrevSample +// - NewSampleTime < now systemTime +// - (NewSampleTime - PrevSampleTime) == integration delta, matching +// HW sample time difference + drift +// +// In practice, these issues affect us: +// - System thread can be suspended for a while +// - System de-buffering of recorded samples cause deviceTime to advance up +// much faster then system time for ~100+ samples +// - Device (DK1) and system clock granularities are high; this can +// lead to potentially having estimations in the future +// + + +// ***** TimerFilter + +SensorTimeFilter::SensorTimeFilter(const Settings& settings) +{ + FilterSettings = settings; + + ClockInitialized = false; + ClockDelta = 0; + ClockDeltaDriftPerSecond = 0; + ClockDeltaCorrectPerSecond = 0; + ClockDeltaCorrectSecondsLeft = 0; + OldClockDeltaDriftExpire = 0; + + LastLargestDeviceTime = 0; + PrevSystemTime = 0; + PastSampleResetTime = 0; + + MinWindowsCollected = 0; + MinWindowDuration = 0; // assigned later + MinWindowLastTime = 0; + MinWindowSamples = settings.MinSamples; // Force initialization + + OVR_TIMEFILTER_LOG_CODE( pTFLogFile = fopen(OVR_TIMEFILTER_LOG_FILENAME, "w+"); ) +} + + +double SensorTimeFilter::SampleToSystemTime(double sampleDeviceTime, double systemTime, + double prevResult, const char* debugTag) +{ + double clockDelta = systemTime - sampleDeviceTime + FilterSettings.ClockDeltaAdjust; + double deviceTimeDelta = sampleDeviceTime - LastLargestDeviceTime; + double result; + + // Collect a sample ClockDelta for a "MinimumWindow" or process + // the window by adjusting drift rates if it's full of samples. + // - (deviceTimeDelta < 1.0f) is a corner cases, as it would imply timestamp skip/wrap. + + if (ClockInitialized) + { + // Samples in the past commonly occur if they come from separately incrementing + // data channels. Just adjust them with ClockDelta. + + if (deviceTimeDelta < 0.0) + { + result = sampleDeviceTime + ClockDelta; + + if (result > (prevResult - 0.00001)) + goto clamp_and_log_result; + + // Consistent samples less then prevResult for indicate a back-jump or bad input. + // In this case we return prevResult for a while, then reset filter if it keeps going. + if (PastSampleResetTime < 0.0001) + { + PastSampleResetTime = systemTime + FilterSettings.PastSampleResetSeconds; + goto clamp_and_log_result; + } + else if (systemTime > PastSampleResetTime) + { + OVR_DEBUG_LOG(("SensorTimeFilter - Filtering reset due to samples in the past!\n")); + initClockSampling(sampleDeviceTime, clockDelta); + // Fall through to below, to ' PastSampleResetTime = 0.0; ' + } + else + { + goto clamp_and_log_result; + } + } + + // Most common case: Record window sample. 
+ else if ( (deviceTimeDelta < 1.0f) && + ( (sampleDeviceTime < MinWindowLastTime) || + (MinWindowSamples < FilterSettings.MinSamples) ) ) + { + // Pick minimum ClockDelta sample. + if (clockDelta < MinWindowClockDelta) + MinWindowClockDelta = clockDelta; + MinWindowSamples++; + } + else + { + processFinishedMinWindow(sampleDeviceTime, clockDelta); + } + + PastSampleResetTime = 0.0; + } + else + { + initClockSampling(sampleDeviceTime, clockDelta); + } + + + // Clock adjustment for drift. + ClockDelta += ClockDeltaDriftPerSecond * deviceTimeDelta; + + // ClockDelta "nudging" towards last known MinWindowClockDelta. + if (ClockDeltaCorrectSecondsLeft > 0.000001) + { + double correctTimeDelta = deviceTimeDelta; + if (deviceTimeDelta > ClockDeltaCorrectSecondsLeft) + correctTimeDelta = ClockDeltaCorrectSecondsLeft; + ClockDeltaCorrectSecondsLeft -= correctTimeDelta; + + ClockDelta += ClockDeltaCorrectPerSecond * correctTimeDelta; + } + + // Record largest device time, so we know what samples to use in accumulation + // of min-window in the future. + LastLargestDeviceTime = sampleDeviceTime; + + // Compute our resulting sample time after ClockDelta adjustment. + result = sampleDeviceTime + ClockDelta; + + +clamp_and_log_result: + + OVR_TIMEFILTER_LOG_CODE( double savedResult = result; ) + + // Clamp to ensure that result >= PrevResult, or not to far in the future. + // Future clamp primarily happens in the very beginning if we are de-queuing + // system buffer full of samples. + if (result < prevResult) + { + result = prevResult; + } + if (result > (systemTime + FilterSettings.FutureClamp)) + { + result = (systemTime + FilterSettings.FutureClamp); + } + + OVR_TIMEFILTER_LOG_CODE( + + // Tag lines that were outside desired range, with '<' or '>'. + char rangeClamp = ' '; + char resultDeltaFar = ' '; + + if (savedResult > (systemTime + 0.0000001)) + rangeClamp = '>'; + if (savedResult < prevResult) + rangeClamp = '<'; + + // Tag any result delta outside desired threshold with a '*'. + if (fabs(deviceTimeDelta - (result - prevResult)) >= 0.00002) + resultDeltaFar = '*'; + + fprintf(pTFLogFile, "Res%s = %13.7f, dt = % 8.7f, ClkD = %13.6f " + "sysT = %13.6f, sysDt = %f, " + "sysDiff = % f, devT = %11.6f, ddevT = %9.6f %c%c\n", + debugTag, result, result - prevResult, ClockDelta, + systemTime, systemTime - PrevSystemTime, + -(systemTime - result), // Negatives in the past, positive > now. + sampleDeviceTime, deviceTimeDelta, rangeClamp, resultDeltaFar); + + ) // OVR_TIMEFILTER_LOG_CODE() + OVR_UNUSED(debugTag); + + // Record prior values. Useful or logging and clamping. + PrevSystemTime = systemTime; + + return result; +} + + +void SensorTimeFilter::initClockSampling(double sampleDeviceTime, double clockDelta) +{ + ClockInitialized = true; + ClockDelta = clockDelta; + ClockDeltaDriftPerSecond = 0; + OldClockDeltaDriftExpire = 0; + ClockDeltaCorrectSecondsLeft = 0; + ClockDeltaCorrectPerSecond = 0; + + MinWindowsCollected = 0; + MinWindowDuration = 0.25; + MinWindowClockDelta = clockDelta; + MinWindowLastTime = sampleDeviceTime + MinWindowDuration; + MinWindowSamples = 0; +} + + +void SensorTimeFilter::processFinishedMinWindow(double sampleDeviceTime, double clockDelta) +{ + MinRecord newRec = { MinWindowClockDelta, sampleDeviceTime }; + + double clockDeltaDiff = MinWindowClockDelta - ClockDelta; + double absClockDeltaDiff = fabs(clockDeltaDiff); + + + // Abrupt change causes Reset of minClockDelta collection. + // > 8 ms would a Large jump in a minimum sample, as those are usually stable. 
+ // > 1 second intantaneous jump would land us here as well, as that would imply + // device being suspended, clock wrap or some other unexpected issue. + if ((absClockDeltaDiff > 0.008) || + ((sampleDeviceTime - LastLargestDeviceTime) >= 1.0)) + { + OVR_TIMEFILTER_LOG_CODE( + fprintf(pTFLogFile, + "\nMinWindow Finished: %d Samples, MinWindowClockDelta=%f, MW-CD=%f," + " ** ClockDelta Reset **\n\n", + MinWindowSamples, MinWindowClockDelta, MinWindowClockDelta-ClockDelta); + ) + + // Use old collected ClockDeltaDriftPerSecond drift value + // up to 1 minute until we collect better samples. + if (!MinRecords.IsEmpty()) + { + OldClockDeltaDriftExpire = MinRecords.GetNewest().LastSampleDeviceTime - + MinRecords.GetOldest().LastSampleDeviceTime; + if (OldClockDeltaDriftExpire > 60.0) + OldClockDeltaDriftExpire = 60.0; + OldClockDeltaDriftExpire += sampleDeviceTime; + } + + // Jump to new ClockDelta value. + if ((sampleDeviceTime - LastLargestDeviceTime) > 1.0) + ClockDelta = clockDelta; + else + ClockDelta = MinWindowClockDelta; + + ClockDeltaCorrectSecondsLeft = 0; + ClockDeltaCorrectPerSecond = 0; + + // Reset buffers, we'll be collecting a new MinWindow. + MinRecords.Reset(); + MinWindowsCollected = 0; + MinWindowDuration = 0.25; + MinWindowSamples = 0; + } + else + { + OVR_ASSERT(MinWindowSamples >= FilterSettings.MinSamples); + + double timeElapsed = 0; + + // If we have older values, use them to update clock drift in + // ClockDeltaDriftPerSecond + if (!MinRecords.IsEmpty() && (sampleDeviceTime > OldClockDeltaDriftExpire)) + { + MinRecord rec = MinRecords.GetOldest(); + + // Compute clock rate of drift. + timeElapsed = sampleDeviceTime - rec.LastSampleDeviceTime; + + // Check for divide by zero shouldn't be necessary here, but just be be safe... + if (timeElapsed > 0.000001) + { + ClockDeltaDriftPerSecond = (MinWindowClockDelta - rec.MinClockDelta) / timeElapsed; + ClockDeltaDriftPerSecond = clampRate(ClockDeltaDriftPerSecond, + FilterSettings.MaxChangeRate); + } + else + { + ClockDeltaDriftPerSecond = 0.0; + } + } + + MinRecords.AddRecord(newRec); + + + // Catchup correction nudges ClockDelta towards MinWindowClockDelta. + // These are needed because clock drift correction alone is not enough + // for past accumulated error/high-granularity clock delta changes. + // The further away we are, the stronger correction we apply. + // Correction has timeout, as we don't want it to overshoot in case + // of a large delay between samples. + + if (absClockDeltaDiff >= 0.00125) + { + // Correct large discrepancy immediately. 
+ if (absClockDeltaDiff > 0.00175) + { + if (clockDeltaDiff > 0) + ClockDelta += (clockDeltaDiff - 0.00175); + else + ClockDelta += (clockDeltaDiff + 0.00175); + + clockDeltaDiff = MinWindowClockDelta - ClockDelta; + } + + ClockDeltaCorrectPerSecond = clockDeltaDiff; + ClockDeltaCorrectSecondsLeft = 1.0; + } + else if (absClockDeltaDiff > 0.0005) + { + ClockDeltaCorrectPerSecond = clockDeltaDiff / 8.0; + ClockDeltaCorrectSecondsLeft = 8.0; + } + else + { + ClockDeltaCorrectPerSecond = clockDeltaDiff / 15.0; + ClockDeltaCorrectSecondsLeft = 15.0; + } + + ClockDeltaCorrectPerSecond = clampRate(ClockDeltaCorrectPerSecond, + FilterSettings.MaxCorrectRate); + + OVR_TIMEFILTER_LOG_CODE( + fprintf(pTFLogFile, + "\nMinWindow Finished: %d Samples, MinWindowClockDelta=%f, MW-CD=%f," + " tileElapsed=%f, ClockChange=%f, ClockCorrect=%f\n\n", + MinWindowSamples, MinWindowClockDelta, MinWindowClockDelta-ClockDelta, + timeElapsed, ClockDeltaDriftPerSecond, ClockDeltaCorrectPerSecond); + ) + } + + // New MinClockDelta collection window. + // Switch to longer duration after first few windows. + MinWindowsCollected ++; + if (MinWindowsCollected > 5) + MinWindowDuration = 0.5; + + MinWindowClockDelta = clockDelta; + MinWindowLastTime = sampleDeviceTime + MinWindowDuration; + MinWindowSamples = 0; +} + + +} // namespace OVR + diff --git a/LibOVR/Src/OVR_SensorTimeFilter.h b/LibOVR/Src/OVR_SensorTimeFilter.h new file mode 100644 index 0000000..409fe66 --- /dev/null +++ b/LibOVR/Src/OVR_SensorTimeFilter.h @@ -0,0 +1,226 @@ +/************************************************************************************ + +PublicHeader: None +Filename : OVR_SensorTimeFilter.h +Content : Class to filter HMD time and convert it to system time +Created : December 20, 2013 +Author : Michael Antonov +Notes : + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_SensorTimeFilter_h +#define OVR_SensorTimeFilter_h + +#include "Kernel/OVR_Types.h" + +namespace OVR { + + +//----------------------------------------------------------------------------------- +// ***** SensorTimeFilter + +// SensorTimeFilter converts sample device time, in seconds, to absolute system +// time. It filter maintains internal state to estimate the following: +// +// - Difference between system and device time values (ClockDelta). +// ~= (systemTime - deviceTime) +// - Drift rate between system and device clocks (ClockDeltaDriftPerSecond). +// +// Additionally, the following criteria are enforced: +// - Resulting samples must be increasing, compared to prevSample. +// - Returned sample time should not exceed 'now' system time by more then a fixed +// value. 
+// * Ideally this should be 0, however, enforcing this is hard when clocks +// have high discrete values. +// - Returned sample AbsoluteTime values deltas are very close to HW samples, +// adjusted by drift rate. Note that this is not always possible due to clamping, +// in which case it is better to use ScaleTimeUnit(deviceTimeDelta) +// for integration. +// +// Algorithm: We collect minimum ClockDelta on windows of +// consecutive samples (500 ms each set). Long term difference between sample +// set minimums is drift. ClockDelta is also continually nudged towards most recent +// minimum. + +class SensorTimeFilter +{ +public: + + // It may be desirable to configure these per device/platform. + // For example, rates can be tighter for DK2 because of microsecond clock. + struct Settings + { + Settings(int minSamples = 50, + double clockDeltaAdjust = -0.0002, // 200 mks in the past. + double futureClamp = 0.0008) + : MinSamples(minSamples), + ClockDeltaAdjust(clockDeltaAdjust), + // PastClamp(-0.032), + FutureClamp(futureClamp), + PastSampleResetSeconds(0.2), + MaxChangeRate(0.004), + MaxCorrectRate(0.004) + { } + + // Minimum number of samples in a window. Different number may be desirable + // based on how often samples come in. + int MinSamples; + + // Factor always added to ClockDelta, used to skew all values into the past by fixed + // value and reduce the chances we report a sample "in the future". + double ClockDeltaAdjust; + // How much away in a past can a sample be before being shifted closer to system time. + //double PastClamp; + // How much larger then systemTime can a value be? Set to 0 to clamp to null, + // put small positive value is better. + double FutureClamp; + + // How long (in system time) do we take to reset the system if a device sample. + // comes in the past. Generally, this should never happened, but exists as a way to + // address bad timing coming form firmware (temp CCove issue, presumably fixed) + // or buggy input. + double PastSampleResetSeconds; + + // Maximum drift change and near-term correction rates, in seconds. + double MaxChangeRate; + double MaxCorrectRate; + }; + + + SensorTimeFilter(const Settings& settings = Settings()); + + + // Convert device sample time to system time, driving clock drift estimation. + // Input: SampleTime, System Time + // Return: Absolute system time for sample + double SampleToSystemTime(double sampleDeviceTime, double systemTime, + double prevResult, const char* debugTag = ""); + + + // Scales device time to account for drift. + double ScaleTimeUnit(double deviceClockDelta) + { + return deviceClockDelta * (1.0 + ClockDeltaDriftPerSecond); + } + + // Return currently estimated difference between the clocks. + double GetClockDelta() const { return ClockDelta; } + + +private: + + void initClockSampling(double sampleDeviceTime, double clockDelta); + void processFinishedMinWindow(double sampleDeviceTime, double systemTime); + + static double clampRate(double rate, double limit) + { + if (rate > limit) + rate = limit; + else if (rate < -limit) + rate = -limit; + return rate; + } + + + // Describes minimum observed ClockDelta for sample set seen in the past. + struct MinRecord + { + double MinClockDelta; + double LastSampleDeviceTime; + }; + + // Circular buffer storing MinRecord(s) several minutes into the past. + // Oldest value here is used to help estimate drift. 
+ class MinRecordBuffer + { + enum { BufferSize = 60*6 }; // 3 min + public: + + MinRecordBuffer() : Head(0), Tail(0) { } + + void Reset() { Head = Tail = 0; } + bool IsEmpty() const { return Head == Tail; } + + const MinRecord& GetOldest() const + { + OVR_ASSERT(!IsEmpty()); + return Records[Tail]; + } + const MinRecord& GetNewest() const + { + OVR_ASSERT(!IsEmpty()); + return Records[(BufferSize + Head - 1) % BufferSize]; + } + + void AddRecord(const MinRecord& rec) + { + Records[Head] = rec; + Head = advanceIndex(Head); + if (Head == Tail) + Tail = advanceIndex(Tail); + } + + private: + + static int advanceIndex(int index) + { + index++; + if (index >= BufferSize) + index = 0; + return index; + } + + MinRecord Records[BufferSize]; + int Head; // Location we will most recent entry, unused. + int Tail; // Oldest entry. + }; + + + Settings FilterSettings; + + // Clock correction state. + bool ClockInitialized; + double ClockDelta; + double ClockDeltaDriftPerSecond; + double ClockDeltaCorrectPerSecond; + double ClockDeltaCorrectSecondsLeft; + double OldClockDeltaDriftExpire; + + double LastLargestDeviceTime; + double PrevSystemTime; + // Used to reset timing if we get multiple "samples in the past" + double PastSampleResetTime; + + // "MinWindow" is a block of time during which minimum ClockDelta values + // are collected into MinWindowClockDelta. + int MinWindowsCollected; + double MinWindowDuration; // Device sample seconds + double MinWindowLastTime; + double MinWindowClockDelta; + int MinWindowSamples; + + // Historic buffer used to determine rate of clock change over time. + MinRecordBuffer MinRecords; +}; + +} // namespace OVR + +#endif // OVR_SensorTimeFilter_h diff --git a/LibOVR/Src/OVR_Stereo.cpp b/LibOVR/Src/OVR_Stereo.cpp new file mode 100644 index 0000000..7e78b82 --- /dev/null +++ b/LibOVR/Src/OVR_Stereo.cpp @@ -0,0 +1,1794 @@ +/************************************************************************************ + +Filename : OVR_Stereo.cpp +Content : Stereo rendering functions +Created : November 30, 2013 +Authors : Tom Fosyth + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "OVR_Stereo.h" +#include "OVR_Profile.h" +#include "Kernel/OVR_Log.h" +#include "Kernel/OVR_Alg.h" + +//To allow custom distortion to be introduced to CatMulSpline. 
+float (*CustomDistortion)(float) = NULL; +float (*CustomDistortionInv)(float) = NULL; + + +namespace OVR { + + +using namespace Alg; + +//----------------------------------------------------------------------------------- + +// Inputs are 4 points (pFitX[0],pFitY[0]) through (pFitX[3],pFitY[3]) +// Result is four coefficients in pResults[0] through pResults[3] such that +// y = pResult[0] + x * ( pResult[1] + x * ( pResult[2] + x * ( pResult[3] ) ) ); +// passes through all four input points. +// Return is true if it succeeded, false if it failed (because two control points +// have the same pFitX value). +bool FitCubicPolynomial ( float *pResult, const float *pFitX, const float *pFitY ) +{ + float d0 = ( ( pFitX[0]-pFitX[1] ) * ( pFitX[0]-pFitX[2] ) * ( pFitX[0]-pFitX[3] ) ); + float d1 = ( ( pFitX[1]-pFitX[2] ) * ( pFitX[1]-pFitX[3] ) * ( pFitX[1]-pFitX[0] ) ); + float d2 = ( ( pFitX[2]-pFitX[3] ) * ( pFitX[2]-pFitX[0] ) * ( pFitX[2]-pFitX[1] ) ); + float d3 = ( ( pFitX[3]-pFitX[0] ) * ( pFitX[3]-pFitX[1] ) * ( pFitX[3]-pFitX[2] ) ); + + if ( ( d0 == 0.0f ) || ( d1 == 0.0f ) || ( d2 == 0.0f ) || ( d3 == 0.0f ) ) + { + return false; + } + + float f0 = pFitY[0] / d0; + float f1 = pFitY[1] / d1; + float f2 = pFitY[2] / d2; + float f3 = pFitY[3] / d3; + + pResult[0] = -( f0*pFitX[1]*pFitX[2]*pFitX[3] + + f1*pFitX[0]*pFitX[2]*pFitX[3] + + f2*pFitX[0]*pFitX[1]*pFitX[3] + + f3*pFitX[0]*pFitX[1]*pFitX[2] ); + pResult[1] = f0*(pFitX[1]*pFitX[2] + pFitX[2]*pFitX[3] + pFitX[3]*pFitX[1]) + + f1*(pFitX[0]*pFitX[2] + pFitX[2]*pFitX[3] + pFitX[3]*pFitX[0]) + + f2*(pFitX[0]*pFitX[1] + pFitX[1]*pFitX[3] + pFitX[3]*pFitX[0]) + + f3*(pFitX[0]*pFitX[1] + pFitX[1]*pFitX[2] + pFitX[2]*pFitX[0]); + pResult[2] = -( f0*(pFitX[1]+pFitX[2]+pFitX[3]) + + f1*(pFitX[0]+pFitX[2]+pFitX[3]) + + f2*(pFitX[0]+pFitX[1]+pFitX[3]) + + f3*(pFitX[0]+pFitX[1]+pFitX[2]) ); + pResult[3] = f0 + f1 + f2 + f3; + + return true; +} + + + +float EvalCatmullRom10Spline ( float const *K, float scaledVal ) +{ + int const NumSegments = LensConfig::NumCoefficients; + + float scaledValFloor = floorf ( scaledVal ); + scaledValFloor = Alg::Max ( 0.0f, Alg::Min ( (float)(NumSegments-1), scaledValFloor ) ); + float t = scaledVal - scaledValFloor; + int k = (int)scaledValFloor; + + float p0, p1; + float m0, m1; + switch ( k ) + { + case 0: + // Curve starts at 1.0 with gradient K[1]-K[0] + p0 = 1.0f; + m0 = ( K[1] - K[0] ); // general case would have been (K[1]-K[-1])/2 + p1 = K[1]; + m1 = 0.5f * ( K[2] - K[0] ); + break; + default: + // General case + p0 = K[k ]; + m0 = 0.5f * ( K[k+1] - K[k-1] ); + p1 = K[k+1]; + m1 = 0.5f * ( K[k+2] - K[k ] ); + break; + case NumSegments-2: + // Last tangent is just the slope of the last two points. 
+ p0 = K[NumSegments-2]; + m0 = 0.5f * ( K[NumSegments-1] - K[NumSegments-2] ); + p1 = K[NumSegments-1]; + m1 = K[NumSegments-1] - K[NumSegments-2]; + break; + case NumSegments-1: + // Beyond the last segment it's just a straight line + p0 = K[NumSegments-1]; + m0 = K[NumSegments-1] - K[NumSegments-2]; + p1 = p0 + m0; + m1 = m0; + break; + } + + float omt = 1.0f - t; + float res = ( p0 * ( 1.0f + 2.0f * t ) + m0 * t ) * omt * omt + + ( p1 * ( 1.0f + 2.0f * omt ) - m1 * omt ) * t * t; + + return res; +} + + + + +// Converts a Profile eyecup string into an eyecup enumeration +void SetEyeCup(HmdRenderInfo* renderInfo, const char* cup) +{ + if (OVR_strcmp(cup, "A") == 0) + renderInfo->EyeCups = EyeCup_DK1A; + else if (OVR_strcmp(cup, "B") == 0) + renderInfo->EyeCups = EyeCup_DK1B; + else if (OVR_strcmp(cup, "C") == 0) + renderInfo->EyeCups = EyeCup_DK1C; + else if (OVR_strcmp(cup, "Orange A") == 0) + renderInfo->EyeCups = EyeCup_OrangeA; + else if (OVR_strcmp(cup, "Red A") == 0) + renderInfo->EyeCups = EyeCup_RedA; + else if (OVR_strcmp(cup, "Pink A") == 0) + renderInfo->EyeCups = EyeCup_PinkA; + else if (OVR_strcmp(cup, "Blue A") == 0) + renderInfo->EyeCups = EyeCup_BlueA; + else + renderInfo->EyeCups = EyeCup_DK1A; +} + + + +//----------------------------------------------------------------------------------- + + +// The result is a scaling applied to the distance. +float LensConfig::DistortionFnScaleRadiusSquared (float rsq) const +{ + float scale = 1.0f; + switch ( Eqn ) + { + case Distortion_Poly4: + // This version is deprecated! Prefer one of the other two. + scale = ( K[0] + rsq * ( K[1] + rsq * ( K[2] + rsq * K[3] ) ) ); + break; + case Distortion_RecipPoly4: + scale = 1.0f / ( K[0] + rsq * ( K[1] + rsq * ( K[2] + rsq * K[3] ) ) ); + break; + case Distortion_CatmullRom10:{ + // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[10] + // evenly spaced in R^2 from 0.0 to MaxR^2 + // K[0] controls the slope at radius=0.0, rather than the actual value. + const int NumSegments = LensConfig::NumCoefficients; + OVR_ASSERT ( NumSegments <= NumCoefficients ); + float scaledRsq = (float)(NumSegments-1) * rsq / ( MaxR * MaxR ); + scale = EvalCatmullRom10Spline ( K, scaledRsq ); + + + //Intercept, and overrule if needed + if (CustomDistortion) + { + scale = CustomDistortion(rsq); + } + + }break; + default: + OVR_ASSERT ( false ); + break; + } + return scale; +} + +// x,y,z components map to r,g,b +Vector3f LensConfig::DistortionFnScaleRadiusSquaredChroma (float rsq) const +{ + float scale = DistortionFnScaleRadiusSquared ( rsq ); + Vector3f scaleRGB; + scaleRGB.x = scale * ( 1.0f + ChromaticAberration[0] + rsq * ChromaticAberration[1] ); // Red + scaleRGB.y = scale; // Green + scaleRGB.z = scale * ( 1.0f + ChromaticAberration[2] + rsq * ChromaticAberration[3] ); // Blue + return scaleRGB; +} + +// DistortionFnInverse computes the inverse of the distortion function on an argument. +float LensConfig::DistortionFnInverse(float r) const +{ + OVR_ASSERT((r <= 20.0f)); + + float s, d; + float delta = r * 0.25f; + + // Better to start guessing too low & take longer to converge than too high + // and hit singularities. Empirically, r * 0.5f is too high in some cases. 
+ s = r * 0.25f; + d = fabs(r - DistortionFn(s)); + + for (int i = 0; i < 20; i++) + { + float sUp = s + delta; + float sDown = s - delta; + float dUp = fabs(r - DistortionFn(sUp)); + float dDown = fabs(r - DistortionFn(sDown)); + + if (dUp < d) + { + s = sUp; + d = dUp; + } + else if (dDown < d) + { + s = sDown; + d = dDown; + } + else + { + delta *= 0.5f; + } + } + + return s; +} + + + +float LensConfig::DistortionFnInverseApprox(float r) const +{ + float rsq = r * r; + float scale = 1.0f; + switch ( Eqn ) + { + case Distortion_Poly4: + // Deprecated + OVR_ASSERT ( false ); + break; + case Distortion_RecipPoly4: + scale = 1.0f / ( InvK[0] + rsq * ( InvK[1] + rsq * ( InvK[2] + rsq * InvK[3] ) ) ); + break; + case Distortion_CatmullRom10:{ + // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[9] + // evenly spaced in R^2 from 0.0 to MaxR^2 + // K[0] controls the slope at radius=0.0, rather than the actual value. + const int NumSegments = LensConfig::NumCoefficients; + OVR_ASSERT ( NumSegments <= NumCoefficients ); + float scaledRsq = (float)(NumSegments-1) * rsq / ( MaxInvR * MaxInvR ); + scale = EvalCatmullRom10Spline ( InvK, scaledRsq ); + + //Intercept, and overrule if needed + if (CustomDistortionInv) + { + scale = CustomDistortionInv(rsq); + } + + }break; + default: + OVR_ASSERT ( false ); + break; + } + return r * scale; +} + +void LensConfig::SetUpInverseApprox() +{ + float maxR = MaxInvR; + + switch ( Eqn ) + { + case Distortion_Poly4: + // Deprecated + OVR_ASSERT ( false ); + break; + case Distortion_RecipPoly4:{ + + float sampleR[4]; + float sampleRSq[4]; + float sampleInv[4]; + float sampleFit[4]; + + // Found heuristically... + sampleR[0] = 0.0f; + sampleR[1] = maxR * 0.4f; + sampleR[2] = maxR * 0.8f; + sampleR[3] = maxR * 1.5f; + for ( int i = 0; i < 4; i++ ) + { + sampleRSq[i] = sampleR[i] * sampleR[i]; + sampleInv[i] = DistortionFnInverse ( sampleR[i] ); + sampleFit[i] = sampleR[i] / sampleInv[i]; + } + sampleFit[0] = 1.0f; + FitCubicPolynomial ( InvK, sampleRSq, sampleFit ); + + #if 0 + // Should be a nearly exact match on the chosen points. + OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[0] ) - DistortionFnInverseApprox ( sampleR[0] ) ) / maxR < 0.0001f ); + OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[1] ) - DistortionFnInverseApprox ( sampleR[1] ) ) / maxR < 0.0001f ); + OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[2] ) - DistortionFnInverseApprox ( sampleR[2] ) ) / maxR < 0.0001f ); + OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[3] ) - DistortionFnInverseApprox ( sampleR[3] ) ) / maxR < 0.0001f ); + // Should be a decent match on the rest of the range. + const int maxCheck = 20; + for ( int i = 0; i < maxCheck; i++ ) + { + float checkR = (float)i * maxR / (float)maxCheck; + float realInv = DistortionFnInverse ( checkR ); + float testInv = DistortionFnInverseApprox ( checkR ); + float error = fabsf ( realInv - testInv ) / maxR; + OVR_ASSERT ( error < 0.1f ); + } + #endif + + }break; + case Distortion_CatmullRom10:{ + + const int NumSegments = LensConfig::NumCoefficients; + OVR_ASSERT ( NumSegments <= NumCoefficients ); + for ( int i = 1; i < NumSegments; i++ ) + { + float scaledRsq = (float)i; + float rsq = scaledRsq * MaxInvR * MaxInvR / (float)( NumSegments - 1); + float r = sqrtf ( rsq ); + float inv = DistortionFnInverse ( r ); + InvK[i] = inv / r; + InvK[0] = 1.0f; // TODO: fix this. 
+ } + +#if 0 + const int maxCheck = 20; + for ( int i = 0; i <= maxCheck; i++ ) + { + float checkR = (float)i * MaxInvR / (float)maxCheck; + float realInv = DistortionFnInverse ( checkR ); + float testInv = DistortionFnInverseApprox ( checkR ); + float error = fabsf ( realInv - testInv ) / MaxR; + OVR_ASSERT ( error < 0.01f ); + } +#endif + + }break; + } +} + + +void LensConfig::SetToIdentity() +{ + for ( int i = 0; i < NumCoefficients; i++ ) + { + K[i] = 0.0f; + InvK[i] = 0.0f; + } + Eqn = Distortion_RecipPoly4; + K[0] = 1.0f; + InvK[0] = 1.0f; + MaxR = 1.0f; + MaxInvR = 1.0f; + ChromaticAberration[0] = 0.0f; + ChromaticAberration[1] = 0.0f; + ChromaticAberration[2] = 0.0f; + ChromaticAberration[3] = 0.0f; + MetersPerTanAngleAtCenter = 0.05f; +} + + +enum LensConfigStoredVersion +{ + LCSV_CatmullRom10Version1 = 1 +}; + +// DO NOT CHANGE THESE ONCE THEY HAVE BEEN BAKED INTO FIRMWARE. +// If something needs to change, add a new one! +struct LensConfigStored_CatmullRom10Version1 +{ + // All these items must be fixed-length integers - no "float", no "int", etc. + UInt16 VersionNumber; // Must be LCSV_CatmullRom10Version1 + + UInt16 K[11]; + UInt16 MaxR; + UInt16 MetersPerTanAngleAtCenter; + UInt16 ChromaticAberration[4]; + // InvK and MaxInvR are calculated on load. +}; + +UInt16 EncodeFixedPointUInt16 ( float val, UInt16 zeroVal, int fractionalBits ) +{ + OVR_ASSERT ( ( fractionalBits >= 0 ) && ( fractionalBits < 31 ) ); + float valWhole = val * (float)( 1 << fractionalBits ); + valWhole += (float)zeroVal + 0.5f; + valWhole = floorf ( valWhole ); + OVR_ASSERT ( ( valWhole >= 0.0f ) && ( valWhole < (float)( 1 << 16 ) ) ); + return (UInt16)valWhole; +} + +float DecodeFixedPointUInt16 ( UInt16 val, UInt16 zeroVal, int fractionalBits ) +{ + OVR_ASSERT ( ( fractionalBits >= 0 ) && ( fractionalBits < 31 ) ); + float valFloat = (float)val; + valFloat -= (float)zeroVal; + valFloat *= 1.0f / (float)( 1 << fractionalBits ); + return valFloat; +} + + +// Returns true on success. +bool LoadLensConfig ( LensConfig *presult, UByte const *pbuffer, int bufferSizeInBytes ) +{ + if ( bufferSizeInBytes < 2 ) + { + // Can't even tell the version number! + return false; + } + UInt16 version = DecodeUInt16 ( pbuffer + 0 ); + switch ( version ) + { + case LCSV_CatmullRom10Version1: + { + if ( bufferSizeInBytes < sizeof(LensConfigStored_CatmullRom10Version1) ) + { + return false; + } + LensConfigStored_CatmullRom10Version1 lcs; + lcs.VersionNumber = DecodeUInt16 ( pbuffer + 0 ); + for ( int i = 0; i < 11; i++ ) + { + lcs.K[i] = DecodeUInt16 ( pbuffer + 2 + 2*i ); + } + lcs.MaxR = DecodeUInt16 ( pbuffer + 24 ); + lcs.MetersPerTanAngleAtCenter = DecodeUInt16 ( pbuffer + 26 ); + for ( int i = 0; i < 4; i++ ) + { + lcs.ChromaticAberration[i] = DecodeUInt16 ( pbuffer + 28 + 2*i ); + } + OVR_COMPILER_ASSERT ( sizeof(lcs) == 36 ); + + // Convert to the real thing. + LensConfig result; + result.Eqn = Distortion_CatmullRom10; + for ( int i = 0; i < 11; i++ ) + { + // K[] are mostly 1.something. They may get significantly bigger, but they never hit 0.0. + result.K[i] = DecodeFixedPointUInt16 ( lcs.K[i], 0, 14 ); + } + // MaxR is tan(angle), so always >0, typically just over 1.0 (45 degrees half-fov), + // but may get arbitrarily high. tan(76)=4 is a very reasonable limit! + result.MaxR = DecodeFixedPointUInt16 ( lcs.MaxR, 0, 14 ); + // MetersPerTanAngleAtCenter is also known as focal length! + // Typically around 0.04 for our current screens, minimum of 0, sensible maximum of 0.125 (i.e. 
3 "extra" bits of fraction) + result.MetersPerTanAngleAtCenter = DecodeFixedPointUInt16 ( lcs.MetersPerTanAngleAtCenter, 0, 16+3 ); + for ( int i = 0; i < 4; i++ ) + { + // ChromaticAberration[] are mostly 0.0something, centered on 0.0. Largest seen is 0.04, so set max to 0.125 (i.e. 3 "extra" bits of fraction) + result.ChromaticAberration[i] = DecodeFixedPointUInt16 ( lcs.ChromaticAberration[i], 0x8000, 16+3 ); + } + result.MaxInvR = result.DistortionFn ( result.MaxR ); + result.SetUpInverseApprox(); + + OVR_ASSERT ( version == lcs.VersionNumber ); + + *presult = result; + } + break; + default: + // Unknown format. + return false; + break; + } + return true; +} + +// Returns number of bytes needed. +int SaveLensConfigSizeInBytes ( LensConfig const &config ) +{ + OVR_UNUSED ( config ); + return sizeof ( LensConfigStored_CatmullRom10Version1 ); +} + +// Returns true on success. +bool SaveLensConfig ( UByte *pbuffer, int bufferSizeInBytes, LensConfig const &config ) +{ + if ( bufferSizeInBytes < sizeof ( LensConfigStored_CatmullRom10Version1 ) ) + { + return false; + } + + // Construct the values. + LensConfigStored_CatmullRom10Version1 lcs; + lcs.VersionNumber = LCSV_CatmullRom10Version1; + for ( int i = 0; i < 11; i++ ) + { + // K[] are mostly 1.something. They may get significantly bigger, but they never hit 0.0. + lcs.K[i] = EncodeFixedPointUInt16 ( config.K[i], 0, 14 ); + } + // MaxR is tan(angle), so always >0, typically just over 1.0 (45 degrees half-fov), + // but may get arbitrarily high. tan(76)=4 is a very reasonable limit! + lcs.MaxR = EncodeFixedPointUInt16 ( config.MaxR, 0, 14 ); + // MetersPerTanAngleAtCenter is also known as focal length! + // Typically around 0.04 for our current screens, minimum of 0, sensible maximum of 0.125 (i.e. 3 "extra" bits of fraction) + lcs.MetersPerTanAngleAtCenter = EncodeFixedPointUInt16 ( config.MetersPerTanAngleAtCenter, 0, 16+3 ); + for ( int i = 0; i < 4; i++ ) + { + // ChromaticAberration[] are mostly 0.0something, centered on 0.0. Largest seen is 0.04, so set max to 0.125 (i.e. 3 "extra" bits of fraction) + lcs.ChromaticAberration[i] = EncodeFixedPointUInt16 ( config.ChromaticAberration[i], 0x8000, 16+3 ); + } + + + // Now store them out, sensitive to endinness. + EncodeUInt16 ( pbuffer + 0, lcs.VersionNumber ); + for ( int i = 0; i < 11; i++ ) + { + EncodeUInt16 ( pbuffer + 2 + 2*i, lcs.K[i] ); + } + EncodeUInt16 ( pbuffer + 24, lcs.MaxR ); + EncodeUInt16 ( pbuffer + 26, lcs.MetersPerTanAngleAtCenter ); + for ( int i = 0; i < 4; i++ ) + { + EncodeUInt16 ( pbuffer + 28 + 2*i, lcs.ChromaticAberration[i] ); + } + OVR_COMPILER_ASSERT ( 36 == sizeof(lcs) ); + + return true; +} + +#ifdef OVR_BUILD_DEBUG +void TestSaveLoadLensConfig ( LensConfig const &config ) +{ + OVR_ASSERT ( config.Eqn == Distortion_CatmullRom10 ); + // As a test, make sure this can be encoded and decoded correctly. 
+ const int bufferSize = 256; + UByte buffer[bufferSize]; + OVR_ASSERT ( SaveLensConfigSizeInBytes ( config ) < bufferSize ); + bool success; + success = SaveLensConfig ( buffer, bufferSize, config ); + OVR_ASSERT ( success ); + LensConfig testConfig; + success = LoadLensConfig ( &testConfig, buffer, bufferSize ); + OVR_ASSERT ( success ); + OVR_ASSERT ( testConfig.Eqn == config.Eqn ); + for ( int i = 0; i < 11; i++ ) + { + OVR_ASSERT ( fabs ( testConfig.K[i] - config.K[i] ) < 0.0001f ); + } + OVR_ASSERT ( fabsf ( testConfig.MaxR - config.MaxR ) < 0.0001f ); + OVR_ASSERT ( fabsf ( testConfig.MetersPerTanAngleAtCenter - config.MetersPerTanAngleAtCenter ) < 0.00001f ); + for ( int i = 0; i < 4; i++ ) + { + OVR_ASSERT ( fabsf ( testConfig.ChromaticAberration[i] - config.ChromaticAberration[i] ) < 0.00001f ); + } +} +#endif + + + +//----------------------------------------------------------------------------------- + +// TBD: There is a question of whether this is the best file for CreateDebugHMDInfo. As long as there are many +// constants for HmdRenderInfo here as well it is ok. The alternative would be OVR_Common_HMDDevice.cpp, but +// that's specialized per platform... should probably move it there onces the code is in the common base class. + +HMDInfo CreateDebugHMDInfo(HmdTypeEnum hmdType) +{ + HMDInfo info; + + if ((hmdType != HmdType_DK1) && + (hmdType != HmdType_CrystalCoveProto)) + { + LogText("Debug HMDInfo - HmdType not supported. Defaulting to DK1.\n"); + hmdType = HmdType_DK1; + } + + // The alternative would be to initialize info.HmdType to HmdType_None instead. If we did that, + // code wouldn't be "maximally compatible" and devs wouldn't know what device we are + // simulating... so if differentiation becomes necessary we better add Debug flag in the future. 
+ info.HmdType = hmdType; + info.Manufacturer = "Oculus VR"; + + switch(hmdType) + { + case HmdType_DK1: + info.ProductName = "Oculus Rift DK1"; + info.ResolutionInPixels = Sizei ( 1280, 800 ); + info.ScreenSizeInMeters = Sizef ( 0.1498f, 0.0936f ); + info.ScreenGapSizeInMeters = 0.0f; + info.CenterFromTopInMeters = 0.0468f; + info.LensSeparationInMeters = 0.0635f; + info.Shutter.Type = HmdShutter_RollingTopToBottom; + info.Shutter.VsyncToNextVsync = ( 1.0f / 60.0f ); + info.Shutter.VsyncToFirstScanline = 0.000052f; + info.Shutter.FirstScanlineToLastScanline = 0.016580f; + info.Shutter.PixelSettleTime = 0.015f; + info.Shutter.PixelPersistence = ( 1.0f / 60.0f ); + break; + + case HmdType_CrystalCoveProto: + info.ProductName = "Oculus Rift Crystal Cove"; + info.ResolutionInPixels = Sizei ( 1920, 1080 ); + info.ScreenSizeInMeters = Sizef ( 0.12576f, 0.07074f ); + info.ScreenGapSizeInMeters = 0.0f; + info.CenterFromTopInMeters = info.ScreenSizeInMeters.h * 0.5f; + info.LensSeparationInMeters = 0.0635f; + info.Shutter.Type = HmdShutter_RollingRightToLeft; + info.Shutter.VsyncToNextVsync = ( 1.0f / 76.0f ); + info.Shutter.VsyncToFirstScanline = 0.0000273f; + info.Shutter.FirstScanlineToLastScanline = 0.0131033f; + info.Shutter.PixelSettleTime = 0.0f; + info.Shutter.PixelPersistence = 0.18f * info.Shutter.VsyncToNextVsync; + break; + + case HmdType_DK2: + info.ProductName = "Oculus Rift DK2"; + info.ResolutionInPixels = Sizei ( 1920, 1080 ); + info.ScreenSizeInMeters = Sizef ( 0.12576f, 0.07074f ); + info.ScreenGapSizeInMeters = 0.0f; + info.CenterFromTopInMeters = info.ScreenSizeInMeters.h * 0.5f; + info.LensSeparationInMeters = 0.0635f; + info.Shutter.Type = HmdShutter_RollingRightToLeft; + info.Shutter.VsyncToNextVsync = ( 1.0f / 76.0f ); + info.Shutter.VsyncToFirstScanline = 0.0000273f; + info.Shutter.FirstScanlineToLastScanline = 0.0131033f; + info.Shutter.PixelSettleTime = 0.0f; + info.Shutter.PixelPersistence = 0.18f * info.Shutter.VsyncToNextVsync; + break; + } + + return info; +} + + + +// profile may be NULL, in which case it uses the hard-coded defaults. +HmdRenderInfo GenerateHmdRenderInfoFromHmdInfo ( HMDInfo const &hmdInfo, + Profile const *profile /*=NULL*/, + DistortionEqnType distortionType /*= Distortion_CatmullRom10*/, + EyeCupType eyeCupOverride /*= EyeCup_LAST*/ ) +{ + HmdRenderInfo renderInfo; + + renderInfo.HmdType = hmdInfo.HmdType; + renderInfo.ResolutionInPixels = hmdInfo.ResolutionInPixels; + renderInfo.ScreenSizeInMeters = hmdInfo.ScreenSizeInMeters; + renderInfo.CenterFromTopInMeters = hmdInfo.CenterFromTopInMeters; + renderInfo.ScreenGapSizeInMeters = hmdInfo.ScreenGapSizeInMeters; + renderInfo.LensSeparationInMeters = hmdInfo.LensSeparationInMeters; + + OVR_ASSERT ( sizeof(renderInfo.Shutter) == sizeof(hmdInfo.Shutter) ); // Try to keep the files in sync! + renderInfo.Shutter.Type = hmdInfo.Shutter.Type; + renderInfo.Shutter.VsyncToNextVsync = hmdInfo.Shutter.VsyncToNextVsync; + renderInfo.Shutter.VsyncToFirstScanline = hmdInfo.Shutter.VsyncToFirstScanline; + renderInfo.Shutter.FirstScanlineToLastScanline = hmdInfo.Shutter.FirstScanlineToLastScanline; + renderInfo.Shutter.PixelSettleTime = hmdInfo.Shutter.PixelSettleTime; + renderInfo.Shutter.PixelPersistence = hmdInfo.Shutter.PixelPersistence; + + renderInfo.LensDiameterInMeters = 0.035f; + renderInfo.LensSurfaceToMidplateInMeters = 0.025f; + renderInfo.EyeCups = EyeCup_DK1A; + +#if 0 // Device settings are out of date - don't use them. 
+ if (Contents & Contents_Distortion) + { + memcpy(renderInfo.DistortionK, DistortionK, sizeof(float)*4); + renderInfo.DistortionEqn = Distortion_RecipPoly4; + } +#endif + + // Defaults in case of no user profile. + renderInfo.EyeLeft.NoseToPupilInMeters = 0.032f; + renderInfo.EyeLeft.ReliefInMeters = 0.012f; + + // 10mm eye-relief laser numbers for DK1 lenses. + // These are a decent seed for finding eye-relief and IPD. + // These are NOT used for rendering! + // Rendering distortions are now in GenerateLensConfigFromEyeRelief() + // So, if you're hacking in new distortions, don't do it here! + renderInfo.EyeLeft.Distortion.SetToIdentity(); + renderInfo.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.0449f; + renderInfo.EyeLeft.Distortion.Eqn = Distortion_RecipPoly4; + renderInfo.EyeLeft.Distortion.K[0] = 1.0f; + renderInfo.EyeLeft.Distortion.K[1] = -0.494165344f; + renderInfo.EyeLeft.Distortion.K[2] = 0.587046423f; + renderInfo.EyeLeft.Distortion.K[3] = -0.841887126f; + renderInfo.EyeLeft.Distortion.MaxR = 1.0f; + + renderInfo.EyeLeft.Distortion.ChromaticAberration[0] = -0.006f; + renderInfo.EyeLeft.Distortion.ChromaticAberration[1] = 0.0f; + renderInfo.EyeLeft.Distortion.ChromaticAberration[2] = 0.014f; + renderInfo.EyeLeft.Distortion.ChromaticAberration[3] = 0.0f; + + renderInfo.EyeRight = renderInfo.EyeLeft; + + + // Obtain data from profile. + if ( profile != NULL ) + { + char eyecup[16]; + if (profile->GetValue(OVR_KEY_EYE_CUP, eyecup, 16)) + SetEyeCup(&renderInfo, eyecup); + } + + switch ( hmdInfo.HmdType ) + { + case HmdType_None: + case HmdType_DKProto: + case HmdType_DK1: + // Slight hack to improve usability. + // If you have a DKHD-style lens profile enabled, + // but you plug in DK1 and forget to change the profile, + // obviously you don't want those lens numbers. + if ( ( renderInfo.EyeCups != EyeCup_DK1A ) && + ( renderInfo.EyeCups != EyeCup_DK1B ) && + ( renderInfo.EyeCups != EyeCup_DK1C ) ) + { + renderInfo.EyeCups = EyeCup_DK1A; + } + break; + + case HmdType_DKHD2Proto: + renderInfo.EyeCups = EyeCup_DKHD2A; + break; + case HmdType_CrystalCoveProto: + renderInfo.EyeCups = EyeCup_PinkA; + break; + case HmdType_DK2: + renderInfo.EyeCups = EyeCup_DK2A; + break; + default: + break; + } + + if ( eyeCupOverride != EyeCup_LAST ) + { + renderInfo.EyeCups = eyeCupOverride; + } + + switch ( renderInfo.EyeCups ) + { + case EyeCup_DK1A: + case EyeCup_DK1B: + case EyeCup_DK1C: + renderInfo.LensDiameterInMeters = 0.035f; + renderInfo.LensSurfaceToMidplateInMeters = 0.02357f; + // Not strictly lens-specific, but still wise to set a reasonable default for relief. + renderInfo.EyeLeft.ReliefInMeters = 0.010f; + renderInfo.EyeRight.ReliefInMeters = 0.010f; + break; + case EyeCup_DKHD2A: + renderInfo.LensDiameterInMeters = 0.035f; + renderInfo.LensSurfaceToMidplateInMeters = 0.02357f; + // Not strictly lens-specific, but still wise to set a reasonable default for relief. + renderInfo.EyeLeft.ReliefInMeters = 0.010f; + renderInfo.EyeRight.ReliefInMeters = 0.010f; + break; + case EyeCup_PinkA: + case EyeCup_DK2A: + renderInfo.LensDiameterInMeters = 0.04f; // approximate + renderInfo.LensSurfaceToMidplateInMeters = 0.01965f; + // Not strictly lens-specific, but still wise to set a reasonable default for relief. 
+ renderInfo.EyeLeft.ReliefInMeters = 0.012f;
+ renderInfo.EyeRight.ReliefInMeters = 0.012f;
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+ if ( profile != NULL )
+ {
+ // Set the customized user eye position
+ // TBD: Maybe we should separate custom camera positioning from custom distortion rendering ??
+ if (profile->GetBoolValue(OVR_KEY_CUSTOM_EYE_RENDER, true))
+ {
+ float eye2nose[2];
+ if (profile->GetFloatValues(OVR_KEY_EYE_TO_NOSE_DISTANCE, eye2nose, 2) == 2)
+ { // Load per-eye half-IPD
+ renderInfo.EyeLeft.NoseToPupilInMeters = eye2nose[0];
+ renderInfo.EyeRight.NoseToPupilInMeters = eye2nose[1];
+ }
+ else
+ { // Use a centered IPD instead
+ float ipd = profile->GetFloatValue(OVR_KEY_IPD, OVR_DEFAULT_IPD);
+ renderInfo.EyeLeft.NoseToPupilInMeters = 0.5f * ipd;
+ renderInfo.EyeRight.NoseToPupilInMeters = 0.5f * ipd;
+ }
+
+ float eye2plate[2];
+ if (profile->GetFloatValues(OVR_KEY_MAX_EYE_TO_PLATE_DISTANCE, eye2plate, 2) == 2)
+ { // Subtract the eye-cup height from the plate distance to get the eye-to-lens distance
+ // This measurement should be the distance at the maximum dial setting
+ // We still need to adjust with the dial offset
+ renderInfo.EyeLeft.ReliefInMeters = eye2plate[0] - renderInfo.LensSurfaceToMidplateInMeters;
+ renderInfo.EyeRight.ReliefInMeters = eye2plate[1] - renderInfo.LensSurfaceToMidplateInMeters;
+
+ // Adjust the eye relief with the dial setting (from the assumed max eye relief)
+ int dial = profile->GetIntValue(OVR_KEY_EYE_RELIEF_DIAL, -1);
+ if (dial >= 0)
+ {
+ renderInfo.EyeLeft.ReliefInMeters -= ((10 - dial) * 0.001f);
+ renderInfo.EyeRight.ReliefInMeters -= ((10 - dial) * 0.001f);
+ }
+ }
+ else
+ {
+ // Set the eye relief with the user configured dial setting
+ int dial = profile->GetIntValue(OVR_KEY_EYE_RELIEF_DIAL, -1);
+ if (dial >= 0)
+ { // Assume a default of 7 to 17 mm eye relief based on the dial. This corresponds
+ // to the sampled and tuned distortion range on the DK1.
+ renderInfo.EyeLeft.ReliefInMeters = 0.007f + (dial * 0.001f);
+ renderInfo.EyeRight.ReliefInMeters = 0.007f + (dial * 0.001f);
+ }
+ }
+ }
+ }
+
+ // Now that we know where the eyes are relative to the lenses, we can compute a distortion for each.
+ // TODO: incorporate lateral offset in distortion generation.
+ // TODO: we used a distortion to calculate eye-relief, and now we're making a distortion from that eye-relief. Close the loop!
+
+ for ( int eyeNum = 0; eyeNum < 2; eyeNum++ )
+ {
+ HmdRenderInfo::EyeConfig *pHmdEyeConfig = ( eyeNum == 0 ) ? &(renderInfo.EyeLeft) : &(renderInfo.EyeRight);
+
+ float eye_relief = pHmdEyeConfig->ReliefInMeters;
+ LensConfig distortionConfig = GenerateLensConfigFromEyeRelief ( eye_relief, renderInfo, distortionType );
+ pHmdEyeConfig->Distortion = distortionConfig;
+ }
+
+ return renderInfo;
+}
+
+
+LensConfig GenerateLensConfigFromEyeRelief ( float eyeReliefInMeters, HmdRenderInfo const &hmd, DistortionEqnType distortionType /*= Distortion_CatmullRom10*/ )
+{
+ struct DistortionDescriptor
+ {
+ float EyeRelief;
+ // The three places we're going to sample & lerp the curve at.
+ // One sample is always at 0.0, and the distortion scale should be 1.0 or else!
+ // Only use for poly4 numbers - CR has an implicit scale.
+ float SampleRadius[3];
+ // Where the distortion has actually been measured/calibrated out to.
+ // Don't try to hallucinate data out beyond here.
+ float MaxRadius;
+ // The config itself.
+ LensConfig Config; + }; + + DistortionDescriptor distortions[10]; + for ( int i = 0; i < sizeof(distortions)/sizeof(distortions[0]); i++ ) + { + distortions[i].Config.SetToIdentity(); + distortions[i].EyeRelief = 0.0f; + distortions[i].MaxRadius = 1.0f; + } + int numDistortions = 0; + int defaultDistortion = 0; // index of the default distortion curve to use if zero eye relief supplied + + if ( ( hmd.EyeCups == EyeCup_DK1A ) || + ( hmd.EyeCups == EyeCup_DK1B ) || + ( hmd.EyeCups == EyeCup_DK1C ) ) + { + + numDistortions = 0; + + // Tuned at minimum dial setting - extended to r^2 == 1.8 + distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; + distortions[numDistortions].EyeRelief = 0.012760465f - 0.005f; + distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; + distortions[numDistortions].Config.K[0] = 1.0000f; + distortions[numDistortions].Config.K[1] = 1.06505f; + distortions[numDistortions].Config.K[2] = 1.14725f; + distortions[numDistortions].Config.K[3] = 1.2705f; + distortions[numDistortions].Config.K[4] = 1.48f; + distortions[numDistortions].Config.K[5] = 1.87f; + distortions[numDistortions].Config.K[6] = 2.534f; + distortions[numDistortions].Config.K[7] = 3.6f; + distortions[numDistortions].Config.K[8] = 5.1f; + distortions[numDistortions].Config.K[9] = 7.4f; + distortions[numDistortions].Config.K[10] = 11.0f; + distortions[numDistortions].SampleRadius[0] = 0.222717149f; + distortions[numDistortions].SampleRadius[1] = 0.512249443f; + distortions[numDistortions].SampleRadius[2] = 0.712694878f; + distortions[numDistortions].MaxRadius = sqrt(1.8f); + defaultDistortion = numDistortions; // this is the default + numDistortions++; + + // Tuned at middle dial setting + distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; + distortions[numDistortions].EyeRelief = 0.012760465f; // my average eye-relief + distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; + distortions[numDistortions].Config.K[0] = 1.0f; + distortions[numDistortions].Config.K[1] = 1.032407264f; + distortions[numDistortions].Config.K[2] = 1.07160462f; + distortions[numDistortions].Config.K[3] = 1.11998388f; + distortions[numDistortions].Config.K[4] = 1.1808606f; + distortions[numDistortions].Config.K[5] = 1.2590494f; + distortions[numDistortions].Config.K[6] = 1.361915f; + distortions[numDistortions].Config.K[7] = 1.5014339f; + distortions[numDistortions].Config.K[8] = 1.6986004f; + distortions[numDistortions].Config.K[9] = 1.9940577f; + distortions[numDistortions].Config.K[10] = 2.4783147f; + distortions[numDistortions].SampleRadius[0] = 0.222717149f; + distortions[numDistortions].SampleRadius[1] = 0.512249443f; + distortions[numDistortions].SampleRadius[2] = 0.712694878f; + distortions[numDistortions].MaxRadius = 1.0f; + numDistortions++; + + // Tuned at maximum dial setting + distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; + distortions[numDistortions].EyeRelief = 0.012760465f + 0.005f; + distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; + distortions[numDistortions].Config.K[0] = 1.0102f; + distortions[numDistortions].Config.K[1] = 1.0371f; + distortions[numDistortions].Config.K[2] = 1.0831f; + distortions[numDistortions].Config.K[3] = 1.1353f; + distortions[numDistortions].Config.K[4] = 1.2f; + distortions[numDistortions].Config.K[5] = 1.2851f; + distortions[numDistortions].Config.K[6] = 1.3979f; + distortions[numDistortions].Config.K[7] = 1.56f; + distortions[numDistortions].Config.K[8] = 1.8f; + 
distortions[numDistortions].Config.K[9] = 2.25f; + distortions[numDistortions].Config.K[10] = 3.0f; + distortions[numDistortions].SampleRadius[0] = 0.222717149f; + distortions[numDistortions].SampleRadius[1] = 0.512249443f; + distortions[numDistortions].SampleRadius[2] = 0.712694878f; + distortions[numDistortions].MaxRadius = 1.0f; + numDistortions++; + + // Chromatic aberration doesn't seem to change with eye relief. + for ( int i = 0; i < numDistortions; i++ ) + { + distortions[i].Config.ChromaticAberration[0] = -0.006f; + distortions[i].Config.ChromaticAberration[1] = 0.0f; + distortions[i].Config.ChromaticAberration[2] = 0.014f; + distortions[i].Config.ChromaticAberration[3] = 0.0f; + } + } + else if ( hmd.EyeCups == EyeCup_DKHD2A ) + { + // Tuned DKHD2 lens + numDistortions = 0; + + distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; + distortions[numDistortions].EyeRelief = 0.010f; + distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; + distortions[numDistortions].Config.K[0] = 1.0f; + distortions[numDistortions].Config.K[1] = 1.0425f; + distortions[numDistortions].Config.K[2] = 1.0826f; + distortions[numDistortions].Config.K[3] = 1.130f; + distortions[numDistortions].Config.K[4] = 1.185f; + distortions[numDistortions].Config.K[5] = 1.250f; + distortions[numDistortions].Config.K[6] = 1.338f; + distortions[numDistortions].Config.K[7] = 1.455f; + distortions[numDistortions].Config.K[8] = 1.620f; + distortions[numDistortions].Config.K[9] = 1.840f; + distortions[numDistortions].Config.K[10] = 2.200f; + distortions[numDistortions].SampleRadius[0] = 0.222717149f; + distortions[numDistortions].SampleRadius[1] = 0.512249443f; + distortions[numDistortions].SampleRadius[2] = 0.712694878f; + distortions[numDistortions].MaxRadius = 1.0f; + + distortions[numDistortions].SampleRadius[0] = 0.405405405f; + distortions[numDistortions].SampleRadius[1] = 0.675675676f; + distortions[numDistortions].SampleRadius[2] = 0.945945946f; + defaultDistortion = numDistortions; // this is the default + numDistortions++; + + distortions[numDistortions] = distortions[0]; + distortions[numDistortions].EyeRelief = 0.020f; + numDistortions++; + + // Chromatic aberration doesn't seem to change with eye relief. 
+ for ( int i = 0; i < numDistortions; i++ ) + { + distortions[i].Config.ChromaticAberration[0] = -0.006f; + distortions[i].Config.ChromaticAberration[1] = 0.0f; + distortions[i].Config.ChromaticAberration[2] = 0.014f; + distortions[i].Config.ChromaticAberration[3] = 0.0f; + } + } + else if ( hmd.EyeCups == EyeCup_PinkA || hmd.EyeCups == EyeCup_DK2A ) + { + // Tuned Crystal Cove & DK2 Lens (CES & GDC) + numDistortions = 0; + + distortions[numDistortions].EyeRelief = 0.010f; + distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.036f; + + distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; + distortions[numDistortions].Config.K[0] = 1.003f; + distortions[numDistortions].Config.K[1] = 1.02f; + distortions[numDistortions].Config.K[2] = 1.042f; + distortions[numDistortions].Config.K[3] = 1.066f; + distortions[numDistortions].Config.K[4] = 1.094f; //1.0945f; + distortions[numDistortions].Config.K[5] = 1.126f; //1.127f; + distortions[numDistortions].Config.K[6] = 1.162f; //1.167f; + distortions[numDistortions].Config.K[7] = 1.203f; //1.218f; + distortions[numDistortions].Config.K[8] = 1.25f; //1.283f; + distortions[numDistortions].Config.K[9] = 1.31f; //1.37f; + distortions[numDistortions].Config.K[10] = 1.38f; //1.48f; + distortions[numDistortions].MaxRadius = 1.0f; + + + distortions[numDistortions].SampleRadius[0] = 0.405405405f; + distortions[numDistortions].SampleRadius[1] = 0.675675676f; + distortions[numDistortions].SampleRadius[2] = 0.945945946f; + defaultDistortion = numDistortions; // this is the default + numDistortions++; + + distortions[numDistortions] = distortions[0]; + distortions[numDistortions].EyeRelief = 0.020f; + numDistortions++; + + // Chromatic aberration doesn't seem to change with eye relief. + for ( int i = 0; i < numDistortions; i++ ) + { + distortions[i].Config.ChromaticAberration[0] = -0.015f; + distortions[i].Config.ChromaticAberration[1] = -0.02f; + distortions[i].Config.ChromaticAberration[2] = 0.025f; + distortions[i].Config.ChromaticAberration[3] = 0.02f; + } + } + else + { + // Unknown lens. + // Use DK1 black lens settings, just so we can continue to run with something. + distortions[0].EyeRelief = 0.005f; + distortions[0].Config.MetersPerTanAngleAtCenter = 0.043875f; + distortions[0].Config.Eqn = Distortion_RecipPoly4; + distortions[0].Config.K[0] = 1.0f; + distortions[0].Config.K[1] = -0.3999f; + distortions[0].Config.K[2] = 0.2408f; + distortions[0].Config.K[3] = -0.4589f; + distortions[0].SampleRadius[0] = 0.2f; + distortions[0].SampleRadius[1] = 0.4f; + distortions[0].SampleRadius[2] = 0.6f; + + distortions[1] = distortions[0]; + distortions[1].EyeRelief = 0.010f; + numDistortions = 2; + + // Chromatic aberration doesn't seem to change with eye relief. + for ( int i = 0; i < numDistortions; i++ ) + { + // These are placeholder, they have not been tuned! 
+ distortions[i].Config.ChromaticAberration[0] = 0.0f; + distortions[i].Config.ChromaticAberration[1] = 0.0f; + distortions[i].Config.ChromaticAberration[2] = 0.0f; + distortions[i].Config.ChromaticAberration[3] = 0.0f; + } + } + + OVR_ASSERT ( numDistortions < (sizeof(distortions)/sizeof(distortions[0])) ); + + + DistortionDescriptor *pUpper = NULL; + DistortionDescriptor *pLower = NULL; + float lerpVal = 0.0f; + if (eyeReliefInMeters == 0) + { // Use a constant default distortion if an invalid eye-relief is supplied + pLower = &(distortions[defaultDistortion]); + pUpper = &(distortions[defaultDistortion]); + lerpVal = 0.0f; + } + else + { + for ( int i = 0; i < numDistortions-1; i++ ) + { + OVR_ASSERT ( distortions[i].EyeRelief < distortions[i+1].EyeRelief ); + if ( ( distortions[i].EyeRelief <= eyeReliefInMeters ) && ( distortions[i+1].EyeRelief > eyeReliefInMeters ) ) + { + pLower = &(distortions[i]); + pUpper = &(distortions[i+1]); + lerpVal = ( eyeReliefInMeters - pLower->EyeRelief ) / ( pUpper->EyeRelief - pLower->EyeRelief ); + // No break here - I want the ASSERT to check everything every time! + } + } + } + + if ( pUpper == NULL ) + { +#if 0 + // Outside the range, so extrapolate rather than interpolate. + if ( distortions[0].EyeRelief > eyeReliefInMeters ) + { + pLower = &(distortions[0]); + pUpper = &(distortions[1]); + } + else + { + OVR_ASSERT ( distortions[numDistortions-1].EyeRelief <= eyeReliefInMeters ); + pLower = &(distortions[numDistortions-2]); + pUpper = &(distortions[numDistortions-1]); + } + lerpVal = ( eyeReliefInMeters - pLower->EyeRelief ) / ( pUpper->EyeRelief - pLower->EyeRelief ); +#else + // Do not extrapolate, just clamp - slightly worried about people putting in bogus settings. + if ( distortions[0].EyeRelief > eyeReliefInMeters ) + { + pLower = &(distortions[0]); + pUpper = &(distortions[0]); + } + else + { + OVR_ASSERT ( distortions[numDistortions-1].EyeRelief <= eyeReliefInMeters ); + pLower = &(distortions[numDistortions-1]); + pUpper = &(distortions[numDistortions-1]); + } + lerpVal = 0.0f; +#endif + } + float invLerpVal = 1.0f - lerpVal; + + pLower->Config.MaxR = pLower->MaxRadius; + pUpper->Config.MaxR = pUpper->MaxRadius; + + LensConfig result; + // Where is the edge of the lens - no point modelling further than this. + float maxValidRadius = invLerpVal * pLower->MaxRadius + lerpVal * pUpper->MaxRadius; + result.MaxR = maxValidRadius; + + switch ( distortionType ) + { + case Distortion_Poly4: + // Deprecated + OVR_ASSERT ( false ); + break; + case Distortion_RecipPoly4:{ + // Lerp control points and fit an equation to them. + float fitX[4]; + float fitY[4]; + fitX[0] = 0.0f; + fitY[0] = 1.0f; + for ( int ctrlPt = 1; ctrlPt < 4; ctrlPt ++ ) + { + float radiusLerp = invLerpVal * pLower->SampleRadius[ctrlPt-1] + lerpVal * pUpper->SampleRadius[ctrlPt-1]; + float radiusLerpSq = radiusLerp * radiusLerp; + float fitYLower = pLower->Config.DistortionFnScaleRadiusSquared ( radiusLerpSq ); + float fitYUpper = pUpper->Config.DistortionFnScaleRadiusSquared ( radiusLerpSq ); + fitX[ctrlPt] = radiusLerpSq; + fitY[ctrlPt] = 1.0f / ( invLerpVal * fitYLower + lerpVal * fitYUpper ); + } + + result.Eqn = Distortion_RecipPoly4; + bool bSuccess = FitCubicPolynomial ( result.K, fitX, fitY ); + OVR_ASSERT ( bSuccess ); + OVR_UNUSED ( bSuccess ); + + // Set up the fast inverse. 
+ float maxRDist = result.DistortionFn ( maxValidRadius ); + result.MaxInvR = maxRDist; + result.SetUpInverseApprox(); + + }break; + + case Distortion_CatmullRom10:{ + + // Evenly sample & lerp points on the curve. + const int NumSegments = LensConfig::NumCoefficients; + result.MaxR = maxValidRadius; + // Directly interpolate the K0 values + result.K[0] = invLerpVal * pLower->Config.K[0] + lerpVal * pUpper->Config.K[0]; + + // Sample and interpolate the distortion curves to derive K[1] ... K[n] + for ( int ctrlPt = 1; ctrlPt < NumSegments; ctrlPt++ ) + { + float radiusSq = ( (float)ctrlPt / (float)(NumSegments-1) ) * maxValidRadius * maxValidRadius; + float fitYLower = pLower->Config.DistortionFnScaleRadiusSquared ( radiusSq ); + float fitYUpper = pUpper->Config.DistortionFnScaleRadiusSquared ( radiusSq ); + float fitLerp = invLerpVal * fitYLower + lerpVal * fitYUpper; + result.K[ctrlPt] = fitLerp; + } + + result.Eqn = Distortion_CatmullRom10; + + for ( int ctrlPt = 1; ctrlPt < NumSegments; ctrlPt++ ) + { + float radiusSq = ( (float)ctrlPt / (float)(NumSegments-1) ) * maxValidRadius * maxValidRadius; + float val = result.DistortionFnScaleRadiusSquared ( radiusSq ); + OVR_ASSERT ( Alg::Abs ( val - result.K[ctrlPt] ) < 0.0001f ); + OVR_UNUSED1(val); // For release build. + } + + // Set up the fast inverse. + float maxRDist = result.DistortionFn ( maxValidRadius ); + result.MaxInvR = maxRDist; + result.SetUpInverseApprox(); + + }break; + + default: OVR_ASSERT ( false ); break; + } + + + // Chromatic aberration. + result.ChromaticAberration[0] = invLerpVal * pLower->Config.ChromaticAberration[0] + lerpVal * pUpper->Config.ChromaticAberration[0]; + result.ChromaticAberration[1] = invLerpVal * pLower->Config.ChromaticAberration[1] + lerpVal * pUpper->Config.ChromaticAberration[1]; + result.ChromaticAberration[2] = invLerpVal * pLower->Config.ChromaticAberration[2] + lerpVal * pUpper->Config.ChromaticAberration[2]; + result.ChromaticAberration[3] = invLerpVal * pLower->Config.ChromaticAberration[3] + lerpVal * pUpper->Config.ChromaticAberration[3]; + + // Scale. + result.MetersPerTanAngleAtCenter = pLower->Config.MetersPerTanAngleAtCenter * invLerpVal + + pUpper->Config.MetersPerTanAngleAtCenter * lerpVal; + /* + // Commented out - Causes ASSERT with no HMD plugged in +#ifdef OVR_BUILD_DEBUG + if ( distortionType == Distortion_CatmullRom10 ) + { + TestSaveLoadLensConfig ( result ); + } +#endif + */ + return result; +} + + + + + +DistortionRenderDesc CalculateDistortionRenderDesc ( StereoEye eyeType, HmdRenderInfo const &hmd, + const LensConfig *pLensOverride /*= NULL */ ) +{ + // From eye relief, IPD and device characteristics, we get the distortion mapping. + // This distortion does the following things: + // 1. It undoes the distortion that happens at the edges of the lens. + // 2. It maps the undistorted field into "retina" space. + // So the input is a pixel coordinate - the physical pixel on the display itself. + // The output is the real-world direction of the ray from this pixel as it comes out of the lens and hits the eye. + // However we typically think of rays "coming from" the eye, so the direction (TanAngleX,TanAngleY,1) is the direction + // that the pixel appears to be in real-world space, where AngleX and AngleY are relative to the straight-ahead vector. + // If your renderer is a raytracer, you can use this vector directly (normalize as appropriate). 
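+ // (Minimal sketch of that ray construction, assuming OVR's Vector3f provides a Normalized() helper:
+ // Vector2f tanEyeAngle = TransformScreenNDCToTanFovSpace ( distortion, screenNDC );
+ // Vector3f rayDir = Vector3f ( tanEyeAngle.x, tanEyeAngle.y, 1.0f ).Normalized();
+ // with screenNDC being the pixel position in [-1,1] across the eye's half of the display.)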
+ // However in standard rasterisers, we have rendered a 2D image and are putting it in front of the eye, + // so we then need a mapping from this space to the [-1,1] UV coordinate space, which depends on exactly + // where "in space" the app wants to put that rendertarget. + // Where in space, and how large this rendertarget is, is completely up to the app and/or user, + // though of course we can provide some useful hints. + + // TODO: Use IPD and eye relief to modify distortion (i.e. non-radial component) + // TODO: cope with lenses that don't produce collimated light. + // This means that IPD relative to the lens separation changes the light vergence, + // and so we actually need to change where the image is displayed. + + const HmdRenderInfo::EyeConfig &hmdEyeConfig = ( eyeType == StereoEye_Left ) ? hmd.EyeLeft : hmd.EyeRight; + + DistortionRenderDesc localDistortion; + localDistortion.Lens = hmdEyeConfig.Distortion; + + if ( pLensOverride != NULL ) + { + localDistortion.Lens = *pLensOverride; + } + + Sizef pixelsPerMeter(hmd.ResolutionInPixels.w / ( hmd.ScreenSizeInMeters.w - hmd.ScreenGapSizeInMeters ), + hmd.ResolutionInPixels.h / hmd.ScreenSizeInMeters.h); + + localDistortion.PixelsPerTanAngleAtCenter = (pixelsPerMeter * localDistortion.Lens.MetersPerTanAngleAtCenter).ToVector(); + // Same thing, scaled to [-1,1] for each eye, rather than pixels. + + localDistortion.TanEyeAngleScale = Vector2f(0.25f, 0.5f).EntrywiseMultiply( + (hmd.ScreenSizeInMeters / localDistortion.Lens.MetersPerTanAngleAtCenter).ToVector()); + + // <--------------left eye------------------><-ScreenGapSizeInMeters-><--------------right eye-----------------> + // <------------------------------------------ScreenSizeInMeters.Width-----------------------------------------> + // <----------------LensSeparationInMeters---------------> + // <--centerFromLeftInMeters-> + // ^ + // Center of lens + + // Find the lens centers in scale of [-1,+1] (NDC) in left eye. 
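+ // (Worked example with the DK1 numbers above: ScreenSizeInMeters.w = 0.1498, zero screen gap and
+ // LensSeparationInMeters = 0.0635 give visibleWidthOfOneEye = 0.0749 and centerFromLeftInMeters = 0.04315,
+ // so LensCenter.x = 0.04315 / 0.0749 * 2 - 1 ~= 0.152; CenterFromTopInMeters = 0.0468 on a
+ // 0.0936m-tall screen gives LensCenter.y = 0.)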
+ float visibleWidthOfOneEye = 0.5f * ( hmd.ScreenSizeInMeters.w - hmd.ScreenGapSizeInMeters ); + float centerFromLeftInMeters = ( hmd.ScreenSizeInMeters.w - hmd.LensSeparationInMeters ) * 0.5f; + localDistortion.LensCenter.x = ( centerFromLeftInMeters / visibleWidthOfOneEye ) * 2.0f - 1.0f; + localDistortion.LensCenter.y = ( hmd.CenterFromTopInMeters / hmd.ScreenSizeInMeters.h ) * 2.0f - 1.0f; + if ( eyeType == StereoEye_Right ) + { + localDistortion.LensCenter.x = -localDistortion.LensCenter.x; + } + + return localDistortion; +} + +FovPort CalculateFovFromEyePosition ( float eyeReliefInMeters, + float offsetToRightInMeters, + float offsetDownwardsInMeters, + float lensDiameterInMeters, + float extraEyeRotationInRadians /*= 0.0f*/ ) +{ + // 2D view of things: + // |-| <--- offsetToRightInMeters (in this case, it is negative) + // |=======C=======| <--- lens surface (C=center) + // \ | _/ + // \ R _/ + // \ | _/ + // \ | _/ + // \|/ + // O <--- center of pupil + + // (technically the lens is round rather than square, so it's not correct to + // separate vertical and horizontal like this, but it's close enough) + float halfLensDiameter = lensDiameterInMeters * 0.5f; + FovPort fovPort; + fovPort.UpTan = ( halfLensDiameter + offsetDownwardsInMeters ) / eyeReliefInMeters; + fovPort.DownTan = ( halfLensDiameter - offsetDownwardsInMeters ) / eyeReliefInMeters; + fovPort.LeftTan = ( halfLensDiameter + offsetToRightInMeters ) / eyeReliefInMeters; + fovPort.RightTan = ( halfLensDiameter - offsetToRightInMeters ) / eyeReliefInMeters; + + if ( extraEyeRotationInRadians > 0.0f ) + { + // That's the basic looking-straight-ahead eye position relative to the lens. + // But if you look left, the pupil moves left as the eyeball rotates, which + // means you can see more to the right than this geometry suggests. + // So add in the bounds for the extra movement of the pupil. + + // Beyond 30 degrees does not increase FOV because the pupil starts moving backwards more than sideways. + extraEyeRotationInRadians = Alg::Min ( DegreeToRad ( 30.0f ), Alg::Max ( 0.0f, extraEyeRotationInRadians ) ); + + // The rotation of the eye is a bit more complex than a simple circle. The center of rotation + // at 13.5mm from cornea is slightly further back than the actual center of the eye. 
+ // Additionally the rotation contains a small lateral component as the muscles pull the eye + const float eyeballCenterToPupil = 0.0135f; // center of eye rotation + const float eyeballLateralPull = 0.001f * (extraEyeRotationInRadians / DegreeToRad ( 30.0f)); // lateral motion as linear function + float extraTranslation = eyeballCenterToPupil * sinf ( extraEyeRotationInRadians ) + eyeballLateralPull; + float extraRelief = eyeballCenterToPupil * ( 1.0f - cosf ( extraEyeRotationInRadians ) ); + + fovPort.UpTan = Alg::Max ( fovPort.UpTan , ( halfLensDiameter + offsetDownwardsInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); + fovPort.DownTan = Alg::Max ( fovPort.DownTan , ( halfLensDiameter - offsetDownwardsInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); + fovPort.LeftTan = Alg::Max ( fovPort.LeftTan , ( halfLensDiameter + offsetToRightInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); + fovPort.RightTan = Alg::Max ( fovPort.RightTan, ( halfLensDiameter - offsetToRightInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); + } + + return fovPort; +} + + + +FovPort CalculateFovFromHmdInfo ( StereoEye eyeType, + DistortionRenderDesc const &distortion, + HmdRenderInfo const &hmd, + float extraEyeRotationInRadians /*= 0.0f*/ ) +{ + FovPort fovPort; + float eyeReliefInMeters; + float offsetToRightInMeters; + if ( eyeType == StereoEye_Right ) + { + eyeReliefInMeters = hmd.EyeRight.ReliefInMeters; + offsetToRightInMeters = hmd.EyeRight.NoseToPupilInMeters - 0.5f * hmd.LensSeparationInMeters; + } + else + { + eyeReliefInMeters = hmd.EyeLeft.ReliefInMeters; + offsetToRightInMeters = -(hmd.EyeLeft.NoseToPupilInMeters - 0.5f * hmd.LensSeparationInMeters); + } + + // Central view. + fovPort = CalculateFovFromEyePosition ( eyeReliefInMeters, + offsetToRightInMeters, + 0.0f, + hmd.LensDiameterInMeters, + extraEyeRotationInRadians ); + + // clamp to the screen + fovPort = ClampToPhysicalScreenFov ( eyeType, distortion, fovPort ); + + return fovPort; +} + + + +FovPort GetPhysicalScreenFov ( StereoEye eyeType, DistortionRenderDesc const &distortion ) +{ + OVR_UNUSED1 ( eyeType ); + + FovPort resultFovPort; + + // Figure out the boundaries of the screen. We take the middle pixel of the screen, + // move to each of the four screen edges, and transform those back into TanAngle space. + Vector2f dmiddle = distortion.LensCenter; + + // The gotcha is that for some distortion functions, the map will "wrap around" + // for screen pixels that are not actually visible to the user (especially on DK1, + // which has a lot of invisible pixels), and map to pixels that are close to the middle. + // This means the edges of the screen will actually be + // "closer" than the visible bounds, so we'll clip too aggressively. + + // Solution - step gradually towards the boundary, noting the maximum distance. 
+ struct FunctionHider
+ {
+ static FovPort FindRange ( Vector2f from, Vector2f to, int numSteps,
+ DistortionRenderDesc const &distortion )
+ {
+ FovPort result;
+ result.UpTan = 0.0f;
+ result.DownTan = 0.0f;
+ result.LeftTan = 0.0f;
+ result.RightTan = 0.0f;
+
+ float stepScale = 1.0f / ( numSteps - 1 );
+ for ( int step = 0; step < numSteps; step++ )
+ {
+ float lerpFactor = stepScale * (float)step;
+ Vector2f sample = from + (to - from) * lerpFactor;
+ Vector2f tanEyeAngle = TransformScreenNDCToTanFovSpace ( distortion, sample );
+
+ result.LeftTan = Alg::Max ( result.LeftTan, -tanEyeAngle.x );
+ result.RightTan = Alg::Max ( result.RightTan, tanEyeAngle.x );
+ result.UpTan = Alg::Max ( result.UpTan, -tanEyeAngle.y );
+ result.DownTan = Alg::Max ( result.DownTan, tanEyeAngle.y );
+ }
+ return result;
+ }
+ };
+
+ FovPort leftFovPort = FunctionHider::FindRange( dmiddle, Vector2f( -1.0f, dmiddle.y ), 10, distortion );
+ FovPort rightFovPort = FunctionHider::FindRange( dmiddle, Vector2f( 1.0f, dmiddle.y ), 10, distortion );
+ FovPort upFovPort = FunctionHider::FindRange( dmiddle, Vector2f( dmiddle.x, -1.0f ), 10, distortion );
+ FovPort downFovPort = FunctionHider::FindRange( dmiddle, Vector2f( dmiddle.x, 1.0f ), 10, distortion );
+
+ resultFovPort.LeftTan = leftFovPort.LeftTan;
+ resultFovPort.RightTan = rightFovPort.RightTan;
+ resultFovPort.UpTan = upFovPort.UpTan;
+ resultFovPort.DownTan = downFovPort.DownTan;
+
+ return resultFovPort;
+}
+
+FovPort ClampToPhysicalScreenFov( StereoEye eyeType, DistortionRenderDesc const &distortion,
+ FovPort inputFovPort )
+{
+ FovPort resultFovPort;
+ FovPort physicalFovPort = GetPhysicalScreenFov ( eyeType, distortion );
+ resultFovPort.LeftTan = Alg::Min ( inputFovPort.LeftTan, physicalFovPort.LeftTan );
+ resultFovPort.RightTan = Alg::Min ( inputFovPort.RightTan, physicalFovPort.RightTan );
+ resultFovPort.UpTan = Alg::Min ( inputFovPort.UpTan, physicalFovPort.UpTan );
+ resultFovPort.DownTan = Alg::Min ( inputFovPort.DownTan, physicalFovPort.DownTan );
+
+ return resultFovPort;
+}
+
+Sizei CalculateIdealPixelSize ( StereoEye eyeType, DistortionRenderDesc const &distortion,
+ FovPort tanHalfFov, float pixelsPerDisplayPixel )
+{
+ OVR_UNUSED(eyeType); // might be useful in the future if we do overlapping fovs
+
+ Sizei result;
+ // TODO: if the app passes in a FOV that doesn't cover the centre, use the distortion values for the nearest edge/corner to match pixel size.
+ result.w = (int)(0.5f + pixelsPerDisplayPixel * distortion.PixelsPerTanAngleAtCenter.x * ( tanHalfFov.LeftTan + tanHalfFov.RightTan ) );
+ result.h = (int)(0.5f + pixelsPerDisplayPixel * distortion.PixelsPerTanAngleAtCenter.y * ( tanHalfFov.UpTan + tanHalfFov.DownTan ) );
+ return result;
+}
+
+Recti GetFramebufferViewport ( StereoEye eyeType, HmdRenderInfo const &hmd )
+{
+ Recti result;
+ result.w = hmd.ResolutionInPixels.w/2;
+ result.h = hmd.ResolutionInPixels.h;
+ result.x = 0;
+ result.y = 0;
+ if ( eyeType == StereoEye_Right )
+ {
+ result.x = (hmd.ResolutionInPixels.w+1)/2; // Round up, not down.
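+ // (e.g. on the 1280x800 DK1 this yields a 640x800 viewport at x = 0 for the left eye and at
+ // x = 640 for the right; the +1 only matters for panels with an odd pixel width.)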
+ } + return result; +} + + +ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort tanHalfFov ) +{ + float projXScale = 2.0f / ( tanHalfFov.LeftTan + tanHalfFov.RightTan ); + float projXOffset = ( tanHalfFov.LeftTan - tanHalfFov.RightTan ) * projXScale * 0.5f; + float projYScale = 2.0f / ( tanHalfFov.UpTan + tanHalfFov.DownTan ); + float projYOffset = ( tanHalfFov.UpTan - tanHalfFov.DownTan ) * projYScale * 0.5f; + + ScaleAndOffset2D result; + result.Scale = Vector2f(projXScale, projYScale); + result.Offset = Vector2f(projXOffset, projYOffset); + // Hey - why is that Y.Offset negated? + // It's because a projection matrix transforms from world coords with Y=up, + // whereas this is from NDC which is Y=down. + + return result; +} + + +ScaleAndOffset2D CreateUVScaleAndOffsetfromNDCScaleandOffset ( ScaleAndOffset2D scaleAndOffsetNDC, + Recti renderedViewport, + Sizei renderTargetSize ) +{ + // scaleAndOffsetNDC takes you to NDC space [-1,+1] within the given viewport on the rendertarget. + // We want a scale to instead go to actual UV coordinates you can sample with, + // which need [0,1] and ignore the viewport. + ScaleAndOffset2D result; + // Scale [-1,1] to [0,1] + result.Scale = scaleAndOffsetNDC.Scale * 0.5f; + result.Offset = scaleAndOffsetNDC.Offset * 0.5f + Vector2f(0.5f); + + // ...but we will have rendered to a subsection of the RT, so scale for that. + Vector2f scale( (float)renderedViewport.w / (float)renderTargetSize.w, + (float)renderedViewport.h / (float)renderTargetSize.h ); + Vector2f offset( (float)renderedViewport.x / (float)renderTargetSize.w, + (float)renderedViewport.y / (float)renderTargetSize.h ); + + result.Scale = result.Scale.EntrywiseMultiply(scale); + result.Offset = result.Offset.EntrywiseMultiply(scale) + offset; + return result; +} + + + +Matrix4f CreateProjection( bool rightHanded, FovPort tanHalfFov, + float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/ ) +{ + // A projection matrix is very like a scaling from NDC, so we can start with that. + ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov ( tanHalfFov ); + + float handednessScale = 1.0f; + if ( rightHanded ) + { + handednessScale = -1.0f; + } + + Matrix4f projection; + // Produces X result, mapping clip edges to [-w,+w] + projection.M[0][0] = scaleAndOffset.Scale.x; + projection.M[0][1] = 0.0f; + projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x; + projection.M[0][3] = 0.0f; + + // Produces Y result, mapping clip edges to [-w,+w] + // Hey - why is that YOffset negated? + // It's because a projection matrix transforms from world coords with Y=up, + // whereas this is derived from an NDC scaling, which is Y=down. + projection.M[1][0] = 0.0f; + projection.M[1][1] = scaleAndOffset.Scale.y; + projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y; + projection.M[1][3] = 0.0f; + + // Produces Z-buffer result - app needs to fill this in with whatever Z range it wants. + // We'll just use some defaults for now. 
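+ // (Quick sanity check with the default zNear = 0.01f, zFar = 10000.0f in the right-handed case:
+ // M[2][2] ~= -1.000001 and M[2][3] ~= -0.01, and since M[3][2] = -1 the post-divide depth works
+ // out to 0 at z = -zNear and 1 at z = -zFar, i.e. a [0,1] depth range.)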
+ projection.M[2][0] = 0.0f; + projection.M[2][1] = 0.0f; + projection.M[2][2] = -handednessScale * zFar / (zNear - zFar); + projection.M[2][3] = (zFar * zNear) / (zNear - zFar); + + // Produces W result (= Z in) + projection.M[3][0] = 0.0f; + projection.M[3][1] = 0.0f; + projection.M[3][2] = handednessScale; + projection.M[3][3] = 0.0f; + + return projection; +} + + +Matrix4f CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType, + float tanHalfFovX, float tanHalfFovY, + float unitsX, float unitsY, + float distanceFromCamera, float interpupillaryDistance, + Matrix4f const &projection, + float zNear /*= 0.0f*/, float zFar /*= 0.0f*/ ) +{ + OVR_UNUSED1 ( rightHanded ); + + float orthoHorizontalOffset = interpupillaryDistance * 0.5f / distanceFromCamera; + switch ( eyeType ) + { + case StereoEye_Center: + orthoHorizontalOffset = 0.0f; + break; + case StereoEye_Left: + break; + case StereoEye_Right: + orthoHorizontalOffset = -orthoHorizontalOffset; + break; + default: OVR_ASSERT ( false ); break; + } + + // Current projection maps real-world vector (x,y,1) to the RT. + // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to + // the physical [-orthoHalfFov,orthoHalfFov] + // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means + // we don't have to feed in Z=1 all the time. + // The horizontal offset math is a little hinky because the destination is + // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset] + // So we need to first map [-FovPixels/2,FovPixels/2] to + // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]: + // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset; + // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset; + // But then we need the sam mapping as the existing projection matrix, i.e. + // x2 = x1 * Projection.M[0][0] + Projection.M[0][2]; + // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2]; + // = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels + + // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]; + // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels and + // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]. + + float orthoScaleX = 2.0f * tanHalfFovX / unitsX; + float orthoScaleY = 2.0f * tanHalfFovY / unitsY; + Matrix4f ortho; + ortho.M[0][0] = projection.M[0][0] * orthoScaleX; + ortho.M[0][1] = 0.0f; + ortho.M[0][2] = 0.0f; + ortho.M[0][3] = -projection.M[0][2] + ( orthoHorizontalOffset * projection.M[0][0] ); + + ortho.M[1][0] = 0.0f; + ortho.M[1][1] = -projection.M[1][1] * orthoScaleY; // Note sign flip (text rendering uses Y=down). + ortho.M[1][2] = 0.0f; + ortho.M[1][3] = -projection.M[1][2]; + + if ( fabsf ( zNear - zFar ) < 0.001f ) + { + ortho.M[2][0] = 0.0f; + ortho.M[2][1] = 0.0f; + ortho.M[2][2] = 0.0f; + ortho.M[2][3] = zFar; + } + else + { + ortho.M[2][0] = 0.0f; + ortho.M[2][1] = 0.0f; + ortho.M[2][2] = zFar / (zNear - zFar); + ortho.M[2][3] = (zFar * zNear) / (zNear - zFar); + } + + // No perspective correction for ortho. + ortho.M[3][0] = 0.0f; + ortho.M[3][1] = 0.0f; + ortho.M[3][2] = 0.0f; + ortho.M[3][3] = 1.0f; + + return ortho; +} + + +//----------------------------------------------------------------------------------- +// A set of "forward-mapping" functions, mapping from framebuffer space to real-world and/or texture space. 
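+// (Illustrative sketch of chaining these, given a filled-in DistortionRenderDesc 'distortion', the eye's
+// StereoEyeParams 'eyeParams' and its distortion viewport 'vp':
+// Vector2f ndc = TransformScreenPixelToScreenNDC ( vp, pixel );
+// Vector2f tanEyeAngle = TransformScreenNDCToTanFovSpace ( distortion, ndc );
+// Vector2f texUV = TransformTanFovSpaceToRendertargetTexUV ( eyeParams, tanEyeAngle );
+// TransformScreenPixelToRendertargetTexUV below performs exactly this composition.)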
+ +// This mimics the first half of the distortion shader's function. +Vector2f TransformScreenNDCToTanFovSpace( DistortionRenderDesc const &distortion, + const Vector2f &framebufferNDC ) +{ + // Scale to TanHalfFov space, but still distorted. + Vector2f tanEyeAngleDistorted; + tanEyeAngleDistorted.x = ( framebufferNDC.x - distortion.LensCenter.x ) * distortion.TanEyeAngleScale.x; + tanEyeAngleDistorted.y = ( framebufferNDC.y - distortion.LensCenter.y ) * distortion.TanEyeAngleScale.y; + // Distort. + float radiusSquared = ( tanEyeAngleDistorted.x * tanEyeAngleDistorted.x ) + + ( tanEyeAngleDistorted.y * tanEyeAngleDistorted.y ); + float distortionScale = distortion.Lens.DistortionFnScaleRadiusSquared ( radiusSquared ); + Vector2f tanEyeAngle; + tanEyeAngle.x = tanEyeAngleDistorted.x * distortionScale; + tanEyeAngle.y = tanEyeAngleDistorted.y * distortionScale; + + return tanEyeAngle; +} + +// Same, with chromatic aberration correction. +void TransformScreenNDCToTanFovSpaceChroma ( Vector2f *resultR, Vector2f *resultG, Vector2f *resultB, + DistortionRenderDesc const &distortion, + const Vector2f &framebufferNDC ) +{ + // Scale to TanHalfFov space, but still distorted. + Vector2f tanEyeAngleDistorted; + tanEyeAngleDistorted.x = ( framebufferNDC.x - distortion.LensCenter.x ) * distortion.TanEyeAngleScale.x; + tanEyeAngleDistorted.y = ( framebufferNDC.y - distortion.LensCenter.y ) * distortion.TanEyeAngleScale.y; + // Distort. + float radiusSquared = ( tanEyeAngleDistorted.x * tanEyeAngleDistorted.x ) + + ( tanEyeAngleDistorted.y * tanEyeAngleDistorted.y ); + Vector3f distortionScales = distortion.Lens.DistortionFnScaleRadiusSquaredChroma ( radiusSquared ); + *resultR = tanEyeAngleDistorted * distortionScales.x; + *resultG = tanEyeAngleDistorted * distortionScales.y; + *resultB = tanEyeAngleDistorted * distortionScales.z; +} + +// This mimics the second half of the distortion shader's function. +Vector2f TransformTanFovSpaceToRendertargetTexUV( StereoEyeParams const &eyeParams, + Vector2f const &tanEyeAngle ) +{ + Vector2f textureUV; + textureUV.x = tanEyeAngle.x * eyeParams.EyeToSourceUV.Scale.x + eyeParams.EyeToSourceUV.Offset.x; + textureUV.y = tanEyeAngle.y * eyeParams.EyeToSourceUV.Scale.y + eyeParams.EyeToSourceUV.Offset.y; + return textureUV; +} + +Vector2f TransformTanFovSpaceToRendertargetNDC( StereoEyeParams const &eyeParams, + Vector2f const &tanEyeAngle ) +{ + Vector2f textureNDC; + textureNDC.x = tanEyeAngle.x * eyeParams.EyeToSourceNDC.Scale.x + eyeParams.EyeToSourceNDC.Offset.x; + textureNDC.y = tanEyeAngle.y * eyeParams.EyeToSourceNDC.Scale.y + eyeParams.EyeToSourceNDC.Offset.y; + return textureNDC; +} + +Vector2f TransformScreenPixelToScreenNDC( Recti const &distortionViewport, + Vector2f const &pixel ) +{ + // Move to [-1,1] NDC coords. 
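+ // (Worked example: with the 640x800 left-eye viewport from GetFramebufferViewport above, pixel
+ // (320, 400) maps to NDC (0, 0) and pixel (0, 0) maps to NDC (-1, -1).)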
+ Vector2f framebufferNDC; + framebufferNDC.x = -1.0f + 2.0f * ( ( pixel.x - (float)distortionViewport.x ) / (float)distortionViewport.w ); + framebufferNDC.y = -1.0f + 2.0f * ( ( pixel.y - (float)distortionViewport.y ) / (float)distortionViewport.h ); + return framebufferNDC; +} + +Vector2f TransformScreenPixelToTanFovSpace( Recti const &distortionViewport, + DistortionRenderDesc const &distortion, + Vector2f const &pixel ) +{ + return TransformScreenNDCToTanFovSpace( distortion, + TransformScreenPixelToScreenNDC( distortionViewport, pixel ) ); +} + +Vector2f TransformScreenNDCToRendertargetTexUV( DistortionRenderDesc const &distortion, + StereoEyeParams const &eyeParams, + Vector2f const &pixel ) +{ + return TransformTanFovSpaceToRendertargetTexUV ( eyeParams, + TransformScreenNDCToTanFovSpace ( distortion, pixel ) ); +} + +Vector2f TransformScreenPixelToRendertargetTexUV( Recti const &distortionViewport, + DistortionRenderDesc const &distortion, + StereoEyeParams const &eyeParams, + Vector2f const &pixel ) +{ + return TransformTanFovSpaceToRendertargetTexUV ( eyeParams, + TransformScreenPixelToTanFovSpace ( distortionViewport, distortion, pixel ) ); +} + + +//----------------------------------------------------------------------------------- +// A set of "reverse-mapping" functions, mapping from real-world and/or texture space back to the framebuffer. + +Vector2f TransformTanFovSpaceToScreenNDC( DistortionRenderDesc const &distortion, + const Vector2f &tanEyeAngle, bool usePolyApprox /*= false*/ ) +{ + float tanEyeAngleRadius = tanEyeAngle.Length(); + float tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverseApprox ( tanEyeAngleRadius ); + if ( !usePolyApprox ) + { + tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverse ( tanEyeAngleRadius ); + } + Vector2f tanEyeAngleDistorted = tanEyeAngle; + if ( tanEyeAngleRadius > 0.0f ) + { + tanEyeAngleDistorted = tanEyeAngle * ( tanEyeAngleDistortedRadius / tanEyeAngleRadius ); + } + + Vector2f framebufferNDC; + framebufferNDC.x = ( tanEyeAngleDistorted.x / distortion.TanEyeAngleScale.x ) + distortion.LensCenter.x; + framebufferNDC.y = ( tanEyeAngleDistorted.y / distortion.TanEyeAngleScale.y ) + distortion.LensCenter.y; + + return framebufferNDC; +} + +Vector2f TransformRendertargetNDCToTanFovSpace( const ScaleAndOffset2D &eyeToSourceNDC, + const Vector2f &textureNDC ) +{ + Vector2f tanEyeAngle = (textureNDC - eyeToSourceNDC.Offset) / eyeToSourceNDC.Scale; + return tanEyeAngle; +} + + + +} //namespace OVR + +//Just want to make a copy disentangled from all these namespaces! +float ExtEvalCatmullRom10Spline ( float const *K, float scaledVal ) +{ + return(OVR::EvalCatmullRom10Spline ( K, scaledVal )); +} + + diff --git a/LibOVR/Src/OVR_Stereo.h b/LibOVR/Src/OVR_Stereo.h new file mode 100644 index 0000000..bd5438d --- /dev/null +++ b/LibOVR/Src/OVR_Stereo.h @@ -0,0 +1,460 @@ +/************************************************************************************ + +PublicHeader: OVR.h +Filename : OVR_Stereo.h +Content : Stereo rendering functions +Created : November 30, 2013 +Authors : Tom Fosyth + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Stereo_h +#define OVR_Stereo_h + +#include "OVR_Device.h" + +// CAPI Forward declaration. +typedef struct ovrFovPort_ ovrFovPort; +typedef struct ovrRecti_ ovrRecti; + +namespace OVR { + +//----------------------------------------------------------------------------------- +// ***** Stereo Enumerations + +// StereoEye specifies which eye we are rendering for; it is used to +// retrieve StereoEyeParams. +enum StereoEye +{ + StereoEye_Center, + StereoEye_Left, + StereoEye_Right +}; + + +//----------------------------------------------------------------------------------- +// ***** FovPort + +// FovPort describes Field Of View (FOV) of a viewport. +// This class has values for up, down, left and right, stored in +// tangent of the angle units to simplify calculations. +// +// As an example, for a standard 90 degree vertical FOV, we would +// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }. +// +// CreateFromRadians/Degrees helper functions can be used to +// access FOV in different units. + +struct FovPort +{ + float UpTan; + float DownTan; + float LeftTan; + float RightTan; + + FovPort ( float sideTan = 0.0f ) : + UpTan(sideTan), DownTan(sideTan), LeftTan(sideTan), RightTan(sideTan) { } + FovPort ( float u, float d, float l, float r ) : + UpTan(u), DownTan(d), LeftTan(l), RightTan(r) { } + + // C-interop support: FovPort <-> ovrFovPort (implementation in OVR_CAPI.cpp). + FovPort (const ovrFovPort &src); + operator const ovrFovPort () const; + + + static FovPort CreateFromRadians(float horizontalFov, float verticalFov) + { + FovPort result; + result.UpTan = tanf ( verticalFov * 0.5f ); + result.DownTan = tanf ( verticalFov * 0.5f ); + result.LeftTan = tanf ( horizontalFov * 0.5f ); + result.RightTan = tanf ( horizontalFov * 0.5f ); + return result; + } + + static FovPort CreateFromDegrees(float horizontalFovDegrees, + float verticalFovDegrees) + { + return CreateFromRadians(DegreeToRad(horizontalFovDegrees), + DegreeToRad(verticalFovDegrees)); + } + + // Get Horizontal/Vertical components of Fov in radians. + float GetVerticalFovRadians() const { return atanf(UpTan) + atanf(DownTan); } + float GetHorizontalFovRadians() const { return atanf(LeftTan) + atanf(RightTan); } + // Get Horizontal/Vertical components of Fov in degrees. + float GetVerticalFovDegrees() const { return RadToDegree(GetVerticalFovRadians()); } + float GetHorizontalFovDegrees() const { return RadToDegree(GetHorizontalFovRadians()); } + + // Compute maximum tangent value among all four sides. + float GetMaxSideTan() const + { + return Alg::Max(Alg::Max(UpTan, DownTan), Alg::Max(LeftTan, RightTan)); + } + + // Converts Fov Tan angle units to [-1,1] render target NDC space + Vector2f TanAngleToRendertargetNDC(Vector2f const &tanEyeAngle); + + + // Compute per-channel minimum and maximum of Fov. 
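+ // (For example, FovPort::Max ( leftEyeFov, rightEyeFov ) is a convenient way to build one FOV that
+ // covers both eyes when a single projection is wanted, and CreateFromDegrees ( 90.0f, 90.0f ) above
+ // yields all four tangents equal to 1.0.)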
+ static FovPort Min(const FovPort& a, const FovPort& b) + { + FovPort fov( Alg::Min( a.UpTan , b.UpTan ), + Alg::Min( a.DownTan , b.DownTan ), + Alg::Min( a.LeftTan , b.LeftTan ), + Alg::Min( a.RightTan, b.RightTan ) ); + return fov; + } + + static FovPort Max(const FovPort& a, const FovPort& b) + { + FovPort fov( Alg::Max( a.UpTan , b.UpTan ), + Alg::Max( a.DownTan , b.DownTan ), + Alg::Max( a.LeftTan , b.LeftTan ), + Alg::Max( a.RightTan, b.RightTan ) ); + return fov; + } +}; + + + +//----------------------------------------------------------------------------------- +// ***** ScaleAndOffset + +struct ScaleAndOffset2D +{ + Vector2f Scale; + Vector2f Offset; + + ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f) + : Scale(sx, sy), Offset(ox, oy) + { } +}; + + +//----------------------------------------------------------------------------------- +// ***** Misc. utility functions. + +// Inputs are 4 points (pFitX[0],pFitY[0]) through (pFitX[3],pFitY[3]) +// Result is four coefficients in pResults[0] through pResults[3] such that +// y = pResult[0] + x * ( pResult[1] + x * ( pResult[2] + x * ( pResult[3] ) ) ); +// passes through all four input points. +// Return is true if it succeeded, false if it failed (because two control points +// have the same pFitX value). +bool FitCubicPolynomial ( float *pResult, const float *pFitX, const float *pFitY ); + +//----------------------------------------------------------------------------------- +// ***** LensConfig + +// LensConfig describes the configuration of a single lens in an HMD. +// - Eqn and K[] describe a distortion function. +// - MetersPerTanAngleAtCenter is the relationship between distance on a +// screen (at the center of the lens), and the angle variance of the light after it +// has passed through the lens. +// - ChromaticAberration is an array of parameters for controlling +// additional Red and Blue scaling in order to reduce chromatic aberration +// caused by the Rift lenses. +struct LensConfig +{ + // The result is a scaling applied to the distance from the center of the lens. + float DistortionFnScaleRadiusSquared (float rsq) const; + // x,y,z components map to r,g,b scales. + Vector3f DistortionFnScaleRadiusSquaredChroma (float rsq) const; + + // DistortionFn applies distortion to the argument. + // Input: the distance in TanAngle/NIC space from the optical center to the input pixel. + // Output: the resulting distance after distortion. + float DistortionFn(float r) const + { + return r * DistortionFnScaleRadiusSquared ( r * r ); + } + + // DistortionFnInverse computes the inverse of the distortion function on an argument. + float DistortionFnInverse(float r) const; + + // Also computes the inverse, but using a polynomial approximation. Warning - it's just an approximation! + float DistortionFnInverseApprox(float r) const; + // Sets up InvK[]. + void SetUpInverseApprox(); + + // Sets a bunch of sensible defaults. + void SetToIdentity(); + + + + enum { NumCoefficients = 11 }; + + DistortionEqnType Eqn; + float K[NumCoefficients]; + float MaxR; // The highest R you're going to query for - the curve is unpredictable beyond it. + + float MetersPerTanAngleAtCenter; + + // Additional per-channel scaling is applied after distortion: + // Index [0] - Red channel constant coefficient. + // Index [1] - Red channel r^2 coefficient. + // Index [2] - Blue channel constant coefficient. + // Index [3] - Blue channel r^2 coefficient. 
+ float ChromaticAberration[4]; + + float InvK[NumCoefficients]; + float MaxInvR; +}; + + +// For internal use - storing and loading lens config data + +// Returns true on success. +bool LoadLensConfig ( LensConfig *presult, UByte const *pbuffer, int bufferSizeInBytes ); + +// Returns number of bytes needed. +int SaveLensConfigSizeInBytes ( LensConfig const &config ); +// Returns true on success. +bool SaveLensConfig ( UByte *pbuffer, int bufferSizeInBytes, LensConfig const &config ); + + +//----------------------------------------------------------------------------------- +// ***** DistortionRenderDesc + +// This describes distortion for a single eye in an HMD with a display, not just the lens by itself. +struct DistortionRenderDesc +{ + // The raw lens values. + LensConfig Lens; + + // These map from [-1,1] across the eye being rendered into TanEyeAngle space (but still distorted) + Vector2f LensCenter; + Vector2f TanEyeAngleScale; + // Computed from device characteristics, IPD and eye-relief. + // (not directly used for rendering, but very useful) + Vector2f PixelsPerTanAngleAtCenter; +}; + + + +//----------------------------------------------------------------------------------- +// ***** HmdRenderInfo + +// All the parts of the HMD info that are needed to set up the rendering system. + +struct HmdRenderInfo +{ + // The start of this sturucture is intentionally very similar to HMDInfo in OVER_Device.h + // However to reduce interdependencies, one does not simply #include the other. + + HmdTypeEnum HmdType; + + // Size of the entire screen + Size<int> ResolutionInPixels; + Size<float> ScreenSizeInMeters; + float ScreenGapSizeInMeters; + + // Characteristics of the lenses. + float CenterFromTopInMeters; + float LensSeparationInMeters; + float LensDiameterInMeters; + float LensSurfaceToMidplateInMeters; + EyeCupType EyeCups; + + // Timing & shutter data. All values in seconds. + struct ShutterInfo + { + HmdShutterTypeEnum Type; + float VsyncToNextVsync; // 1/framerate + float VsyncToFirstScanline; // for global shutter, vsync->shutter open. + float FirstScanlineToLastScanline; // for global shutter, will be zero. + float PixelSettleTime; // estimated. + float PixelPersistence; // Full persistence = 1/framerate. + } Shutter; + + + // These are all set from the user's profile. + struct EyeConfig + { + // Distance from center of eyeball to front plane of lens. + float ReliefInMeters; + // Distance from nose (technically, center of Rift) to the middle of the eye. + float NoseToPupilInMeters; + + LensConfig Distortion; + } EyeLeft, EyeRight; + + + HmdRenderInfo() + { + HmdType = HmdType_None; + ResolutionInPixels.w = 0; + ResolutionInPixels.h = 0; + ScreenSizeInMeters.w = 0.0f; + ScreenSizeInMeters.h = 0.0f; + ScreenGapSizeInMeters = 0.0f; + CenterFromTopInMeters = 0.0f; + LensSeparationInMeters = 0.0f; + LensDiameterInMeters = 0.0f; + LensSurfaceToMidplateInMeters = 0.0f; + Shutter.Type = HmdShutter_LAST; + Shutter.VsyncToNextVsync = 0.0f; + Shutter.VsyncToFirstScanline = 0.0f; + Shutter.FirstScanlineToLastScanline = 0.0f; + Shutter.PixelSettleTime = 0.0f; + Shutter.PixelPersistence = 0.0f; + EyeCups = EyeCup_DK1A; + EyeLeft.ReliefInMeters = 0.0f; + EyeLeft.NoseToPupilInMeters = 0.0f; + EyeLeft.Distortion.SetToIdentity(); + EyeRight = EyeLeft; + } + + // The "center eye" is the position the HMD tracking returns, + // and games will also usually use it for audio, aiming reticles, some line-of-sight tests, etc. 
+ EyeConfig GetEyeCenter() const + { + EyeConfig result; + result.ReliefInMeters = 0.5f * ( EyeLeft.ReliefInMeters + EyeRight.ReliefInMeters ); + result.NoseToPupilInMeters = 0.0f; + result.Distortion.SetToIdentity(); + return result; + } + +}; + + + + +//----------------------------------------------------------------------------------- + +// Stateless computation functions, in somewhat recommended execution order. +// For examples on how to use many of them, see the StereoConfig::UpdateComputedState function. + +const float OVR_DEFAULT_EXTRA_EYE_ROTATION = 30.0f * Math<float>::DegreeToRadFactor; + +// Creates a dummy debug HMDInfo matching a particular HMD model. +// Useful for development without an actual HMD attached. +HMDInfo CreateDebugHMDInfo(HmdTypeEnum hmdType); + + +// profile may be NULL, in which case it uses the hard-coded defaults. +// distortionType should be left at the default unless you require something specific for your distortion shaders. +// eyeCupOverride can be EyeCup_LAST, in which case it uses the one in the profile. +HmdRenderInfo GenerateHmdRenderInfoFromHmdInfo ( HMDInfo const &hmdInfo, + Profile const *profile = NULL, + DistortionEqnType distortionType = Distortion_CatmullRom10, + EyeCupType eyeCupOverride = EyeCup_LAST ); + +LensConfig GenerateLensConfigFromEyeRelief ( float eyeReliefInMeters, HmdRenderInfo const &hmd, + DistortionEqnType distortionType = Distortion_CatmullRom10 ); + +DistortionRenderDesc CalculateDistortionRenderDesc ( StereoEye eyeType, HmdRenderInfo const &hmd, + LensConfig const *pLensOverride = NULL ); + +FovPort CalculateFovFromEyePosition ( float eyeReliefInMeters, + float offsetToRightInMeters, + float offsetDownwardsInMeters, + float lensDiameterInMeters, + float extraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION); + +FovPort CalculateFovFromHmdInfo ( StereoEye eyeType, + DistortionRenderDesc const &distortion, + HmdRenderInfo const &hmd, + float extraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION ); + +FovPort GetPhysicalScreenFov ( StereoEye eyeType, DistortionRenderDesc const &distortion ); + +FovPort ClampToPhysicalScreenFov ( StereoEye eyeType, DistortionRenderDesc const &distortion, + FovPort inputFovPort ); + +Sizei CalculateIdealPixelSize ( StereoEye eyeType, DistortionRenderDesc const &distortion, + FovPort fov, float pixelsPerDisplayPixel ); + +Recti GetFramebufferViewport ( StereoEye eyeType, HmdRenderInfo const &hmd ); + +Matrix4f CreateProjection ( bool rightHanded, FovPort fov, + float zNear = 0.01f, float zFar = 10000.0f ); + +Matrix4f CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType, + float tanHalfFovX, float tanHalfFovY, + float unitsX, float unitsY, float distanceFromCamera, + float interpupillaryDistance, Matrix4f const &projection, + float zNear = 0.0f, float zFar = 0.0f ); + +ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort fov ); + +ScaleAndOffset2D CreateUVScaleAndOffsetfromNDCScaleandOffset ( ScaleAndOffset2D scaleAndOffsetNDC, + Recti renderedViewport, + Sizei renderTargetSize ); + + +//----------------------------------------------------------------------------------- +// ***** StereoEyeParams + +// StereoEyeParams describes RenderDevice configuration needed to render +// the scene for one eye. +struct StereoEyeParams +{ + StereoEye Eye; + Matrix4f ViewAdjust; // Translation to be applied to view matrix. + + // Distortion and the VP on the physical display - the thing to run the distortion shader on. 
+ DistortionRenderDesc Distortion; + Recti DistortionViewport; + + // Projection and VP of a particular view (you could have multiple of these). + Recti RenderedViewport; // Viewport that we render the standard scene to. + FovPort Fov; // The FOVs of this scene. + Matrix4f RenderedProjection; // Projection matrix used with this eye. + ScaleAndOffset2D EyeToSourceNDC; // Mapping from TanEyeAngle space to [-1,+1] on the rendered image. + ScaleAndOffset2D EyeToSourceUV; // Mapping from TanEyeAngle space to actual texture UV coords. +}; + + +//----------------------------------------------------------------------------------- +// A set of "forward-mapping" functions, mapping from framebuffer space to real-world and/or texture space. +Vector2f TransformScreenNDCToTanFovSpace ( DistortionRenderDesc const &distortion, + const Vector2f &framebufferNDC ); +void TransformScreenNDCToTanFovSpaceChroma ( Vector2f *resultR, Vector2f *resultG, Vector2f *resultB, + DistortionRenderDesc const &distortion, + const Vector2f &framebufferNDC ); +Vector2f TransformTanFovSpaceToRendertargetTexUV ( StereoEyeParams const &eyeParams, + Vector2f const &tanEyeAngle ); +Vector2f TransformTanFovSpaceToRendertargetNDC ( StereoEyeParams const &eyeParams, + Vector2f const &tanEyeAngle ); +Vector2f TransformScreenPixelToScreenNDC( Recti const &distortionViewport, + Vector2f const &pixel ); +Vector2f TransformScreenPixelToTanFovSpace ( Recti const &distortionViewport, + DistortionRenderDesc const &distortion, + Vector2f const &pixel ); +Vector2f TransformScreenNDCToRendertargetTexUV( DistortionRenderDesc const &distortion, + StereoEyeParams const &eyeParams, + Vector2f const &pixel ); +Vector2f TransformScreenPixelToRendertargetTexUV( Recti const &distortionViewport, + DistortionRenderDesc const &distortion, + StereoEyeParams const &eyeParams, + Vector2f const &pixel ); + +// A set of "reverse-mapping" functions, mapping from real-world and/or texture space back to the framebuffer. +// Be aware that many of these are significantly slower than their forward-mapping counterparts. +Vector2f TransformTanFovSpaceToScreenNDC( DistortionRenderDesc const &distortion, + const Vector2f &tanEyeAngle, bool usePolyApprox = false ); +Vector2f TransformRendertargetNDCToTanFovSpace( const ScaleAndOffset2D &eyeToSourceNDC, + const Vector2f &textureNDC ); + +} //namespace OVR + +#endif // OVR_Stereo_h
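For reference, a minimal sketch of how the stateless helpers declared in OVR_Stereo.h above might be chained to fill a StereoEyeParams for one eye, in the spirit of the StereoConfig::UpdateComputedState function the header points to. Only signatures declared above are used; the enum values HmdType_DK1 and StereoEye_Left, the Recti(x, y, w, h) constructor form, and the omitted ViewAdjust/profile handling are assumptions for illustration, not something this patch establishes.

using namespace OVR;

void SetupLeftEyeSketch()
{
    // Debug HMD description, so no physical device is required.
    HMDInfo       hmdInfo = CreateDebugHMDInfo(HmdType_DK1);           // HmdType_DK1: assumed enum value
    HmdRenderInfo hmd     = GenerateHmdRenderInfoFromHmdInfo(hmdInfo); // defaults: no profile, CatmullRom10

    StereoEyeParams left;
    left.Eye                = StereoEye_Left;                          // assumed enum value
    left.Distortion         = CalculateDistortionRenderDesc(StereoEye_Left, hmd);
    left.DistortionViewport = GetFramebufferViewport(StereoEye_Left, hmd);

    // FOV derived from eye/lens geometry, clamped to what the panel can actually show.
    FovPort fov = CalculateFovFromHmdInfo(StereoEye_Left, left.Distortion, hmd);
    fov         = ClampToPhysicalScreenFov(StereoEye_Left, left.Distortion, fov);
    left.Fov    = fov;

    // Render-target size for roughly 1:1 pixel density at the view center, plus the matching projection.
    Sizei rtSize            = CalculateIdealPixelSize(StereoEye_Left, left.Distortion, fov, 1.0f);
    left.RenderedViewport   = Recti(0, 0, rtSize.w, rtSize.h);         // Recti(x, y, w, h): assumed constructor
    left.RenderedProjection = CreateProjection(true /* rightHanded */, fov);

    // Mappings consumed by the distortion pass.
    left.EyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(fov);
    left.EyeToSourceUV  = CreateUVScaleAndOffsetfromNDCScaleandOffset(left.EyeToSourceNDC,
                                                                      left.RenderedViewport,
                                                                      rtSize);
    // ViewAdjust (the half-IPD translation) would normally come from the user profile; omitted here.
}

The forward-mapping helpers declared above (for example TransformScreenPixelToTanFovSpace) can then be run against the resulting Distortion and EyeToSourceNDC/UV fields when debugging individual pixels.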
\ No newline at end of file diff --git a/LibOVR/Src/OVR_ThreadCommandQueue.cpp b/LibOVR/Src/OVR_ThreadCommandQueue.cpp index abefffb..427a539 100644 --- a/LibOVR/Src/OVR_ThreadCommandQueue.cpp +++ b/LibOVR/Src/OVR_ThreadCommandQueue.cpp @@ -5,16 +5,16 @@ Filename : OVR_ThreadCommandQueue.cpp Content : Command queue for operations executed on a thread Created : October 29, 2012 -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_ThreadCommandQueue.h b/LibOVR/Src/OVR_ThreadCommandQueue.h index cd1770c..9774212 100644 --- a/LibOVR/Src/OVR_ThreadCommandQueue.h +++ b/LibOVR/Src/OVR_ThreadCommandQueue.h @@ -6,16 +6,16 @@ Content : Command queue for operations executed on a thread Created : October 29, 2012 Author : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_Win32_DeviceManager.cpp b/LibOVR/Src/OVR_Win32_DeviceManager.cpp index b9dc238..6327d58 100644 --- a/LibOVR/Src/OVR_Win32_DeviceManager.cpp +++ b/LibOVR/Src/OVR_Win32_DeviceManager.cpp @@ -5,16 +5,16 @@ Content : Win32 implementation of DeviceManager. Created : September 21, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -107,8 +107,8 @@ bool DeviceManager::GetDeviceInfo(DeviceInfo* info) const info->Type = Device_Manager; info->Version = 0; - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, "DeviceManager"); - OVR_strcpy(info->Manufacturer,DeviceInfo::MaxNameLength, "Oculus VR, Inc."); + info->ProductName = "DeviceManager"; + info->Manufacturer = "Oculus VR, Inc."; return true; } @@ -200,12 +200,12 @@ int DeviceManagerThread::Run() // allowed based on current ticks. if (!TicksNotifiers.IsEmpty()) { - UInt64 ticksMks = Timer::GetTicks(); - DWORD waitAllowed; - + double timeSeconds = Timer::GetSeconds(); + DWORD waitAllowed; + for (UPInt j = 0; j < TicksNotifiers.GetSize(); j++) { - waitAllowed = (DWORD)(TicksNotifiers[j]->OnTicks(ticksMks) / Timer::MksPerMs); + waitAllowed = (DWORD)(TicksNotifiers[j]->OnTicks(timeSeconds) * Timer::MsPerSecond); if (waitAllowed < waitMs) waitMs = waitAllowed; } @@ -416,7 +416,7 @@ DeviceManager* DeviceManager::Create() manager->AddFactory(&SensorDeviceFactory::Instance); manager->AddFactory(&LatencyTestDeviceFactory::Instance); manager->AddFactory(&Win32::HMDDeviceFactory::Instance); - + manager->AddRef(); } else diff --git a/LibOVR/Src/OVR_Win32_DeviceManager.h b/LibOVR/Src/OVR_Win32_DeviceManager.h index ecf308f..b0cf65d 100644 --- a/LibOVR/Src/OVR_Win32_DeviceManager.h +++ b/LibOVR/Src/OVR_Win32_DeviceManager.h @@ -5,16 +5,16 @@ Content : Win32-specific DeviceManager header. Created : September 21, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -90,10 +90,10 @@ public: virtual void OnOverlappedEvent(HANDLE hevent) { OVR_UNUSED1(hevent); } // Called when timing ticks are updated. - // Returns the largest number of microseconds this function can + // Returns the largest number of seconds this function can // wait till next call. - virtual UInt64 OnTicks(UInt64 ticksMks) - { OVR_UNUSED1(ticksMks); return Timer::MksPerSecond * 1000; } + virtual double OnTicks(double tickSeconds) + { OVR_UNUSED1(tickSeconds); return 1000.0; } enum DeviceMessageType { @@ -135,14 +135,14 @@ private: // Event notifications for devices whose OVERLAPPED I/O we service. // This list is modified through AddDeviceOverlappedEvent. // WaitHandles[0] always == hCommandEvent, with null device. - Array<HANDLE> WaitHandles; - Array<Notifier*> WaitNotifiers; + ArrayPOD<HANDLE> WaitHandles; + ArrayPOD<Notifier*> WaitNotifiers; // Ticks notifiers - used for time-dependent events such as keep-alive. 
- Array<Notifier*> TicksNotifiers; + ArrayPOD<Notifier*> TicksNotifiers; // Message notifiers. - Array<Notifier*> MessageNotifiers; + ArrayPOD<Notifier*> MessageNotifiers; // Object that manages notifications originating from Windows messages. Ptr<DeviceStatus> pStatusObject; diff --git a/LibOVR/Src/OVR_Win32_DeviceStatus.cpp b/LibOVR/Src/OVR_Win32_DeviceStatus.cpp index 96655a5..22f193c 100644 --- a/LibOVR/Src/OVR_Win32_DeviceStatus.cpp +++ b/LibOVR/Src/OVR_Win32_DeviceStatus.cpp @@ -5,16 +5,16 @@ Content : Win32 implementation of DeviceStatus. Created : January 24, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -36,6 +36,10 @@ namespace OVR { namespace Win32 { static TCHAR windowClassName[] = TEXT("LibOVR_DeviceStatus_WindowClass"); +#define STATIC_KSCATEGORY_VIDEO_CAMERA \ + 0xe5323777, 0xf976, 0x4f5b, { 0x9b, 0x55, 0xb9, 0x46, 0x99, 0xc4, 0x6e, 0x44 } + + //------------------------------------------------------------------------------------- DeviceStatus::DeviceStatus(Notifier* const pClient) : pNotificationClient(pClient), LastTimerId(0) @@ -259,8 +263,10 @@ LRESULT CALLBACK DeviceStatus::WindowsMessageCallback( HWND hwnd, DeviceStatus* pDeviceStatus = (DeviceStatus*) userData; String devicePath(hdr->dbcc_name); + static const GUID videoCamGuid = { STATIC_KSCATEGORY_VIDEO_CAMERA }; // check if HID device caused the event... - if (pDeviceStatus->HidGuid == hdr->dbcc_classguid) + if (pDeviceStatus->HidGuid == hdr->dbcc_classguid || + videoCamGuid == hdr->dbcc_classguid) { // check if recovery timer is already running; stop it and // remove it, if so. diff --git a/LibOVR/Src/OVR_Win32_DeviceStatus.h b/LibOVR/Src/OVR_Win32_DeviceStatus.h index 31df4d5..593a450 100644 --- a/LibOVR/Src/OVR_Win32_DeviceStatus.h +++ b/LibOVR/Src/OVR_Win32_DeviceStatus.h @@ -5,16 +5,16 @@ Content : Win32-specific DeviceStatus header. Created : January 24, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/OVR_Win32_HIDDevice.cpp b/LibOVR/Src/OVR_Win32_HIDDevice.cpp index 8643041..6d33f7a 100644 --- a/LibOVR/Src/OVR_Win32_HIDDevice.cpp +++ b/LibOVR/Src/OVR_Win32_HIDDevice.cpp @@ -5,16 +5,16 @@ Content : Win32 HID device implementation. Created : February 22, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -168,10 +168,10 @@ bool HIDDeviceManager::GetHIDDeviceDesc(const String& path, HIDDeviceDesc* pdevD return false; pdevDesc->Path = path; - getFullDesc(hidDev, pdevDesc); + bool succ = getFullDesc(hidDev, pdevDesc); ::CloseHandle(hidDev); - return true; + return succ; } OVR::HIDDevice* HIDDeviceManager::Open(const String& path) @@ -298,10 +298,11 @@ bool HIDDevice::HIDInitialize(const String& path) HIDManager->Manager->pThread->AddMessageNotifier(this); LogText("OVR::Win32::HIDDevice - Opened '%s'\n" - " Manufacturer:'%s' Product:'%s' Serial#:'%s'\n", + " Manufacturer:'%s' Product:'%s' Serial#:'%s' Version:'%x'\n", DevDesc.Path.ToCStr(), DevDesc.Manufacturer.ToCStr(), DevDesc.Product.ToCStr(), - DevDesc.SerialNumber.ToCStr()); + DevDesc.SerialNumber.ToCStr(), + DevDesc.VersionNumber); return true; } @@ -500,18 +501,20 @@ void HIDDevice::closeDeviceOnIOError() bool HIDDevice::SetFeatureReport(UByte* data, UInt32 length) { - if (!ReadRequested) + if (!ReadRequested) return false; - return HIDManager->HidD_SetFeature(Device, data, (ULONG) length) != FALSE; + BOOLEAN res = HIDManager->HidD_SetFeature(Device, data, (ULONG) length); + return (res == TRUE); } bool HIDDevice::GetFeatureReport(UByte* data, UInt32 length) { - if (!ReadRequested) + if (!ReadRequested) return false; - return HIDManager->HidD_GetFeature(Device, data, (ULONG) length) != FALSE; + BOOLEAN res = HIDManager->HidD_GetFeature(Device, data, (ULONG) length); + return (res == TRUE); } void HIDDevice::OnOverlappedEvent(HANDLE hevent) @@ -526,14 +529,14 @@ void HIDDevice::OnOverlappedEvent(HANDLE hevent) } } -UInt64 HIDDevice::OnTicks(UInt64 ticksMks) +double HIDDevice::OnTicks(double tickSeconds) { if (Handler) { - return Handler->OnTicks(ticksMks); + return Handler->OnTicks(tickSeconds); } - return DeviceManagerThread::Notifier::OnTicks(ticksMks); + return DeviceManagerThread::Notifier::OnTicks(tickSeconds); } bool HIDDevice::OnDeviceMessage(DeviceMessageType messageType, diff --git a/LibOVR/Src/OVR_Win32_HIDDevice.h b/LibOVR/Src/OVR_Win32_HIDDevice.h index b8e3c9f..e43e876 100644 --- a/LibOVR/Src/OVR_Win32_HIDDevice.h +++ 
b/LibOVR/Src/OVR_Win32_HIDDevice.h @@ -5,16 +5,16 @@ Content : Win32 HID device implementation. Created : February 22, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -107,7 +107,7 @@ public: // DeviceManagerThread::Notifier void OnOverlappedEvent(HANDLE hevent); - UInt64 OnTicks(UInt64 ticksMks); + double OnTicks(double tickSeconds); bool OnDeviceMessage(DeviceMessageType messageType, const String& devicePath, bool* error); private: diff --git a/LibOVR/Src/OVR_Win32_HMDDevice.cpp b/LibOVR/Src/OVR_Win32_HMDDevice.cpp index e213691..08c98f2 100644 --- a/LibOVR/Src/OVR_Win32_HMDDevice.cpp +++ b/LibOVR/Src/OVR_Win32_HMDDevice.cpp @@ -5,16 +5,16 @@ Content : Win32 Interface to HMD - detects HMD display Created : September 21, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -27,33 +27,40 @@ limitations under the License. 
#include "OVR_Win32_HMDDevice.h" #include "OVR_Win32_DeviceManager.h" +#include "util/Util_Render_Stereo.h" #include <tchar.h> namespace OVR { namespace Win32 { +using namespace OVR::Util::Render; + //------------------------------------------------------------------------------------- HMDDeviceCreateDesc::HMDDeviceCreateDesc(DeviceFactory* factory, const String& deviceId, const String& displayDeviceName) : DeviceCreateDesc(factory, Device_HMD), DeviceId(deviceId), DisplayDeviceName(displayDeviceName), - DesktopX(0), DesktopY(0), Contents(0), EyeToScreenDistance(0), - HResolution(0), VResolution(0), HScreenSize(0), VScreenSize(0) + Contents(0) { - for (int i=0; i<4; i++) - DistortionK[i] = 0; + Desktop.X = 0; + Desktop.Y = 0; + ResolutionInPixels = Sizei(0); + ScreenSizeInMeters = Sizef(0.0f); + VCenterFromTopInMeters = 0.0f; + LensSeparationInMeters = 0.0f; } HMDDeviceCreateDesc::HMDDeviceCreateDesc(const HMDDeviceCreateDesc& other) : DeviceCreateDesc(other.pFactory, Device_HMD), DeviceId(other.DeviceId), DisplayDeviceName(other.DisplayDeviceName), - DesktopX(other.DesktopX), DesktopY(other.DesktopY), Contents(other.Contents), - HResolution(other.HResolution), VResolution(other.VResolution), - HScreenSize(other.HScreenSize), VScreenSize(other.VScreenSize), - EyeToScreenDistance(other.EyeToScreenDistance) + Contents(other.Contents) { - for (int i=0; i<4; i++) - DistortionK[i] = other.DistortionK[i]; + Desktop.X = other.Desktop.X; + Desktop.Y = other.Desktop.Y; + ResolutionInPixels = other.ResolutionInPixels; + ScreenSizeInMeters = other.ScreenSizeInMeters; + VCenterFromTopInMeters = other.VCenterFromTopInMeters; + LensSeparationInMeters = other.LensSeparationInMeters; } HMDDeviceCreateDesc::MatchResult HMDDeviceCreateDesc::MatchDevice(const DeviceCreateDesc& other, @@ -78,8 +85,7 @@ HMDDeviceCreateDesc::MatchResult HMDDeviceCreateDesc::MatchDevice(const DeviceCr // Non-null DeviceId may match while size is different if screen size was overwritten // by SensorDisplayInfo in prior iteration. if (!DeviceId.IsEmpty() || - ((HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) ) + (ScreenSizeInMeters == s2.ScreenSizeInMeters) ) { *pcandidate = 0; return Match_Found; @@ -88,10 +94,8 @@ HMDDeviceCreateDesc::MatchResult HMDDeviceCreateDesc::MatchDevice(const DeviceCr // DisplayInfo takes precedence, although we try to match it first. - if ((HResolution == s2.HResolution) && - (VResolution == s2.VResolution) && - (HScreenSize == s2.HScreenSize) && - (VScreenSize == s2.VScreenSize)) + if ((ResolutionInPixels == s2.ResolutionInPixels) && + (ScreenSizeInMeters == s2.ScreenSizeInMeters)) { if (DeviceId.IsEmpty() && !s2.DeviceId.IsEmpty()) { @@ -133,27 +137,30 @@ bool HMDDeviceCreateDesc::UpdateMatchedCandidate(const DeviceCreateDesc& other, // which may be corrupted by splitter reporting wrong monitor if (s2.DeviceId.IsEmpty()) { - HScreenSize = s2.HScreenSize; - VScreenSize = s2.VScreenSize; + // disconnected HMD: replace old descriptor by the 'fake' one. 
+ ScreenSizeInMeters = s2.ScreenSizeInMeters; Contents |= Contents_Screen; if (s2.Contents & HMDDeviceCreateDesc::Contents_Distortion) { memcpy(DistortionK, s2.DistortionK, sizeof(float)*4); + // TODO: DistortionEqn Contents |= Contents_Distortion; } DeviceId = s2.DeviceId; DisplayDeviceName = s2.DisplayDeviceName; - DesktopX = s2.DesktopX; - DesktopY = s2.DesktopY; + Desktop.X = s2.Desktop.X; + Desktop.Y = s2.Desktop.Y; if (newDeviceFlag) *newDeviceFlag = true; } else if (DeviceId.IsEmpty()) { + // This branch is executed when 'fake' HMD descriptor is being replaced by + // the real one. DeviceId = s2.DeviceId; DisplayDeviceName = s2.DisplayDeviceName; - DesktopX = s2.DesktopX; - DesktopY = s2.DesktopY; + Desktop.X = s2.Desktop.X; + Desktop.Y = s2.Desktop.Y; // ScreenSize and Resolution are NOT assigned here, since they may have // come from a sensor DisplayInfo (which has precedence over HDMI). @@ -299,24 +306,28 @@ void HMDDeviceFactory::EnumerateDevices(EnumerateVisitor& visitor) } HMDDeviceCreateDesc hmdCreateDesc(this, deviceId, displayDeviceName); - - if (wcsstr(ddm.DeviceID, L"OVR0002")) - { - hmdCreateDesc.SetScreenParameters(mx, my, 1920, 1080, 0.12096f, 0.06804f); + + // Hard-coded defaults in case the device doesn't have the data itself. + if (wcsstr(ddm.DeviceID, L"OVR0003")) + { // DK2 prototypes and variants (default to HmdType_DK2) + hmdCreateDesc.SetScreenParameters(mx, my, 1920, 1080, 0.12576f, 0.07074f, 0.12576f*0.5f, 0.0635f ); + } + else if (wcsstr(ddm.DeviceID, L"OVR0002")) + { // HD Prototypes (default to HmdType_DKHDProto) + hmdCreateDesc.SetScreenParameters(mx, my, 1920, 1080, 0.12096f, 0.06804f, 0.06804f*0.5f, 0.0635f ); } - else - { - if (hmdCreateDesc.Is7Inch()) - { - // Physical dimension of SLA screen. - hmdCreateDesc.SetScreenParameters(mx, my, mwidth, mheight, 0.14976f, 0.0936f); - } - else - { - hmdCreateDesc.SetScreenParameters(mx, my, mwidth, mheight, 0.12096f, 0.0756f); - } + else if (wcsstr(ddm.DeviceID, L"OVR0001")) + { // DK1 + hmdCreateDesc.SetScreenParameters(mx, my, mwidth, mheight, 0.14976f, 0.0936f, 0.0936f*0.5f, 0.0635f); + } + else if (wcsstr(ddm.DeviceID, L"OVR00")) + { // Future Oculus HMD devices (default to DK1 dimensions) + hmdCreateDesc.SetScreenParameters(mx, my, mwidth, mheight, 0.14976f, 0.0936f, 0.0936f*0.5f, 0.0635f); } - + else + { // Duct-tape prototype + hmdCreateDesc.SetScreenParameters(mx, my, mwidth, mheight, 0.12096f, 0.0756f, 0.0756f*0.5f, 0.0635f); + } OVR_DEBUG_LOG_TEXT(("DeviceManager - HMD Found %s - %s\n", deviceId.ToCStr(), displayDeviceName.ToCStr())); @@ -341,174 +352,7 @@ void HMDDeviceFactory::EnumerateDevices(EnumerateVisitor& visitor) } } -DeviceBase* HMDDeviceCreateDesc::NewDeviceInstance() -{ - return new HMDDevice(this); -} - -bool HMDDeviceCreateDesc::Is7Inch() const -{ - return (strstr(DeviceId.ToCStr(), "OVR0001") != 0) || (Contents & Contents_7Inch); -} - -Profile* HMDDeviceCreateDesc::GetProfileAddRef() const -{ - // Create device may override profile name, so get it from there is possible. - ProfileManager* profileManager = GetManagerImpl()->GetProfileManager(); - ProfileType profileType = GetProfileType(); - const char * profileName = pDevice ? - ((HMDDevice*)pDevice)->GetProfileName() : - profileManager->GetDefaultProfileName(profileType); - - return profileName ? 
- profileManager->LoadProfile(profileType, profileName) : - profileManager->GetDeviceDefaultProfile(profileType); -} - - -bool HMDDeviceCreateDesc::GetDeviceInfo(DeviceInfo* info) const -{ - if ((info->InfoClassType != Device_HMD) && - (info->InfoClassType != Device_None)) - return false; - - bool is7Inch = Is7Inch(); - - OVR_strcpy(info->ProductName, DeviceInfo::MaxNameLength, - is7Inch ? "Oculus Rift DK1" : - ((HResolution >= 1920) ? "Oculus Rift DK HD" : "Oculus Rift DK1-Prototype") ); - OVR_strcpy(info->Manufacturer, DeviceInfo::MaxNameLength, "Oculus VR"); - info->Type = Device_HMD; - info->Version = 0; - - // Display detection. - if (info->InfoClassType == Device_HMD) - { - HMDInfo* hmdInfo = static_cast<HMDInfo*>(info); - - hmdInfo->DesktopX = DesktopX; - hmdInfo->DesktopY = DesktopY; - hmdInfo->HResolution = HResolution; - hmdInfo->VResolution = VResolution; - hmdInfo->HScreenSize = HScreenSize; - hmdInfo->VScreenSize = VScreenSize; - hmdInfo->VScreenCenter = VScreenSize * 0.5f; - hmdInfo->InterpupillaryDistance = 0.064f; // Default IPD; should be configurable. - hmdInfo->LensSeparationDistance = 0.0635f; - - // Obtain IPD from profile. - Ptr<Profile> profile = *GetProfileAddRef(); - - if (profile) - { - hmdInfo->InterpupillaryDistance = profile->GetIPD(); - // TBD: Switch on EyeCup type. - } - - if (Contents & Contents_Distortion) - { - memcpy(hmdInfo->DistortionK, DistortionK, sizeof(float)*4); - hmdInfo->EyeToScreenDistance = EyeToScreenDistance; - } - else - { - if (is7Inch) - { - // 7" screen. - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.22f; - hmdInfo->DistortionK[2] = 0.24f; - hmdInfo->EyeToScreenDistance = 0.041f; - } - else - { - hmdInfo->DistortionK[0] = 1.0f; - hmdInfo->DistortionK[1] = 0.18f; - hmdInfo->DistortionK[2] = 0.115f; - - if (HResolution == 1920) - hmdInfo->EyeToScreenDistance = 0.040f; - else - hmdInfo->EyeToScreenDistance = 0.0387f; - } - } - - hmdInfo->ChromaAbCorrection[0] = 0.996f; - hmdInfo->ChromaAbCorrection[1] = -0.004f; - hmdInfo->ChromaAbCorrection[2] = 1.014f; - hmdInfo->ChromaAbCorrection[3] = 0.0f; - - OVR_strcpy(hmdInfo->DisplayDeviceName, sizeof(hmdInfo->DisplayDeviceName), - DisplayDeviceName.ToCStr()); - } - - return true; -} - -//------------------------------------------------------------------------------------- -// ***** HMDDevice - -HMDDevice::HMDDevice(HMDDeviceCreateDesc* createDesc) - : OVR::DeviceImpl<OVR::HMDDevice>(createDesc, 0) -{ -} -HMDDevice::~HMDDevice() -{ -} - -bool HMDDevice::Initialize(DeviceBase* parent) -{ - pParent = parent; - - // Initialize user profile to default for device. - ProfileManager* profileManager = GetManager()->GetProfileManager(); - ProfileName = profileManager->GetDefaultProfileName(getDesc()->GetProfileType()); - - return true; -} -void HMDDevice::Shutdown() -{ - ProfileName.Clear(); - pCachedProfile.Clear(); - pParent.Clear(); -} - -Profile* HMDDevice::GetProfile() const -{ - if (!pCachedProfile) - pCachedProfile = *getDesc()->GetProfileAddRef(); - return pCachedProfile.GetPtr(); -} - -const char* HMDDevice::GetProfileName() const -{ - return ProfileName.ToCStr(); -} - -bool HMDDevice::SetProfileName(const char* name) -{ - pCachedProfile.Clear(); - if (!name) - { - ProfileName.Clear(); - return 0; - } - if (GetManager()->GetProfileManager()->HasProfile(getDesc()->GetProfileType(), name)) - { - ProfileName = name; - return true; - } - return false; -} - -OVR::SensorDevice* HMDDevice::GetSensor() -{ - // Just return first sensor found since we have no way to match it yet. 
- OVR::SensorDevice* sensor = GetManager()->EnumerateDevices<SensorDevice>().CreateDevice(); - if (sensor) - sensor->SetCoordinateFrame(SensorDevice::Coord_HMD); - return sensor; -} +#include "OVR_Common_HMDDevice.cpp" }} // namespace OVR::Win32 diff --git a/LibOVR/Src/OVR_Win32_HMDDevice.h b/LibOVR/Src/OVR_Win32_HMDDevice.h index 44e5a97..d1e481c 100644 --- a/LibOVR/Src/OVR_Win32_HMDDevice.h +++ b/LibOVR/Src/OVR_Win32_HMDDevice.h @@ -5,16 +5,16 @@ Content : Win32 HMDDevice implementation Created : September 21, 2012 Authors : Michael Antonov -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -63,16 +63,23 @@ protected: { Contents_Screen = 1, Contents_Distortion = 2, - Contents_7Inch = 4, }; - String DeviceId; - String DisplayDeviceName; - int DesktopX, DesktopY; - unsigned Contents; - unsigned HResolution, VResolution; - float HScreenSize, VScreenSize; - float DistortionK[4]; - float EyeToScreenDistance; + String DeviceId; + String DisplayDeviceName; + struct + { + int X, Y; + } Desktop; + unsigned int Contents; + + Sizei ResolutionInPixels; + Sizef ScreenSizeInMeters; + float VCenterFromTopInMeters; + float LensSeparationInMeters; + + // TODO: update these to splines. + DistortionEqnType DistortionEqn; + float DistortionK[4]; public: HMDDeviceCreateDesc(DeviceFactory* factory, @@ -96,38 +103,13 @@ public: virtual bool GetDeviceInfo(DeviceInfo* info) const; - // Requests the currently used default profile. This profile affects the - // settings reported by HMDInfo. - Profile* GetProfileAddRef() const; - - ProfileType GetProfileType() const - { - return (HResolution >= 1920) ? Profile_RiftDKHD : Profile_RiftDK1; - } - - - void SetScreenParameters(int x, int y, unsigned hres, unsigned vres, float hsize, float vsize) - { - DesktopX = x; - DesktopY = y; - HResolution = hres; - VResolution = vres; - HScreenSize = hsize; - VScreenSize = vsize; - Contents |= Contents_Screen; - } - void SetDistortion(float eye2screen, const float* dks) - { - EyeToScreenDistance = eye2screen; - - for (int i = 0; i < 4; i++) - DistortionK[i] = dks[i]; - Contents |= Contents_Distortion; - } - - void Set7Inch() { Contents |= Contents_7Inch; } - - bool Is7Inch() const; + void SetScreenParameters(int x, int y, + int hres, int vres, + float hsize, float vsize, + float vCenterFromTopInMeters, float lensSeparationInMeters); + void SetDistortion(const float* dks); + + HmdTypeEnum GetHmdType() const; }; @@ -149,8 +131,8 @@ public: // Requests the currently used default profile. This profile affects the // settings reported by HMDInfo. 
- virtual Profile* GetProfile() const; - virtual const char* GetProfileName() const; + virtual Profile* GetProfile(); + virtual const char* GetProfileName(); virtual bool SetProfileName(const char* name); // Query associated sensor. diff --git a/LibOVR/Src/OVR_Win32_SensorDevice.cpp b/LibOVR/Src/OVR_Win32_SensorDevice.cpp index 88beb7a..e8beb31 100644 --- a/LibOVR/Src/OVR_Win32_SensorDevice.cpp +++ b/LibOVR/Src/OVR_Win32_SensorDevice.cpp @@ -5,16 +5,16 @@ Content : Win32 SensorDevice implementation Created : March 12, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -43,13 +43,15 @@ void SensorDeviceImpl::EnumerateHMDFromSensorDisplayInfo Win32::HMDDeviceCreateDesc hmdCreateDesc(&Win32::HMDDeviceFactory::Instance, String(), String()); hmdCreateDesc.SetScreenParameters( 0, 0, displayInfo.HResolution, displayInfo.VResolution, - displayInfo.HScreenSize, displayInfo.VScreenSize); - - if ((displayInfo.DistortionType & SensorDisplayInfoImpl::Mask_BaseFmt) & SensorDisplayInfoImpl::Base_Distortion) - hmdCreateDesc.SetDistortion(displayInfo.EyeToScreenDistance[0], displayInfo.DistortionK); - if (displayInfo.HScreenSize > 0.14f) - hmdCreateDesc.Set7Inch(); - + displayInfo.HScreenSize, displayInfo.VScreenSize, + displayInfo.VCenter, displayInfo.LensSeparation); + + if ((displayInfo.DistortionType & SensorDisplayInfoImpl::Mask_BaseFmt) == SensorDisplayInfoImpl::Base_Distortion) + { + // TODO: update to spline system. + hmdCreateDesc.SetDistortion(displayInfo.DistortionK); + } + visitor.Visit(hmdCreateDesc); } diff --git a/LibOVR/Src/OVR_Win32_SensorDevice.h b/LibOVR/Src/OVR_Win32_SensorDevice.h index 85f478f..6e4e070 100644 --- a/LibOVR/Src/OVR_Win32_SensorDevice.h +++ b/LibOVR/Src/OVR_Win32_SensorDevice.h @@ -5,16 +5,16 @@ Content : Win32 SensorDevice implementation Created : March 12, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, diff --git a/LibOVR/Src/Recording/Recorder.h b/LibOVR/Src/Recording/Recorder.h new file mode 100644 index 0000000..460c3e6 --- /dev/null +++ b/LibOVR/Src/Recording/Recorder.h @@ -0,0 +1,273 @@ +/************************************************************************************ + +Filename : Recorder.h +Content : Support for recording sensor + camera data +Created : March 14, 2014 +Notes : + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +************************************************************************************/ + +#ifndef OVR_Recorder_h +#define OVR_Recorder_h + +#define ENABLE_RECORDING 0 + +#include "../../Include/OVR.h" + +#if ENABLE_RECORDING + +#include "../Vision/Vision_CameraCalibration.h" +#include "../Vision/Vision_Blob.h" +#include "../Vision/Vision_Image.h" +#include "../Vision/Vision_ModulatedLEDModel.h" +#include "LogDataTypes.h" +#include "matfile.h" + +#define RECORDING_LOCATION "%Y%m%d_%H%M%S" + +#endif +namespace OVR{ + + typedef UByte RecordingMode; + enum RecordingMode_t + { + RecordingOff = 0x0, + RecordForPlayback = 0x1, + RecordForLogging = 0x2 + }; +}; + +#if ENABLE_RECORDING + +namespace OVR{ + + class Recorder + { + public: + + static void SetPrefix(const char* prefix); + static String GetPrefix(); + + struct StartupParams + { + StartupParams() + : intrinsics(), + distortion(), + ledPositions(), + imuPosition(), + devIfcVersion(1) + {} + + ~StartupParams() + { + } + + Vision::CameraIntrinsics intrinsics; + Vision::DistortionCoefficients distortion; + Array<PositionCalibrationReport> ledPositions; + PositionCalibrationReport imuPosition; + UByte devIfcVersion; + }; + + // Global Interface + static void Buffer(const Message& msg); + + static void Buffer(const Vision::CameraIntrinsics& intrinsics, + const Vision::DistortionCoefficients& distortion); + + static void Buffer(const Array<PositionCalibrationReport>& ledPositions); + + static void Buffer(const PositionCalibrationReport& imuPosition); + + static void BufferDevIfcVersion(const UByte devIfcVersion); + + template<typename T> + static void LogData(const char* label, const T& data) + { + Recorder* myRecorder = GetRecorder(); + if(myRecorder && (myRecorder->recordingMode & RecordForLogging)) + myRecorder->DoLogData(label, data); + } + + static void LogData(const char* label, const Vision::Blob blobs[], const int numElements); + + static void LogData(const char* label, const Vector3d& vect3); + + static void LogData(const char* label, const Quatd& quat); + + static void LogData(const char* label, const Posed& 
pose); + + static Recorder* GetRecorder(); + // Instantiates Recorder if it does not already exist. + static Recorder* BuildRecorder(); + // Activates or deactivates recording. Returns resultant state (true = recording, false = not recording). + static bool ToggleRecording(const RecordingMode mode); + + Recorder(); + + ~Recorder(); + + void SaveCameraParams(const Vision::CameraIntrinsics& intrinsics, + const Vision::DistortionCoefficients& distortion); + + void SaveLedPositions(const Array<PositionCalibrationReport>& ledPositions); + + void SaveImuPosition(const PositionCalibrationReport& imuPosition); + + void SaveDevIfcVersion(const UByte devIfcVersion); + + void WriteToRec(const Array<UByte>& buffer); + + template<class T> + void DoLogData(const char* label, const T& data) + { + if(!(recordingMode & RecordForLogging)) + return; + Ptr<LogDataEntryBase> entry; + StringHash<Ptr<LogDataEntryBase> >::Iterator iter = logDataBuffer.Find(label); + if(!iter.IsEnd()) + entry = logDataBuffer.Find(label)->Second; + if(!entry) + { + // Add new entry + entry = getNewEntry(label, data); + logDataBuffer.Add(label, entry); + } + + OVR_ASSERT(entry != NULL); + + // Add new sample to the entry that we found + + Array<T>& myBuffer = dynamic_cast<LogDataEntry<T>*>(entry.GetPtr())->buffer; + myBuffer.PushBack(data); + } + + void DoLogData(const char* label, const Vision::Blob blobs[]); + + void DoLogData(const char* label, const Posed& pose); + + void DoLogData(const char* label, const Vector3d& vect3); + + void DoLogData(const char* label, const Quatd& quat); + + // Activates or deactivates recording. Returns resultant state (true = recording, false = not recording). + bool DoToggleRecording(const RecordingMode mode); + + // Keep this up-to-date when the recording format changes + static const UInt16 RECORDING_FORMAT_VERSION = 1; + + private: + Ptr<LogDataEntryBase> getNewEntry(const char* label, const float&); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const double&); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const int&); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const Vision::Blob[]); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const Posed&); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const Vector3d&); + Ptr<LogDataEntryBase> getNewEntry(const char* label, const Quatd&); + + void start(); + + // Serialize the startup params that we have saved and write them to file. Then set readyForMessages flag. + void writeStartupParams(); + + int bufferPositionReport(const PositionCalibrationReport& report, UByte* buffer); + + void finalize(); + + void writeBlobStats(const char* label, LogDataEntryBase* entry); + + void writeVector3d(const char* label, const Array<Vector3d>& data); + + void writePosed(const char* label, const Array<Posed>& data); + + void writeQuatd(const char* label, const Array<Quatd>& data); + + void reset(); + + String getFilePrefix(); + + // File that will contain simulation/playback data + FILE* recFile; + vortex::CMatFile matFile; + + // Logging data to be written to .mat file + StringHash<Ptr<LogDataEntryBase> > logDataBuffer; + + StartupParams startup; // Startup params. Must be written before general messages + bool readyForMessages; // Indicates that the startup params have been written, and we can safely write messages to the .rec file + + // To preserve ordering of incoming messages + Lock recorderLock; + // How/are we currently recording? 
+ UByte recordingMode; + }; + +}; + +#else // If Recording is not enabled, then no-op all the functions so they can be inlined/optimized away by the compiler. + +namespace OVR{ + + namespace Vision{ + class CameraIntrinsics; + class DistortionCoefficients; + class Blob; + }; + struct PositionCalibrationReport; + + class Recorder + { + public: + static void Buffer(const Message&) { } + + static void Buffer(const Vision::CameraIntrinsics&, + const Vision::DistortionCoefficients&) + { } + + static void Buffer(const Array<PositionCalibrationReport>&) { } + + static void Buffer(const PositionCalibrationReport&) { } + + static void BufferDevIfcVersion(const UByte) { }; + + static Recorder* GetRecorder() { return NULL; } + + static Recorder* BuildRecorder() { return NULL; } + + static bool ToggleRecording(const int) { return false; } + + template<typename T> + static void LogData(const char*, const T&) { }; + + static void LogData(const char*, const Vision::Blob[], const int) { }; + + Recorder() { } + + ~Recorder() { } + + bool DoToggleRecording(const int) { return false; } + + void AddToBuffer(const Message&) { } + }; +} // namespace OVR + +#endif // ENABLE_RECORDING + +#endif // OVR_Recorder_h diff --git a/LibOVR/Src/Util/Util_ImageWindow.cpp b/LibOVR/Src/Util/Util_ImageWindow.cpp new file mode 100644 index 0000000..e038d1f --- /dev/null +++ b/LibOVR/Src/Util/Util_ImageWindow.cpp @@ -0,0 +1,473 @@ +/************************************************************************************ + +Filename : Util_ImageWindow.cpp +Content : An output object for windows that can display raw images for testing +Created : March 13, 2014 +Authors : Dean Beeler + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ +#include "../../Include/OVR.h" + +#include "Util_ImageWindow.h" + +#include <Windows.h> + +typedef HRESULT (WINAPI *D2D1CreateFactoryFn)( + _In_ D2D1_FACTORY_TYPE, + _In_ REFIID, + _In_opt_ const D2D1_FACTORY_OPTIONS*, + _Out_ ID2D1Factory ** + ); + + +namespace OVR { namespace Util { + +ID2D1Factory* ImageWindow::pD2DFactory = NULL; +ImageWindow* ImageWindow::globalWindow = NULL; + +LRESULT CALLBACK MainWndProc( + HWND hwnd, + UINT uMsg, + WPARAM wParam, + LPARAM lParam) +{ + switch (uMsg) + { + case WM_CREATE: + return 0; + + case WM_PAINT: + { + LONG_PTR ptr = GetWindowLongPtr( hwnd, GWLP_USERDATA ); + if( ptr ) + { + ImageWindow* iw = (ImageWindow*)ptr; + iw->OnPaint(); + } + } + + return 0; + + case WM_SIZE: + // Set the size and position of the window. + return 0; + + case WM_DESTROY: + // Clean up window-specific data objects. + return 0; + + // + // Process other messages. 
+ // + + default: + return DefWindowProc(hwnd, uMsg, wParam, lParam); + } + //return 0; +} + +ImageWindow::ImageWindow() : + frontBufferMutex( new Mutex() ) +{ + + HINSTANCE hInst = LoadLibrary( L"d2d1.dll" ); + + D2D1CreateFactoryFn createFactory = NULL; + + if( hInst ) + { + createFactory = (D2D1CreateFactoryFn)GetProcAddress( hInst, "D2D1CreateFactory" ); + } + + globalWindow = this; + + int width = 752; + int height = 480; + + if( pD2DFactory == NULL && createFactory ) + { + createFactory( + D2D1_FACTORY_TYPE_MULTI_THREADED, + __uuidof(ID2D1Factory), + NULL, + &pD2DFactory + ); + } + + resolution = D2D1::SizeU( width, height ); + + SetWindowLongPtr( hWindow, GWLP_USERDATA, (LONG_PTR)this ); + + pRT = NULL; + greyBitmap = NULL; + colorBitmap = NULL; +} + +ImageWindow::ImageWindow( UINT width, UINT height ) : + frontBufferMutex( new Mutex() ) +{ + + + HINSTANCE hInstance = GetModuleHandle( NULL ); + + WNDCLASS wc; + wc.lpszClassName = L"ImageWindowClass"; + wc.lpfnWndProc = MainWndProc; + wc.style = CS_OWNDC | CS_VREDRAW | CS_HREDRAW; + wc.hInstance = hInstance; + wc.hIcon = LoadIcon( NULL, IDI_APPLICATION ); + wc.hCursor = LoadCursor( NULL, IDC_ARROW ); + wc.hbrBackground = (HBRUSH)( COLOR_WINDOW+1 ); + wc.lpszMenuName = L""; + wc.cbClsExtra = 0; + wc.cbWndExtra = 0; + + RegisterClass(&wc); + + hWindow = CreateWindow( + L"ImageWindowClass", + L"ImageWindow", + WS_OVERLAPPEDWINDOW & ~WS_SYSMENU, + CW_USEDEFAULT, + CW_USEDEFAULT, + width, + height, + NULL, + NULL, + hInstance, + NULL); + + resolution = D2D1::SizeU( width, height ); + + SetWindowLongPtr( hWindow, GWLP_USERDATA, (LONG_PTR)this ); + + ShowWindow( hWindow, SW_SHOW ); + + RECT rc = {0}; + GetClientRect( hWindow, &rc ); + + D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(); + D2D1_HWND_RENDER_TARGET_PROPERTIES hwndProps = D2D1::HwndRenderTargetProperties( + hWindow, + resolution + ); + + ID2D1HwndRenderTarget* hwndTarget = NULL; + // Create a Direct2D render target + pRT = NULL; + pD2DFactory->CreateHwndRenderTarget( + &props, + &hwndProps, + &hwndTarget + ); + + pRT = hwndTarget; + + D2D1_SIZE_U size = D2D1::SizeU( width, height ); + + D2D1_PIXEL_FORMAT pixelFormat = D2D1::PixelFormat( + DXGI_FORMAT_A8_UNORM, + D2D1_ALPHA_MODE_PREMULTIPLIED + ); + + D2D1_BITMAP_PROPERTIES bitmapProps; + bitmapProps.dpiX = 72; + bitmapProps.dpiY = 72; + bitmapProps.pixelFormat = pixelFormat; + + HRESULT result = pRT->CreateBitmap( size, bitmapProps, &greyBitmap ); + result = pRT->CreateBitmap( size, bitmapProps, &colorBitmap ); +} + +ImageWindow::~ImageWindow() +{ + if( greyBitmap ) + greyBitmap->Release(); + + if( colorBitmap ) + colorBitmap->Release(); + + if( pRT ) + pRT->Release(); + + delete frontBufferMutex; + + ShowWindow( hWindow, SW_HIDE ); + DestroyWindow( hWindow ); +} + +void ImageWindow::AssociateSurface( void* surface ) +{ + // Assume an IUnknown + IUnknown* unknown = (IUnknown*)surface; + + IDXGISurface *pDxgiSurface = NULL; + HRESULT hr = unknown->QueryInterface(&pDxgiSurface); + if( hr == S_OK ) + { + D2D1_RENDER_TARGET_PROPERTIES props = + D2D1::RenderTargetProperties( + D2D1_RENDER_TARGET_TYPE_DEFAULT, + D2D1::PixelFormat(DXGI_FORMAT_UNKNOWN, D2D1_ALPHA_MODE_PREMULTIPLIED), + 96, + 96 + ); + + + pRT = NULL; + ID2D1RenderTarget* tmpTarget; + + hr = pD2DFactory->CreateDxgiSurfaceRenderTarget( pDxgiSurface, &props, &tmpTarget ); + + if( hr == S_OK ) + { + DXGI_SURFACE_DESC desc = {0}; + pDxgiSurface->GetDesc( &desc ); + int width = desc.Width; + int height = desc.Height; + + D2D1_SIZE_U size = D2D1::SizeU( 
width, height ); + + D2D1_PIXEL_FORMAT pixelFormat = D2D1::PixelFormat( + DXGI_FORMAT_A8_UNORM, + D2D1_ALPHA_MODE_PREMULTIPLIED + ); + + D2D1_PIXEL_FORMAT colorPixelFormat = D2D1::PixelFormat( + DXGI_FORMAT_B8G8R8A8_UNORM, + D2D1_ALPHA_MODE_PREMULTIPLIED + ); + + D2D1_BITMAP_PROPERTIES bitmapProps; + bitmapProps.dpiX = 96; + bitmapProps.dpiY = 96; + bitmapProps.pixelFormat = pixelFormat; + + D2D1_BITMAP_PROPERTIES colorBitmapProps; + colorBitmapProps.dpiX = 96; + colorBitmapProps.dpiY = 96; + colorBitmapProps.pixelFormat = colorPixelFormat; + + HRESULT result = tmpTarget->CreateBitmap( size, bitmapProps, &greyBitmap ); + if( result != S_OK ) + { + tmpTarget->Release(); + tmpTarget = NULL; + } + + result = tmpTarget->CreateBitmap( size, colorBitmapProps, &colorBitmap ); + if( result != S_OK ) + { + greyBitmap->Release(); + greyBitmap = NULL; + + tmpTarget->Release(); + tmpTarget = NULL; + } + pRT = tmpTarget; + } + } +} + +void ImageWindow::Process() +{ + if( pRT && greyBitmap ) + { + OnPaint(); + } +} + +void ImageWindow::Complete() +{ + Mutex::Locker locker( frontBufferMutex ); + + if( frames.IsEmpty() ) + return; + + if( frames.PeekBack(0).ready ) + return; + + Frame& frame = frames.PeekBack(0); + + frame.ready = true; +} + +void ImageWindow::OnPaint() +{ + static float mover = -752.0f; + + Mutex::Locker locker( frontBufferMutex ); + + // Nothing to do + if( frames.IsEmpty() ) + return; + + if( !frames.PeekFront(0).ready ) + return; + + Frame currentFrame = frames.PopFront(); + Frame dummyFrame = {0}; + + Frame& nextFrame = dummyFrame; + + if( !frames.IsEmpty() ) + nextFrame = frames.PeekFront(0); + + while( nextFrame.ready ) + { + // Free up the current frame since it's been removed from the deque + free( currentFrame.imageData ); + if( currentFrame.colorImageData ) + free( currentFrame.colorImageData ); + + currentFrame = frames.PopFront(); + + if( frames.IsEmpty() ) + return; + + nextFrame = frames.PeekFront(0); + } + + if( currentFrame.imageData ) + greyBitmap->CopyFromMemory( NULL, currentFrame.imageData, currentFrame.width ); + + if( currentFrame.colorImageData ) + colorBitmap->CopyFromMemory( NULL, currentFrame.colorImageData, currentFrame.colorPitch ); + + pRT->BeginDraw(); + + pRT->SetAntialiasMode(D2D1_ANTIALIAS_MODE_ALIASED); + + pRT->Clear( D2D1::ColorF(D2D1::ColorF::Black) ); + + // This will mirror our image + D2D1_MATRIX_3X2_F m; + m._11 = -1; m._12 = 0; + m._21 = 0; m._22 = 1; + m._31 = 0; m._32 = 0; + pRT->SetTransform( m ); + + ID2D1SolidColorBrush* whiteBrush; + + pRT->CreateSolidColorBrush( D2D1::ColorF(D2D1::ColorF::White, 1.0f), &whiteBrush ); + + if( currentFrame.imageData ) + { + pRT->FillOpacityMask( greyBitmap, whiteBrush, + D2D1_OPACITY_MASK_CONTENT_TEXT_NATURAL, + D2D1::RectF( -(FLOAT)resolution.width, 0.0f, (FLOAT)0.0f, (FLOAT)resolution.height ), + D2D1::RectF( 0.0f, 0.0f, (FLOAT)resolution.width, (FLOAT)resolution.height ) ); + } + else if( currentFrame.colorImageData ) + { + pRT->DrawBitmap( colorBitmap, + D2D1::RectF( -(FLOAT)resolution.width, 0.0f, (FLOAT)0.0f, (FLOAT)resolution.height ) ); + + } + + pRT->SetTransform(D2D1::Matrix3x2F::Identity()); + + whiteBrush->Release(); + + Array<CirclePlot>::Iterator it; + + for( it = currentFrame.plots.Begin(); it != currentFrame.plots.End(); ++it ) + { + ID2D1SolidColorBrush* aBrush; + + pRT->CreateSolidColorBrush( D2D1::ColorF( it->r, it->g, it->b), &aBrush ); + + D2D1_ELLIPSE ellipse; + ellipse.point.x = it->x; + ellipse.point.y = it->y; + ellipse.radiusX = it->radius; + ellipse.radiusY = it->radius; + + if( 
it->fill ) + pRT->FillEllipse( &ellipse, aBrush ); + else + pRT->DrawEllipse( &ellipse, aBrush ); + + aBrush->Release(); + } + + pRT->EndDraw(); + + if( currentFrame.imageData ) + free( currentFrame.imageData ); + if( currentFrame.colorImageData ) + free( currentFrame.colorImageData ); +} + +void ImageWindow::UpdateImageBW( const UINT8* imageData, UINT width, UINT height ) +{ + if( pRT && greyBitmap ) + { + Mutex::Locker locker( frontBufferMutex ); + + Frame frame = {0}; + frame.imageData = malloc( width * height ); + frame.width = width; + frame.height = height; + memcpy( frame.imageData, imageData, width * height ); + + frames.PushBack( frame ); + } +} + +void ImageWindow::UpdateImageRGBA( const UINT8* imageData, UINT width, UINT height, UINT pitch ) +{ + if( pRT && colorBitmap ) + { + Mutex::Locker locker( frontBufferMutex ); + + Frame frame = {0}; + frame.colorImageData = malloc( pitch * height ); + frame.width = width; + frame.height = height; + frame.colorPitch = pitch; + memcpy( frame.colorImageData, imageData, pitch * height ); + + frames.PushBack( frame ); + } +} + +void ImageWindow::addCircle( float x, float y, float radius, float r, float g, float b, bool fill ) +{ + if( pRT ) + { + CirclePlot cp; + + cp.x = x; + cp.y = y; + cp.radius = radius; + cp.r = r; + cp.g = g; + cp.b = b; + cp.fill = fill; + + Mutex::Locker locker( frontBufferMutex ); + Frame& frame = frames.PeekBack( 0 ); + frame.plots.PushBack( cp ); + } + +} + +}} diff --git a/LibOVR/Src/Util/Util_ImageWindow.h b/LibOVR/Src/Util/Util_ImageWindow.h new file mode 100644 index 0000000..418598c --- /dev/null +++ b/LibOVR/Src/Util/Util_ImageWindow.h @@ -0,0 +1,122 @@ +/************************************************************************************ + +Filename : Util_ImageWindow.h +Content : An output object for windows that can display raw images for testing +Created : March 13, 2014 +Authors : Dean Beeler + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*************************************************************************************/ + +#ifndef UTIL_IMAGEWINDOW_H +#define UTIL_IMAGEWINDOW_H + +#define WIN32_LEAN_AND_MEAN 1 +#include <windows.h> +#include <d2d1.h> + +#include "../../Include/OVR.h" +#include "../Kernel/OVR_Hash.h" +#include "../Kernel/OVR_Array.h" +#include "../Kernel/OVR_Threads.h" +#include "../Kernel/OVR_Deque.h" + +#include <stdint.h> + +namespace OVR { namespace Util { + +class ImageWindow +{ + typedef struct + { + float x; + float y; + float radius; + float r; + float g; + float b; + bool fill; + } CirclePlot; + + typedef struct + { + float x; + float y; + float r; + float g; + float b; + WCHAR* text; + } TextPlot; + + typedef struct + { + Array<CirclePlot> plots; + void* imageData; + void* colorImageData; + int width; + int height; + int colorPitch; + bool ready; + } Frame; + + static ID2D1Factory* pD2DFactory; + + HWND hWindow; + ID2D1RenderTarget* pRT; + D2D1_SIZE_U resolution; + + Mutex* frontBufferMutex; + + InPlaceMutableDeque<Frame> frames; + + ID2D1Bitmap* greyBitmap; + ID2D1Bitmap* colorBitmap; + +public: + // constructors + ImageWindow(); + ImageWindow( UINT width, UINT height ); + virtual ~ImageWindow(); + + void OnPaint(); // Called by Windows when it receives a WM_PAINT message + + void UpdateImage( const UINT8* imageData, UINT width, UINT height ) { UpdateImageBW( imageData, width, height ); } + void UpdateImageBW( const UINT8* imageData, UINT width, UINT height ); + void UpdateImageRGBA( const UINT8* imageData, UINT width, UINT height, UINT pitch ); + void Complete(); // Called by drawing thread to submit a frame + + void Process(); // Called by rendering thread to do window processing + + void AssociateSurface( void* surface ); + + void addCircle( float x , float y, float radius, float r, float g, float b, bool fill ); + + static ImageWindow* GlobalWindow() { return globalWindow; } + +private: + + + static ImageWindow* globalWindow; + + static bool running; +}; + +}} // namespace OVR::Util + +#endif
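For reference, a minimal sketch of how the ImageWindow class added above might be driven, based on the implementation in Util_ImageWindow.cpp: a producer thread buffers a greyscale frame plus any overlay circles and then marks it complete, while the thread that owns the window pumps Process() so OnPaint() can display ready frames. Only calls declared in the header are used; the frame source, marker position, and threading arrangement are assumptions for illustration.

#include "Util_ImageWindow.h" // adjust the include path to your project layout

using namespace OVR::Util;

// Created once, typically by the thread that will also pump Process().
static ImageWindow* debugWindow = new ImageWindow(752, 480);

// Producer thread (e.g. a camera/vision loop): queue one greyscale frame with an overlay.
void SubmitDebugFrame(const UINT8* pixels, UINT width, UINT height)
{
    debugWindow->UpdateImageBW(pixels, width, height); // copies the pixels into a new back frame
    debugWindow->addCircle(100.0f, 120.0f, 6.0f,       // hypothetical marker position and radius
                           1.0f, 0.0f, 0.0f, true);    // filled, red
    debugWindow->Complete();                           // mark the most recent frame as ready
}

// Window-owning thread: call periodically so ready frames are drawn via OnPaint().
void PumpDebugWindow()
{
    debugWindow->Process();
}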
\ No newline at end of file diff --git a/LibOVR/Src/Util/Util_Interface.cpp b/LibOVR/Src/Util/Util_Interface.cpp new file mode 100644 index 0000000..d96423c --- /dev/null +++ b/LibOVR/Src/Util/Util_Interface.cpp @@ -0,0 +1,34 @@ +/************************************************************************************ + +Filename : Util_Interface.cpp +Content : Simple interface, utilised by internal demos, + with access to wider SDK as needed. + Located in the body of the SDK to ensure updated + when new SDK features are added. +Created : February 20, 2014 +Authors : Tom Heath + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "Util_Interface.h" + + + +//Files left in to ease its possible return......
\ No newline at end of file diff --git a/LibOVR/Src/Util/Util_Interface.h b/LibOVR/Src/Util/Util_Interface.h new file mode 100644 index 0000000..1bbf638 --- /dev/null +++ b/LibOVR/Src/Util/Util_Interface.h @@ -0,0 +1,37 @@ +/************************************************************************************ + +PublicHeader: OVR.h +Filename : Util_Interface.h +Content : Simple interface, utilised by internal demos, + with access to wider SDK as needed. + Located in the body of the SDK to ensure updated + when new SDK features are added. +Created : February 20, 2014 +Authors : Tom Heath + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Util_Interface_h +#define OVR_Util_Interface_h +#include "../../Src/OVR_CAPI.h" + +//Files left in to ease its possible return...... + +#endif diff --git a/LibOVR/Src/Util/Util_LatencyTest.cpp b/LibOVR/Src/Util/Util_LatencyTest.cpp index b972e48..3017c72 100644 --- a/LibOVR/Src/Util/Util_LatencyTest.cpp +++ b/LibOVR/Src/Util/Util_LatencyTest.cpp @@ -5,16 +5,16 @@ Content : Wraps the lower level LatencyTester interface and adds functiona Created : February 14, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -73,31 +73,19 @@ bool LatencyTest::SetDevice(LatencyTestDevice* device) if (device != Device) { - if (device != NULL) - { - if (device->GetMessageHandler() != NULL) - { - OVR_DEBUG_LOG( - ("LatencyTest::AttachToDevice failed - device %p already has handler", device)); - return false; - } - } + Handler.RemoveHandlerFromDevices(); - if (Device != NULL) - { - Device->SetMessageHandler(0); - } Device = device; if (Device != NULL) { - Device->SetMessageHandler(&Handler); + Device->AddMessageHandler(&Handler); // Set trigger threshold. LatencyTestConfiguration configuration(SENSOR_DETECT_THRESHOLD, false); // No samples streaming. Device->SetConfiguration(configuration, true); - // Set display to intial (3 dashes). 
+ // Set display to initial (3 dashes). LatencyTestDisplay ltd(2, 0x40400040); Device->SetDisplay(ltd); } @@ -251,7 +239,7 @@ void LatencyTest::handleMessage(const Message& msg, LatencyTestMessageType laten getActiveResult()->TargetColor = RenderColor; // Record time so we can determine usb roundtrip time. - getActiveResult()->StartTestTicksMicroS = Timer::GetTicks(); + getActiveResult()->StartTestSeconds = Timer::GetSeconds(); Device->SetStartTest(RenderColor); @@ -275,7 +263,7 @@ void LatencyTest::handleMessage(const Message& msg, LatencyTestMessageType laten clearTimer(); // Record time so we can determine usb roundtrip time. - getActiveResult()->TestStartedTicksMicroS = Timer::GetTicks(); + getActiveResult()->TestStartedSeconds = Timer::GetSeconds(); State = State_WaitingForColorDetected; OVR_DEBUG_LOG(("State_WaitingForTestStarted -> State_WaitingForColorDetected.")); @@ -501,7 +489,7 @@ void LatencyTest::processResults() } } - float usbRountripElapsedMilliS = 0.001f * (float) (pCurr->TestStartedTicksMicroS - pCurr->StartTestTicksMicroS); + float usbRountripElapsedMilliS = Timer::MsPerSecond * (float) (pCurr->TestStartedSeconds - pCurr->StartTestSeconds); minUSBTripMilliS = Alg::Min(usbRountripElapsedMilliS, minUSBTripMilliS); maxUSBTripMilliS = Alg::Max(usbRountripElapsedMilliS, maxUSBTripMilliS); averageUSBTripMilliS += usbRountripElapsedMilliS; diff --git a/LibOVR/Src/Util/Util_LatencyTest.h b/LibOVR/Src/Util/Util_LatencyTest.h index 657a3a9..0844603 100644 --- a/LibOVR/Src/Util/Util_LatencyTest.h +++ b/LibOVR/Src/Util/Util_LatencyTest.h @@ -6,16 +6,16 @@ Content : Wraps the lower level LatencyTesterDevice and adds functionality Created : February 14, 2013 Authors : Lee Cooper -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -80,6 +80,8 @@ public: bool DisplayScreenColor(Color& colorToDisplay); const char* GetResultsString(); + bool IsMeasuringNow() const { return (State != State_WaitingForButton); } + // Begin test. Equivalent to pressing the button on the latency tester. 
void BeginTest(); @@ -142,8 +144,8 @@ private: : DeviceMeasuredElapsedMilliS(0), TimedOutWaitingForTestStarted(false), TimedOutWaitingForColorDetected(false), - StartTestTicksMicroS(0), - TestStartedTicksMicroS(0) + StartTestSeconds(0.0), + TestStartedSeconds(0.0) {} Color TargetColor; @@ -153,8 +155,8 @@ private: bool TimedOutWaitingForTestStarted; bool TimedOutWaitingForColorDetected; - UInt64 StartTestTicksMicroS; - UInt64 TestStartedTicksMicroS; + double StartTestSeconds; + double TestStartedSeconds; }; List<MeasurementResult> Results; diff --git a/LibOVR/Src/Util/Util_LatencyTest2.cpp b/LibOVR/Src/Util/Util_LatencyTest2.cpp new file mode 100644 index 0000000..f4baf29 --- /dev/null +++ b/LibOVR/Src/Util/Util_LatencyTest2.cpp @@ -0,0 +1,194 @@ +/************************************************************************************ + +Filename : Util_LatencyTest2.cpp +Content : Wraps the lower level LatencyTester interface for DK2 and adds functionality. +Created : March 10, 2014 +Authors : Volga Aksoy + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. + +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#include "Util_LatencyTest2.h" + +#include "../OVR_CAPI.h" +#include "../Kernel/OVR_Log.h" +#include "../Kernel/OVR_Timer.h" + + +namespace OVR { namespace Util { + +//static const float BIG_FLOAT = 1000000.0f; +//static const float SMALL_FLOAT = -1000000.0f; + +//------------------------------------------------------------------------------------- +// ***** LatencyTest2 + +LatencyTest2::LatencyTest2(SensorDevice* device) + : Handler(getThis()) + , TestActive(false) + , StartTiming(-1) + , LatencyMeasuredInSeconds(-1) + , LastPixelReadMsg(NULL) + , RenderColorValue(0) + , NumMsgsBeforeSettle(0) + , NumTestsSuccessful(0) +{ + if (device != NULL) + { + SetSensorDevice(device); + } +} + +LatencyTest2::~LatencyTest2() +{ + HmdDevice = NULL; + LatencyTesterDev = NULL; + + Handler.RemoveHandlerFromDevices(); +} + +bool LatencyTest2::SetSensorDevice(SensorDevice* device) +{ + Lock::Locker devLocker(&TesterLock); + + // Enable/Disable pixel read from HMD + if (device != HmdDevice) + { + Handler.RemoveHandlerFromDevices(); + + HmdDevice = device; + + if (HmdDevice != NULL) + { + HmdDevice->AddMessageHandler(&Handler); + } + } + + return true; +} + +bool LatencyTest2::SetDisplayDevice(LatencyTestDevice* device) +{ + Lock::Locker devLocker(&TesterLock); + + if (device != LatencyTesterDev) + { + LatencyTesterDev = device; + if (LatencyTesterDev != NULL) + { + // Set display to initial (3 dashes). 
+ LatencyTestDisplay ltd(2, 0x40400040); + LatencyTesterDev->SetDisplay(ltd); + } + } + + return true; +} + +void LatencyTest2::BeginTest(double startTime) +{ + Lock::Locker devLocker(&TesterLock); + + if (!TestActive) + { + TestActive = true; + NumMsgsBeforeSettle = 0; + + // Go to next pixel value + //RenderColorValue = (RenderColorValue == 0) ? 255 : 0; + RenderColorValue = (RenderColorValue + LT2_ColorIncrement) % 256; + RawStartTiming = LastPixelReadMsg.RawSensorTime; + + if (startTime > 0.0) + StartTiming = startTime; + else + StartTiming = ovr_GetTimeInSeconds(); + + } +} + +void LatencyTest2::handleMessage(const MessagePixelRead& msg) +{ + Lock::Locker devLocker(&TesterLock); + + // Hold onto the last message as we will use this when we start a new test + LastPixelReadMsg = msg; + + // If color readback index is valid, store it in the lock-less queue. + int readbackIndex = 0; + if (FrameTimeRecord::ColorToReadbackIndex(&readbackIndex, msg.PixelReadValue)) + { + RecentFrameSet.AddValue(readbackIndex, msg.FrameTimeSeconds); + LockessRecords.SetState(RecentFrameSet); + } + + NumMsgsBeforeSettle++; + + if (TestActive) + { + int pixelValueDiff = RenderColorValue - LastPixelReadMsg.PixelReadValue; + int rawTimeDiff = LastPixelReadMsg.RawFrameTime - RawStartTiming; + + if (pixelValueDiff < LT2_PixelTestThreshold && pixelValueDiff > -LT2_PixelTestThreshold) + { + TestActive = false; + + LatencyMeasuredInSeconds = LastPixelReadMsg.FrameTimeSeconds - StartTiming; + RawLatencyMeasured = rawTimeDiff; + //LatencyMeasuredInSeconds = RawLatencyMeasured / 1000000.0; + + if(LatencyTesterDev && (NumTestsSuccessful % 5) == 0) + { + int displayNum = (int)(RawLatencyMeasured / 100.0); + //int displayNum = NumMsgsBeforeSettle; + //int displayNum = (int)(LatencyMeasuredInSeconds * 1000.0); + LatencyTestDisplay ltd(1, displayNum); + LatencyTesterDev->SetDisplay(ltd); + } + + NumTestsSuccessful++; + } + else if (TestActive && (rawTimeDiff / 1000) > LT2_TimeoutWaitingForColorDetected) + { + TestActive = false; + LatencyMeasuredInSeconds = -1; + } + } +} + +LatencyTest2::PixelReadHandler::~PixelReadHandler() +{ + RemoveHandlerFromDevices(); +} + +void LatencyTest2::PixelReadHandler::OnMessage(const Message& msg) +{ + if(msg.Type == Message_PixelRead) + pLatencyTestUtil->handleMessage(static_cast<const MessagePixelRead&>(msg)); +} + +bool LatencyTest2::DisplayScreenColor(Color& colorToDisplay) +{ + Lock::Locker devLocker(&TesterLock); + colorToDisplay = Color(RenderColorValue, RenderColorValue, RenderColorValue, 255); + + return TestActive; +} + +}} // namespace OVR::Util diff --git a/LibOVR/Src/Util/Util_LatencyTest2.h b/LibOVR/Src/Util/Util_LatencyTest2.h new file mode 100644 index 0000000..ae11a52 --- /dev/null +++ b/LibOVR/Src/Util/Util_LatencyTest2.h @@ -0,0 +1,238 @@ +/************************************************************************************ + +PublicHeader: OVR.h +Filename : Util_LatencyTest2.h +Content : Wraps the lower level LatencyTester interface for DK2 and adds functionality. +Created : March 10, 2014 +Authors : Volga Aksoy + +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. + +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, +which is provided at the time of installation or download, or which +otherwise accompanies this software in either electronic or hard copy form. 
+ +You may obtain a copy of the License at + +http://www.oculusvr.com/licenses/LICENSE-3.1 + +Unless required by applicable law or agreed to in writing, the Oculus VR SDK +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*************************************************************************************/ + +#ifndef OVR_Util_LatencyTest2_h +#define OVR_Util_LatencyTest2_h + +#include "../OVR_Device.h" + +#include "../Kernel/OVR_String.h" +#include "../Kernel/OVR_List.h" +#include "../Kernel/OVR_Lockless.h" + +namespace OVR { namespace Util { + + +enum { + LT2_ColorIncrement = 32, + LT2_PixelTestThreshold = LT2_ColorIncrement / 3, + LT2_IncrementCount = 256 / LT2_ColorIncrement, + LT2_TimeoutWaitingForColorDetected = 1000 // 1 second +}; + +//------------------------------------------------------------------------------------- + +// Describes frame scanout time used for latency testing. +struct FrameTimeRecord +{ + int ReadbackIndex; + double TimeSeconds; + + // Utility functions to convert color to readBack indices and back. + // The purpose of ReadbackIndex is to allow direct comparison by value. + + static bool ColorToReadbackIndex(int *readbackIndex, unsigned char color) + { + int compareColor = color - LT2_ColorIncrement/2; + int index = color / LT2_ColorIncrement; // Use color without subtraction due to rounding. + int delta = compareColor - index * LT2_ColorIncrement; + + if ((delta < LT2_PixelTestThreshold) && (delta > -LT2_PixelTestThreshold)) + { + *readbackIndex = index; + return true; + } + return false; + } + + static unsigned char ReadbackIndexToColor(int readbackIndex) + { + OVR_ASSERT(readbackIndex < LT2_IncrementCount); + return (unsigned char)(readbackIndex * LT2_ColorIncrement + LT2_ColorIncrement/2); + } +}; + +// FrameTimeRecordSet is a container holding multiple consecutive frame timing records +// returned from the lock-less state. Used by FrameTimeManager. + +struct FrameTimeRecordSet +{ + enum { + RecordCount = 4, + RecordMask = RecordCount - 1 + }; + FrameTimeRecord Records[RecordCount]; + int NextWriteIndex; + + FrameTimeRecordSet() + { + NextWriteIndex = 0; + memset(this, 0, sizeof(FrameTimeRecordSet)); + } + + void AddValue(int readValue, double timeSeconds) + { + Records[NextWriteIndex].ReadbackIndex = readValue; + Records[NextWriteIndex].TimeSeconds = timeSeconds; + NextWriteIndex ++; + if (NextWriteIndex == RecordCount) + NextWriteIndex = 0; + } + // Matching should be done starting from NextWrite index + // until wrap-around + + const FrameTimeRecord& operator [] (int i) const + { + return Records[(NextWriteIndex + i) & RecordMask]; + } + + const FrameTimeRecord& GetMostRecentFrame() + { + return Records[(NextWriteIndex - 1) & RecordMask]; + } + + // Advances I to absolute color index + bool FindReadbackIndex(int* i, int readbackIndex) const + { + for (; *i < RecordCount; *i++) + { + if ((*this)[*i].ReadbackIndex == readbackIndex) + return true; + } + return false; + } + + bool IsAllZeroes() const + { + for (int i = 0; i < RecordCount; i++) + if (Records[i].ReadbackIndex != 0) + return false; + return true; + } +}; + + +//------------------------------------------------------------------------------------- +// ***** LatencyTest2 +// +// LatencyTest2 utility class wraps the low level SensorDevice and manages the scheduling +// of a latency test. 
A single test is composed of a series of individual latency measurements +// which are used to derive min, max, and an average latency value. +// +// Developers are required to call the following methods: +// SetDevice - Sets the SensorDevice to be used for the tests. +// ProcessInputs - This should be called at the same place in the code where the game engine +// reads the headset orientation from LibOVR (typically done by calling +// 'GetOrientation' on the SensorFusion object). Calling this at the right time +// enables us to measure the same latency that occurs for headset orientation +// changes. +// DisplayScreenColor - The latency tester works by sensing the color of the pixels directly +// beneath it. The color of these pixels can be set by drawing a small +// quad at the end of the rendering stage. The quad should be small +// such that it doesn't significantly impact the rendering of the scene, +// but large enough to be 'seen' by the sensor. See the SDK +// documentation for more information. +// GetResultsString - Call this to get a string containing the most recent results. +// If the string has already been gotten then NULL will be returned. +// The string pointer will remain valid until the next time this +// method is called. +// + +class LatencyTest2 : public NewOverrideBase +{ +public: + LatencyTest2(SensorDevice* device = NULL); + ~LatencyTest2(); + + // Set the Latency Tester device that we'll use to send commands to and receive + // notification messages from. + bool SetSensorDevice(SensorDevice* device); + bool SetDisplayDevice(LatencyTestDevice* device); + + // Returns true if this LatencyTestUtil has a Latency Tester device. + bool HasDisplayDevice() const { return LatencyTesterDev.GetPtr() != NULL; } + bool HasDevice() const { return Handler.IsHandlerInstalled(); } + + bool DisplayScreenColor(Color& colorToDisplay); + //const char* GetResultsString(); + + // Begin test. Equivalent to pressing the button on the latency tester. 
+ void BeginTest(double startTime = -1.0f); + bool IsMeasuringNow() const { return TestActive; } + double GetMeasuredLatency() const { return LatencyMeasuredInSeconds; } + +// + FrameTimeRecordSet GetLocklessState() { return LockessRecords.GetState(); } + +private: + LatencyTest2* getThis() { return this; } + + enum LatencyTestMessageType + { + LatencyTest_None, + LatencyTest_Timer, + LatencyTest_ProcessInputs, + }; + + void handleMessage(const MessagePixelRead& msg); + + class PixelReadHandler : public MessageHandler + { + LatencyTest2* pLatencyTestUtil; + public: + PixelReadHandler(LatencyTest2* latencyTester) : pLatencyTestUtil(latencyTester) { } + ~PixelReadHandler(); + + virtual void OnMessage(const Message& msg); + }; + PixelReadHandler Handler; + + Ptr<SensorDevice> HmdDevice; + Ptr<LatencyTestDevice> LatencyTesterDev; + + Lock TesterLock; + bool TestActive; + unsigned char RenderColorValue; + MessagePixelRead LastPixelReadMsg; + double StartTiming; + unsigned int RawStartTiming; + UInt32 RawLatencyMeasured; + double LatencyMeasuredInSeconds; + int NumMsgsBeforeSettle; + unsigned int NumTestsSuccessful; + + // MA: + // Frames are added here, then copied into lockess state + FrameTimeRecordSet RecentFrameSet; + LocklessUpdater<FrameTimeRecordSet> LockessRecords; +}; + + + +}} // namespace OVR::Util + +#endif // OVR_Util_LatencyTest2_h diff --git a/LibOVR/Src/Util/Util_MagCalibration.cpp b/LibOVR/Src/Util/Util_MagCalibration.cpp deleted file mode 100644 index 58b8c45..0000000 --- a/LibOVR/Src/Util/Util_MagCalibration.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/************************************************************************************ - -Filename : Util_MagCalibration.cpp -Content : Procedures for calibrating the magnetometer -Created : April 16, 2013 -Authors : Steve LaValle, Andrew Reisse - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Use of this software is subject to the terms of the Oculus license -agreement provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -*************************************************************************************/ - -#include "Util_MagCalibration.h" - -namespace OVR { namespace Util { - -void MagCalibration::BeginAutoCalibration(SensorFusion& sf) -{ - Stat = Mag_AutoCalibrating; - // This is a "hard" reset of the mag, so need to clear stored values - sf.ClearMagCalibration(); - SampleCount = 0; - - // reset the statistics - MinMagValues = Vector3f(10000.0f,10000.0f,10000.0f); - MaxMagValues = Vector3f(-10000.0f,-10000.0f,-10000.0f); - MinQuatValues = Quatf(1.0f,1.0f,1.0f,1.0f); - MaxQuatValues = Quatf(0.0f,0.0f,0.0f,0.0f); -} - -unsigned MagCalibration::UpdateAutoCalibration(SensorFusion& sf) -{ - if (Stat != Mag_AutoCalibrating) - return Stat; - - Quatf q = sf.GetOrientation(); - Vector3f m = sf.GetMagnetometer(); - - InsertIfAcceptable(q, m); - - if ((SampleCount == 4) && (Stat == Mag_AutoCalibrating)) - { - //LogText("Magnetometer Output Spread: %f %f %f\n",MagSpread.x,MagSpread.y,MagSpread.z); - //LogText("Quaternion Spread: %f %f %f %f\n",QuatSpread.x,QuatSpread.y,QuatSpread.z,QuatSpread.w); - SetCalibration(sf); - } - - return Stat; - -} - -void MagCalibration::BeginManualCalibration(SensorFusion& sf) -{ - Stat = Mag_ManuallyCalibrating; - sf.ClearMagCalibration(); - SampleCount = 0; -} - -bool MagCalibration::IsAcceptableSample(const Quatf& q, const Vector3f& m) -{ - switch (SampleCount) - { - // Initial sample is always acceptable - case 0: - return true; - break; - case 1: - return (q.DistanceSq(QuatSamples[0]) > MinQuatDistanceSq)&& - ((m - MagSamples[0]).LengthSq() > MinMagDistanceSq); - break; - case 2: - return (q.DistanceSq(QuatSamples[0]) > MinQuatDistanceSq)&& - (q.DistanceSq(QuatSamples[1]) > MinQuatDistanceSq)&& - ((m - MagSamples[0]).LengthSq() > MinMagDistanceSq)&& - ((m - MagSamples[1]).LengthSq() > MinMagDistanceSq); - break; - case 3: - return (q.DistanceSq(QuatSamples[0]) > MinQuatDistanceSq)&& - (q.DistanceSq(QuatSamples[1]) > MinQuatDistanceSq)&& - (q.DistanceSq(QuatSamples[2]) > MinQuatDistanceSq)&& - ((PointToPlaneDistance(MagSamples[0],MagSamples[1],MagSamples[2],m) > MinMagDistance)|| - (PointToPlaneDistance(MagSamples[1],MagSamples[2],m,MagSamples[0]) > MinMagDistance)|| - (PointToPlaneDistance(MagSamples[2],m,MagSamples[0],MagSamples[1]) > MinMagDistance)|| - (PointToPlaneDistance(m,MagSamples[0],MagSamples[1],MagSamples[2]) > MinMagDistance)); - } - - return false; -} - - -bool MagCalibration::InsertIfAcceptable(const Quatf& q, const Vector3f& m) -{ - // Update some statistics - if (m.x < MinMagValues.x) - MinMagValues.x = m.x; - if (m.y < MinMagValues.y) - MinMagValues.y = m.y; - if (m.z < MinMagValues.z) - MinMagValues.z = m.z; - if (m.x > MaxMagValues.x) - MaxMagValues.x = m.x; - if (m.y > MaxMagValues.y) - MaxMagValues.y = m.y; - if (m.z > MaxMagValues.z) - MaxMagValues.z = m.z; - if (q.x < MinQuatValues.x) - MinQuatValues.x = q.x; - if (q.y < MinQuatValues.y) - MinQuatValues.y = q.y; - if (q.z < MinQuatValues.z) - MinQuatValues.z = q.z; - if (q.w < MinQuatValues.w) - MinQuatValues.w = q.w; - if (q.x > MaxQuatValues.x) - MaxQuatValues.x = q.x; - if (q.y > MaxQuatValues.y) - MaxQuatValues.y = q.y; - if (q.z > MaxQuatValues.z) - MaxQuatValues.z = q.z; - if (q.w > MaxQuatValues.w) - MaxQuatValues.w = q.w; - MagSpread = MaxMagValues - MinMagValues; - QuatSpread = MaxQuatValues - MinQuatValues; - - if (IsAcceptableSample(q, m)) - { - MagSamples[SampleCount] = m; - QuatSamples[SampleCount] = q; - SampleCount++; - return true; - } - - return false; -} - -Matrix4f 
MagCalibration::GetMagCalibration() const -{ - Matrix4f calMat = Matrix4f(); - calMat.M[0][3] = -MagCenter.x; - calMat.M[1][3] = -MagCenter.y; - calMat.M[2][3] = -MagCenter.z; - return calMat; -} - -bool MagCalibration::SetCalibration(SensorFusion& sf) -{ - if (SampleCount < 4) - return false; - - MagCenter = CalculateSphereCenter(MagSamples[0],MagSamples[1],MagSamples[2],MagSamples[3]); - Matrix4f calMat = GetMagCalibration(); - sf.SetMagCalibration(calMat); - Stat = Mag_Calibrated; - //LogText("MagCenter: %f %f %f\n",MagCenter.x,MagCenter.y,MagCenter.z); - - return true; -} - - -// Calculate the center of a sphere that passes through p1, p2, p3, p4 -Vector3f MagCalibration::CalculateSphereCenter(const Vector3f& p1, const Vector3f& p2, - const Vector3f& p3, const Vector3f& p4) -{ - Matrix4f A; - int i; - Vector3f p[4]; - p[0] = p1; - p[1] = p2; - p[2] = p3; - p[3] = p4; - - for (i = 0; i < 4; i++) - { - A.M[i][0] = p[i].x; - A.M[i][1] = p[i].y; - A.M[i][2] = p[i].z; - A.M[i][3] = 1.0f; - } - float m11 = A.Determinant(); - OVR_ASSERT(m11 != 0.0f); - - for (i = 0; i < 4; i++) - { - A.M[i][0] = p[i].x*p[i].x + p[i].y*p[i].y + p[i].z*p[i].z; - A.M[i][1] = p[i].y; - A.M[i][2] = p[i].z; - A.M[i][3] = 1.0f; - } - float m12 = A.Determinant(); - - for (i = 0; i < 4; i++) - { - A.M[i][0] = p[i].x*p[i].x + p[i].y*p[i].y + p[i].z*p[i].z; - A.M[i][1] = p[i].x; - A.M[i][2] = p[i].z; - A.M[i][3] = 1.0f; - } - float m13 = A.Determinant(); - - for (i = 0; i < 4; i++) - { - A.M[i][0] = p[i].x*p[i].x + p[i].y*p[i].y + p[i].z*p[i].z; - A.M[i][1] = p[i].x; - A.M[i][2] = p[i].y; - A.M[i][3] = 1.0f; - } - float m14 = A.Determinant(); - - float c = 0.5f / m11; - return Vector3f(c*m12, -c*m13, c*m14); -} - -// Distance from p4 to the nearest point on a plane through p1, p2, p3 -float MagCalibration::PointToPlaneDistance(const Vector3f& p1, const Vector3f& p2, - const Vector3f& p3, const Vector3f& p4) -{ - Vector3f v1 = p1 - p2; - Vector3f v2 = p1 - p3; - Vector3f planeNormal = v1.Cross(v2); - planeNormal.Normalize(); - return (fabs((planeNormal * p4) - planeNormal * p1)); -} - -}} diff --git a/LibOVR/Src/Util/Util_MagCalibration.h b/LibOVR/Src/Util/Util_MagCalibration.h deleted file mode 100644 index 1f8e8cb..0000000 --- a/LibOVR/Src/Util/Util_MagCalibration.h +++ /dev/null @@ -1,138 +0,0 @@ -/************************************************************************************ - -PublicHeader: OVR.h -Filename : Util_MagCalibration.h -Content : Procedures for calibrating the magnetometer -Created : April 16, 2013 -Authors : Steve LaValle, Andrew Reisse - -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. - -Use of this software is subject to the terms of the Oculus license -agreement provided at the time of installation or download, or which -otherwise accompanies this software in either electronic or hard copy form. 
- -*************************************************************************************/ - -#ifndef OVR_Util_MagCalibration_h -#define OVR_Util_MagCalibration_h - -#include "../OVR_SensorFusion.h" -#include "../Kernel/OVR_String.h" -#include "../Kernel/OVR_Log.h" - -namespace OVR { namespace Util { - -class MagCalibration -{ -public: - enum MagStatus - { - Mag_Uninitialized = 0, - Mag_AutoCalibrating = 1, - Mag_ManuallyCalibrating = 2, - Mag_Calibrated = 3 - }; - - MagCalibration() : - Stat(Mag_Uninitialized), - MinMagDistance(0.2f), MinQuatDistance(0.5f), - SampleCount(0) - { - MinMagDistanceSq = MinMagDistance * MinMagDistance; - MinQuatDistanceSq = MinQuatDistance * MinQuatDistance; - MinMagValues = Vector3f(10000.0f,10000.0f,10000.0f); - MaxMagValues = Vector3f(-10000.0f,-10000.0f,-10000.0f); - MinQuatValues = Quatf(1.0f,1.0f,1.0f,1.0f); - MaxQuatValues = Quatf(0.0f,0.0f,0.0f,0.0f); - } - - // Methods that are useful for either auto or manual calibration - bool IsUnitialized() const { return Stat == Mag_Uninitialized; } - bool IsCalibrated() const { return Stat == Mag_Calibrated; } - int NumberOfSamples() const { return SampleCount; } - int RequiredSampleCount() const { return 4; } - void AbortCalibration() - { - Stat = Mag_Uninitialized; - SampleCount = 0; - } - - void ClearCalibration(SensorFusion& sf) - { - Stat = Mag_Uninitialized; - SampleCount = 0; - sf.ClearMagCalibration(); - }; - - // Methods for automatic magnetometer calibration - void BeginAutoCalibration(SensorFusion& sf); - unsigned UpdateAutoCalibration(SensorFusion& sf); - bool IsAutoCalibrating() const { return Stat == Mag_AutoCalibrating; } - - // Methods for building a manual (user-guided) calibraton procedure - void BeginManualCalibration(SensorFusion& sf); - bool IsAcceptableSample(const Quatf& q, const Vector3f& m); - bool InsertIfAcceptable(const Quatf& q, const Vector3f& m); - // Returns true if successful, requiring that SampleCount = 4 - bool SetCalibration(SensorFusion& sf); - bool IsManuallyCalibrating() const { return Stat == Mag_ManuallyCalibrating; } - - // This is the minimum acceptable distance (Euclidean) between raw - // magnetometer values to be acceptable for usage in calibration. - void SetMinMagDistance(float dist) - { - MinMagDistance = dist; - MinMagDistanceSq = MinMagDistance * MinMagDistance; - } - - // The minimum acceptable distance (4D Euclidean) between orientations - // to be acceptable for calibration usage. - void SetMinQuatDistance(float dist) - { - MinQuatDistance = dist; - MinQuatDistanceSq = MinQuatDistance * MinQuatDistance; - } - - // A result of the calibration, which is the center of a sphere that - // roughly approximates the magnetometer data. 
- Vector3f GetMagCenter() const { return MagCenter; } - // Retrieves the full magnetometer calibration matrix - Matrix4f GetMagCalibration() const; - // Retrieves the range of each quaternion term during calibration - Quatf GetCalibrationQuatSpread() const { return QuatSpread; } - // Retrieves the range of each magnetometer term during calibration - Vector3f GetCalibrationMagSpread() const { return MagSpread; } - -private: - // Determine the unique sphere through 4 non-coplanar points - Vector3f CalculateSphereCenter(const Vector3f& p1, const Vector3f& p2, - const Vector3f& p3, const Vector3f& p4); - - // Distance from p4 to the nearest point on a plane through p1, p2, p3 - float PointToPlaneDistance(const Vector3f& p1, const Vector3f& p2, - const Vector3f& p3, const Vector3f& p4); - - Vector3f MagCenter; - unsigned Stat; - float MinMagDistance; - float MinQuatDistance; - float MinMagDistanceSq; - float MinQuatDistanceSq; - // For gathering statistics during calibration - Vector3f MinMagValues; - Vector3f MaxMagValues; - Vector3f MagSpread; - Quatf MinQuatValues; - Quatf MaxQuatValues; - Quatf QuatSpread; - - unsigned SampleCount; - Vector3f MagSamples[4]; - Quatf QuatSamples[4]; - -}; - -}} - -#endif diff --git a/LibOVR/Src/Util/Util_Render_Stereo.cpp b/LibOVR/Src/Util/Util_Render_Stereo.cpp index 2ff3ef2..87fed3c 100644 --- a/LibOVR/Src/Util/Util_Render_Stereo.cpp +++ b/LibOVR/Src/Util/Util_Render_Stereo.cpp @@ -3,18 +3,18 @@ Filename : Util_Render_Stereo.cpp Content : Stereo rendering configuration implementation Created : October 22, 2012 -Authors : Michael Antonov, Andrew Reisse +Authors : Michael Antonov, Andrew Reisse, Tom Forsyth -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -25,122 +25,407 @@ limitations under the License. *************************************************************************************/ #include "Util_Render_Stereo.h" +#include "../OVR_SensorFusion.h" namespace OVR { namespace Util { namespace Render { //----------------------------------------------------------------------------------- +// **** Useful debug functions. -// DistortionFnInverse computes the inverse of the distortion function on an argument. 
-float DistortionConfig::DistortionFnInverse(float r) -{ - OVR_ASSERT((r <= 10.0f)); +char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType ) +{ + switch ( eyeCupType ) + { + case EyeCup_DK1A: return "DK1 A"; break; + case EyeCup_DK1B: return "DK1 B"; break; + case EyeCup_DK1C: return "DK1 C"; break; + case EyeCup_DKHD2A: return "DKHD2 A"; break; + case EyeCup_OrangeA: return "Orange A"; break; + case EyeCup_RedA: return "Red A"; break; + case EyeCup_PinkA: return "Pink A"; break; + case EyeCup_BlueA: return "Blue A"; break; + case EyeCup_Delilah1A: return "Delilah 1 A"; break; + case EyeCup_Delilah2A: return "Delilah 2 A"; break; + case EyeCup_JamesA: return "James A"; break; + case EyeCup_SunMandalaA: return "Sun Mandala A"; break; + case EyeCup_DK2A: return "DK2 A"; break; + case EyeCup_LAST: return "LAST"; break; + default: OVR_ASSERT ( false ); return "Error"; break; + } +} - float s, d; - float delta = r * 0.25f; +char const* GetDebugNameHmdType ( HmdTypeEnum hmdType ) +{ + switch ( hmdType ) + { + case HmdType_None: return "None"; break; + case HmdType_DK1: return "DK1"; break; + case HmdType_DKProto: return "DK1 prototype"; break; + case HmdType_DKHDProto: return "DK HD prototype 1"; break; + case HmdType_DKHDProto566Mi: return "DK HD prototype 566 Mi"; break; + case HmdType_DKHD2Proto: return "DK HD prototype 585"; break; + case HmdType_CrystalCoveProto: return "Crystal Cove"; break; + case HmdType_DK2: return "DK2"; break; + case HmdType_Unknown: return "Unknown"; break; + case HmdType_LAST: return "LAST"; break; + default: OVR_ASSERT ( false ); return "Error"; break; + } +} - s = r * 0.5f; - d = fabs(r - DistortionFn(s)); - for (int i = 0; i < 20; i++) +//----------------------------------------------------------------------------------- +// **** Internal pipeline functions. + +struct DistortionAndFov +{ + DistortionRenderDesc Distortion; + FovPort Fov; +}; + +static DistortionAndFov CalculateDistortionAndFovInternal ( StereoEye eyeType, HmdRenderInfo const &hmd, + LensConfig const *pLensOverride = NULL, + FovPort const *pTanHalfFovOverride = NULL, + float extraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION ) +{ + // pLensOverride can be NULL, which means no override. + + DistortionRenderDesc localDistortion = CalculateDistortionRenderDesc ( eyeType, hmd, pLensOverride ); + FovPort fov = CalculateFovFromHmdInfo ( eyeType, localDistortion, hmd, extraEyeRotationInRadians ); + // Here the app or the user would optionally clamp this visible fov to a smaller number if + // they want more perf or resolution and are willing to give up FOV. + // They may also choose to clamp UDLR differently e.g. to get cinemascope-style views. + if ( pTanHalfFovOverride != NULL ) { - float sUp = s + delta; - float sDown = s - delta; - float dUp = fabs(r - DistortionFn(sUp)); - float dDown = fabs(r - DistortionFn(sDown)); + fov = *pTanHalfFovOverride; + } - if (dUp < d) + // Here we could call ClampToPhysicalScreenFov(), but we do want people + // to be able to play with larger-than-screen views. + // The calling app can always do the clamping itself. 
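+    // Illustrative sketch (not part of this commit) of what such app-side clamping could
+    // look like, using override paths that already exist in this file (StereoConfig::SetFov()
+    // or the pOverrideFovport argument of CalculateStereoEyeParams()):
+    //     FovPort clamped = fov;
+    //     clamped.LeftTan  = Alg::Min ( clamped.LeftTan,  1.0f );  // 1.0f is an arbitrary example limit
+    //     clamped.RightTan = Alg::Min ( clamped.RightTan, 1.0f );
+    //     clamped.UpTan    = Alg::Min ( clamped.UpTan,    1.0f );
+    //     clamped.DownTan  = Alg::Min ( clamped.DownTan,  1.0f );
+    //     // ...then pass &clamped back in through one of those overrides.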
+ DistortionAndFov result; + result.Distortion = localDistortion; + result.Fov = fov; + + return result; +} + + +static Recti CalculateViewportInternal ( StereoEye eyeType, + Sizei const actualRendertargetSurfaceSize, + Sizei const requestedRenderedPixelSize, + bool bRendertargetSharedByBothEyes, + bool bMonoRenderingMode = false ) +{ + Recti renderedViewport; + if ( bMonoRenderingMode || !bRendertargetSharedByBothEyes || (eyeType == StereoEye_Center) ) + { + // One eye per RT. + renderedViewport.x = 0; + renderedViewport.y = 0; + renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w, requestedRenderedPixelSize.w ); + renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h ); + } + else + { + // Both eyes share the RT. + renderedViewport.x = 0; + renderedViewport.y = 0; + renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w/2, requestedRenderedPixelSize.w ); + renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h ); + if ( eyeType == StereoEye_Right ) { - s = sUp; - d = dUp; + renderedViewport.x = (actualRendertargetSurfaceSize.w+1)/2; // Round up, not down. } - else if (dDown < d) + } + return renderedViewport; +} + +static Recti CalculateViewportDensityInternal ( StereoEye eyeType, + DistortionRenderDesc const &distortion, + FovPort const &fov, + Sizei const &actualRendertargetSurfaceSize, + bool bRendertargetSharedByBothEyes, + float desiredPixelDensity = 1.0f, + bool bMonoRenderingMode = false ) +{ + OVR_ASSERT ( actualRendertargetSurfaceSize.w > 0 ); + OVR_ASSERT ( actualRendertargetSurfaceSize.h > 0 ); + + // What size RT do we need to get 1:1 mapping? + Sizei idealPixelSize = CalculateIdealPixelSize ( eyeType, distortion, fov, desiredPixelDensity ); + // ...but we might not actually get that size. + return CalculateViewportInternal ( eyeType, + actualRendertargetSurfaceSize, + idealPixelSize, + bRendertargetSharedByBothEyes, bMonoRenderingMode ); +} + +static ViewportScaleAndOffset CalculateViewportScaleAndOffsetInternal ( + ScaleAndOffset2D const &eyeToSourceNDC, + Recti const &renderedViewport, + Sizei const &actualRendertargetSurfaceSize ) +{ + ViewportScaleAndOffset result; + result.RenderedViewport = renderedViewport; + result.EyeToSourceUV = CreateUVScaleAndOffsetfromNDCScaleandOffset( + eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize ); + return result; +} + + +static StereoEyeParams CalculateStereoEyeParamsInternal ( StereoEye eyeType, HmdRenderInfo const &hmd, + DistortionRenderDesc const &distortion, + FovPort const &fov, + Sizei const &actualRendertargetSurfaceSize, + Recti const &renderedViewport, + bool bRightHanded = true, float zNear = 0.01f, float zFar = 10000.0f, + bool bMonoRenderingMode = false, + float zoomFactor = 1.0f ) +{ + // Generate the projection matrix for intermediate rendertarget. + // Z range can also be inserted later by the app (though not in this particular case) + float fovScale = 1.0f / zoomFactor; + FovPort zoomedFov = fov; + zoomedFov.LeftTan *= fovScale; + zoomedFov.RightTan *= fovScale; + zoomedFov.UpTan *= fovScale; + zoomedFov.DownTan *= fovScale; + Matrix4f projection = CreateProjection ( bRightHanded, zoomedFov, zNear, zFar ); + + // Find the mapping from TanAngle space to target NDC space. + // Note this does NOT take the zoom factor into account because + // this is the mapping of actual physical eye FOV (and our eyes do not zoom!) + // to screen space. 
+ ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov ( fov ); + + // The size of the final FB, which is fixed and determined by the physical size of the device display. + Recti distortedViewport = GetFramebufferViewport ( eyeType, hmd ); + Vector3f virtualCameraOffset = CalculateEyeVirtualCameraOffset(hmd, eyeType, bMonoRenderingMode); + + StereoEyeParams result; + result.Eye = eyeType; + result.ViewAdjust = Matrix4f::Translation(virtualCameraOffset); + result.Distortion = distortion; + result.DistortionViewport = distortedViewport; + result.Fov = fov; + result.RenderedProjection = projection; + result.EyeToSourceNDC = eyeToSourceNDC; + ViewportScaleAndOffset vsao = CalculateViewportScaleAndOffsetInternal ( eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize ); + result.RenderedViewport = vsao.RenderedViewport; + result.EyeToSourceUV = vsao.EyeToSourceUV; + + return result; +} + + +Vector3f CalculateEyeVirtualCameraOffset(HmdRenderInfo const &hmd, + StereoEye eyeType, bool bmonoRenderingMode) +{ + Vector3f virtualCameraOffset(0); + + if (!bmonoRenderingMode) + { + float eyeCenterRelief = hmd.GetEyeCenter().ReliefInMeters; + + if (eyeType == StereoEye_Left) { - s = sDown; - d = dDown; + virtualCameraOffset.x = hmd.EyeLeft.NoseToPupilInMeters; + virtualCameraOffset.z = eyeCenterRelief - hmd.EyeLeft.ReliefInMeters; } - else + else if (eyeType == StereoEye_Right) { - delta *= 0.5f; + virtualCameraOffset.x = -hmd.EyeRight.NoseToPupilInMeters; + virtualCameraOffset.z = eyeCenterRelief - hmd.EyeRight.ReliefInMeters; } } - return s; + return virtualCameraOffset; } //----------------------------------------------------------------------------------- -// **** StereoConfig Implementation +// **** Higher-level utility functions. -StereoConfig::StereoConfig(StereoMode mode, const Viewport& vp) - : Mode(mode), - InterpupillaryDistance(0.064f), AspectMultiplier(1.0f), - FullView(vp), DirtyFlag(true), IPDOverride(false), - YFov(0), Aspect(vp.w / float(vp.h)), ProjectionCenterOffset(0), - OrthoPixelOffset(0) +Sizei CalculateRecommendedTextureSize ( HmdRenderInfo const &hmd, + bool bRendertargetSharedByBothEyes, + float pixelDensityInCenter /*= 1.0f*/ ) { - // And default distortion for it. - Distortion.SetCoefficients(1.0f, 0.22f, 0.24f); - Distortion.Scale = 1.0f; // Will be computed later. + Sizei idealPixelSize[2]; + for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) + { + StereoEye eyeType = ( eyeNum == 0 ) ? StereoEye_Left : StereoEye_Right; - // Fit left of the image. - DistortionFitX = -1.0f; - DistortionFitY = 0.0f; + DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); - // Initialize "fake" default HMD values for testing without HMD plugged in. - // These default values match those returned by the HMD. 
- HMD.HResolution = 1280; - HMD.VResolution = 800; - HMD.HScreenSize = 0.14976f; - HMD.VScreenSize = HMD.HScreenSize / (1280.0f / 800.0f); - HMD.InterpupillaryDistance = InterpupillaryDistance; - HMD.LensSeparationDistance = 0.0635f; - HMD.EyeToScreenDistance = 0.041f; - HMD.DistortionK[0] = Distortion.K[0]; - HMD.DistortionK[1] = Distortion.K[1]; - HMD.DistortionK[2] = Distortion.K[2]; - HMD.DistortionK[3] = 0; + idealPixelSize[eyeNum] = CalculateIdealPixelSize ( eyeType, + distortionAndFov.Distortion, + distortionAndFov.Fov, + pixelDensityInCenter ); + } - Set2DAreaFov(DegreeToRad(85.0f)); + Sizei result; + result.w = Alg::Max ( idealPixelSize[0].w, idealPixelSize[1].w ); + result.h = Alg::Max ( idealPixelSize[0].h, idealPixelSize[1].h ); + if ( bRendertargetSharedByBothEyes ) + { + result.w *= 2; + } + return result; } -void StereoConfig::SetFullViewport(const Viewport& vp) +StereoEyeParams CalculateStereoEyeParams ( HmdRenderInfo const &hmd, + StereoEye eyeType, + Sizei const &actualRendertargetSurfaceSize, + bool bRendertargetSharedByBothEyes, + bool bRightHanded /*= true*/, + float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/, + Sizei const *pOverrideRenderedPixelSize /* = NULL*/, + FovPort const *pOverrideFovport /*= NULL*/, + float zoomFactor /*= 1.0f*/ ) { - if (vp != FullView) - { - FullView = vp; - DirtyFlag = true; + DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); + if ( pOverrideFovport != NULL ) + { + distortionAndFov.Fov = *pOverrideFovport; } + + Recti viewport; + if ( pOverrideRenderedPixelSize != NULL ) + { + viewport = CalculateViewportInternal ( eyeType, actualRendertargetSurfaceSize, *pOverrideRenderedPixelSize, bRendertargetSharedByBothEyes, false ); + } + else + { + viewport = CalculateViewportDensityInternal ( eyeType, + distortionAndFov.Distortion, + distortionAndFov.Fov, + actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, 1.0f, false ); + } + + return CalculateStereoEyeParamsInternal ( + eyeType, hmd, + distortionAndFov.Distortion, + distortionAndFov.Fov, + actualRendertargetSurfaceSize, viewport, + bRightHanded, zNear, zFar, false, zoomFactor ); } -void StereoConfig::SetHMDInfo(const HMDInfo& hmd) + +FovPort CalculateRecommendedFov ( HmdRenderInfo const &hmd, + StereoEye eyeType, + bool bMakeFovSymmetrical /* = false */ ) { - HMD = hmd; - Distortion.K[0] = hmd.DistortionK[0]; - Distortion.K[1] = hmd.DistortionK[1]; - Distortion.K[2] = hmd.DistortionK[2]; - Distortion.K[3] = hmd.DistortionK[3]; + DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); + FovPort fov = distortionAndFov.Fov; + if ( bMakeFovSymmetrical ) + { + // Deal with engines that cannot support an off-center projection. + // Unfortunately this means they will be rendering pixels that the user can't actually see. 
+ float fovTanH = Alg::Max ( fov.LeftTan, fov.RightTan ); + float fovTanV = Alg::Max ( fov.UpTan, fov.DownTan ); + fov.LeftTan = fovTanH; + fov.RightTan = fovTanH; + fov.UpTan = fovTanV; + fov.DownTan = fovTanV; + } + return fov; +} - Distortion.SetChromaticAberration(hmd.ChromaAbCorrection[0], hmd.ChromaAbCorrection[1], - hmd.ChromaAbCorrection[2], hmd.ChromaAbCorrection[3]); +ViewportScaleAndOffset ModifyRenderViewport ( StereoEyeParams const ¶ms, + Sizei const &actualRendertargetSurfaceSize, + Recti const &renderViewport ) +{ + return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize ); +} - if (!IPDOverride) - InterpupillaryDistance = HMD.InterpupillaryDistance; +ViewportScaleAndOffset ModifyRenderSize ( StereoEyeParams const ¶ms, + Sizei const &actualRendertargetSurfaceSize, + Sizei const &requestedRenderSize, + bool bRendertargetSharedByBothEyes /*= false*/ ) +{ + Recti renderViewport = CalculateViewportInternal ( params.Eye, actualRendertargetSurfaceSize, requestedRenderSize, bRendertargetSharedByBothEyes, false ); + return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize ); +} - DirtyFlag = true; +ViewportScaleAndOffset ModifyRenderDensity ( StereoEyeParams const ¶ms, + Sizei const &actualRendertargetSurfaceSize, + float pixelDensity /*= 1.0f*/, + bool bRendertargetSharedByBothEyes /*= false*/ ) +{ + Recti renderViewport = CalculateViewportDensityInternal ( params.Eye, params.Distortion, params.Fov, actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, pixelDensity, false ); + return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize ); } -void StereoConfig::SetDistortionFitPointVP(float x, float y) + +//----------------------------------------------------------------------------------- +// **** StereoConfig Implementation + +StereoConfig::StereoConfig(StereoMode mode) + : Mode(mode), + DirtyFlag(true) { - DistortionFitX = x; - DistortionFitY = y; - DirtyFlag = true; + // Initialize "fake" default HMD values for testing without HMD plugged in. 
+ // These default values match those returned by DK1 + // (at least they did at time of writing - certainly good enough for debugging) + Hmd.HmdType = HmdType_None; + Hmd.ResolutionInPixels = Sizei(1280, 800); + Hmd.ScreenSizeInMeters = Sizef(0.1498f, 0.0936f); + Hmd.ScreenGapSizeInMeters = 0.0f; + Hmd.CenterFromTopInMeters = 0.0468f; + Hmd.LensSeparationInMeters = 0.0635f; + Hmd.LensDiameterInMeters = 0.035f; + Hmd.LensSurfaceToMidplateInMeters = 0.025f; + Hmd.EyeCups = EyeCup_DK1A; + Hmd.Shutter.Type = HmdShutter_RollingTopToBottom; + Hmd.Shutter.VsyncToNextVsync = ( 1.0f / 60.0f ); + Hmd.Shutter.VsyncToFirstScanline = 0.000052f; + Hmd.Shutter.FirstScanlineToLastScanline = 0.016580f; + Hmd.Shutter.PixelSettleTime = 0.015f; + Hmd.Shutter.PixelPersistence = ( 1.0f / 60.0f ); + Hmd.EyeLeft.Distortion.SetToIdentity(); + Hmd.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.043875f; + Hmd.EyeLeft.Distortion.Eqn = Distortion_RecipPoly4; + Hmd.EyeLeft.Distortion.K[0] = 1.0f; + Hmd.EyeLeft.Distortion.K[1] = -0.3999f; + Hmd.EyeLeft.Distortion.K[2] = 0.2408f; + Hmd.EyeLeft.Distortion.K[3] = -0.4589f; + Hmd.EyeLeft.Distortion.MaxR = 1.0f; + Hmd.EyeLeft.Distortion.ChromaticAberration[0] = 0.006f; + Hmd.EyeLeft.Distortion.ChromaticAberration[1] = 0.0f; + Hmd.EyeLeft.Distortion.ChromaticAberration[2] = -0.014f; + Hmd.EyeLeft.Distortion.ChromaticAberration[3] = 0.0f; + Hmd.EyeLeft.NoseToPupilInMeters = 0.62f; + Hmd.EyeLeft.ReliefInMeters = 0.013f; + Hmd.EyeRight = Hmd.EyeLeft; + + SetViewportMode = SVPM_Density; + SetViewportPixelsPerDisplayPixel = 1.0f; + // Not used in this mode, but init them anyway. + SetViewportSize[0] = Sizei(0,0); + SetViewportSize[1] = Sizei(0,0); + SetViewport[0] = Recti(0,0,0,0); + SetViewport[1] = Recti(0,0,0,0); + + OverrideLens = false; + OverrideTanHalfFov = false; + OverrideZeroIpd = false; + ExtraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION; + IsRendertargetSharedByBothEyes = true; + RightHandedProjection = true; + + // This should cause an assert if the app does not call SetRendertargetSize() + RendertargetSize = Sizei ( 0, 0 ); + + ZNear = 0.01f; + ZFar = 10000.0f; + + Set2DAreaFov(DegreeToRad(85.0f)); } -void StereoConfig::SetDistortionFitPointPixels(float x, float y) +void StereoConfig::SetHmdRenderInfo(const HmdRenderInfo& hmd) { - DistortionFitX = (4 * x / float(FullView.w)) - 1.0f; - DistortionFitY = (2 * y / float(FullView.h)) - 1.0f; + Hmd = hmd; DirtyFlag = true; } @@ -150,174 +435,830 @@ void StereoConfig::Set2DAreaFov(float fovRadians) DirtyFlag = true; } - -const StereoEyeParams& StereoConfig::GetEyeRenderParams(StereoEye eye) +const StereoEyeParamsWithOrtho& StereoConfig::GetEyeRenderParams(StereoEye eye) { + if ( DirtyFlag ) + { + UpdateComputedState(); + } + static const UByte eyeParamIndices[3] = { 0, 0, 1 }; - updateIfDirty(); OVR_ASSERT(eye < sizeof(eyeParamIndices)); return EyeRenderParams[eyeParamIndices[eye]]; } - -void StereoConfig::updateComputedState() -{ - // Need to compute all of the following: - // - Aspect Ratio - // - FOV - // - Projection offsets for 3D - // - Distortion XCenterOffset - // - Update 2D - // - Initialize EyeRenderParams - - // Compute aspect ratio. Stereo mode cuts width in half. - Aspect = float(FullView.w) / float(FullView.h); - Aspect *= (Mode == Stereo_None) ? 1.0f : 0.5f; - Aspect *= AspectMultiplier; - - updateDistortionOffsetAndScale(); - - // Compute Vertical FOV based on distance, distortion, etc. - // Distance from vertical center to render vertical edge perceived through the lens. 
- // This will be larger then normal screen size due to magnification & distortion. - // - // This percievedHalfRTDistance equation should hold as long as the render target - // and display have the same aspect ratios. What we'd like to know is where the edge - // of the render target will on the perceived screen surface. With NO LENS, - // the answer would be: - // - // halfRTDistance = (VScreenSize / 2) * aspect * - // DistortionFn_Inverse( DistortionScale / aspect ) - // - // To model the optical lens we eliminates DistortionFn_Inverse. Aspect ratios - // cancel out, so we get: - // - // halfRTDistance = (VScreenSize / 2) * DistortionScale - // - if (Mode == Stereo_None) - { - YFov = DegreeToRad(80.0f); +void StereoConfig::SetLensOverride ( LensConfig const *pLensOverrideLeft /*= NULL*/, + LensConfig const *pLensOverrideRight /*= NULL*/ ) +{ + if ( pLensOverrideLeft == NULL ) + { + OverrideLens = false; } else { - float percievedHalfRTDistance = (HMD.VScreenSize / 2) * Distortion.Scale; - YFov = 2.0f * atan(percievedHalfRTDistance/HMD.EyeToScreenDistance); + OverrideLens = true; + LensOverrideLeft = *pLensOverrideLeft; + LensOverrideRight = *pLensOverrideLeft; + if ( pLensOverrideRight != NULL ) + { + LensOverrideRight = *pLensOverrideRight; + } } - - updateProjectionOffset(); - update2D(); - updateEyeParams(); - - DirtyFlag = false; + DirtyFlag = true; } -void StereoConfig::updateDistortionOffsetAndScale() +void StereoConfig::SetRendertargetSize (Size<int> const rendertargetSize, + bool rendertargetIsSharedByBothEyes ) { - // Distortion center shift is stored separately, since it isn't affected - // by the eye distance. - float lensOffset = HMD.LensSeparationDistance * 0.5f; - float lensShift = HMD.HScreenSize * 0.25f - lensOffset; - float lensViewportShift = 4.0f * lensShift / HMD.HScreenSize; - Distortion.XCenterOffset= lensViewportShift; + RendertargetSize = rendertargetSize; + IsRendertargetSharedByBothEyes = rendertargetIsSharedByBothEyes; + DirtyFlag = true; +} - // Compute distortion scale from DistortionFitX & DistortionFitY. - // Fit value of 0.0 means "no fit". - if ((fabs(DistortionFitX) < 0.0001f) && (fabs(DistortionFitY) < 0.0001f)) +void StereoConfig::SetFov ( FovPort const *pfovLeft /*= NULL*/, + FovPort const *pfovRight /*= NULL*/ ) +{ + DirtyFlag = true; + if ( pfovLeft == NULL ) { - Distortion.Scale = 1.0f; + OverrideTanHalfFov = false; } else { - // Convert fit value to distortion-centered coordinates before fit radius - // calculation. - float stereoAspect = 0.5f * float(FullView.w) / float(FullView.h); - float dx = DistortionFitX - Distortion.XCenterOffset; - float dy = DistortionFitY / stereoAspect; - float fitRadius = sqrt(dx * dx + dy * dy); - Distortion.Scale = Distortion.DistortionFn(fitRadius)/fitRadius; - } -} - -void StereoConfig::updateProjectionOffset() -{ - // Post-projection viewport coordinates range from (-1.0, 1.0), with the - // center of the left viewport falling at (1/4) of horizontal screen size. - // We need to shift this projection center to match with the lens center; - // note that we don't use the IPD here due to collimated light property of the lens. - // We compute this shift in physical units (meters) to - // correct for different screen sizes and then rescale to viewport coordinates. 
- float viewCenter = HMD.HScreenSize * 0.25f; - float eyeProjectionShift = viewCenter - HMD.LensSeparationDistance*0.5f; - ProjectionCenterOffset = 4.0f * eyeProjectionShift / HMD.HScreenSize; -} - -void StereoConfig::update2D() -{ - // Orthographic projection fakes a screen at a distance of 0.8m from the - // eye, where hmd screen projection surface is at 0.05m distance. - // This introduces an extra off-center pixel projection shift based on eye distance. - // This offCenterShift is the pixel offset of the other camera's center - // in your reference camera based on surface distance. - float metersToPixels = (HMD.HResolution / HMD.HScreenSize); - float lensDistanceScreenPixels= metersToPixels * HMD.LensSeparationDistance; - float eyeDistanceScreenPixels = metersToPixels * InterpupillaryDistance; - float offCenterShiftPixels = (HMD.EyeToScreenDistance / 0.8f) * eyeDistanceScreenPixels; - float leftPixelCenter = (HMD.HResolution / 2) - lensDistanceScreenPixels * 0.5f; - float rightPixelCenter = lensDistanceScreenPixels * 0.5f; - float pixelDifference = leftPixelCenter - rightPixelCenter; - - // This computes the number of pixels that fit within specified 2D FOV (assuming - // distortion scaling will be done). - float percievedHalfScreenDistance = tan(Area2DFov * 0.5f) * HMD.EyeToScreenDistance; - float vfovSize = 2.0f * percievedHalfScreenDistance / Distortion.Scale; - FovPixels = HMD.VResolution * vfovSize / HMD.VScreenSize; - - // Create orthographic matrix. - Matrix4f& m = OrthoCenter; - m.SetIdentity(); - m.M[0][0] = FovPixels / (FullView.w * 0.5f); - m.M[1][1] = -FovPixels / FullView.h; - m.M[0][3] = 0; - m.M[1][3] = 0; - m.M[2][2] = 0; + OverrideTanHalfFov = true; + FovOverrideLeft = *pfovLeft; + FovOverrideRight = *pfovLeft; + if ( pfovRight != NULL ) + { + FovOverrideRight = *pfovRight; + } + } +} + + +void StereoConfig::SetZeroVirtualIpdOverride ( bool enableOverride ) +{ + DirtyFlag = true; + OverrideZeroIpd = enableOverride; +} + + +void StereoConfig::SetZClipPlanesAndHandedness ( float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/, bool rightHandedProjection /*= true*/ ) +{ + DirtyFlag = true; + ZNear = zNear; + ZFar = zFar; + RightHandedProjection = rightHandedProjection; +} + +void StereoConfig::SetExtraEyeRotation ( float extraEyeRotationInRadians ) +{ + DirtyFlag = true; + ExtraEyeRotationInRadians = extraEyeRotationInRadians; +} - float orthoPixelOffset = (pixelDifference + offCenterShiftPixels/Distortion.Scale) * 0.5f; - OrthoPixelOffset = orthoPixelOffset * 2.0f / FovPixels; +Sizei StereoConfig::CalculateRecommendedTextureSize ( bool rendertargetSharedByBothEyes, + float pixelDensityInCenter /*= 1.0f*/ ) +{ + return Render::CalculateRecommendedTextureSize ( Hmd, rendertargetSharedByBothEyes, pixelDensityInCenter ); } -void StereoConfig::updateEyeParams() + + +void StereoConfig::UpdateComputedState() { - // Projection matrix for the center eye, which the left/right matrices are based on. 
- Matrix4f projCenter = Matrix4f::PerspectiveRH(YFov, Aspect, 0.01f, 2000.0f); - - switch(Mode) + int numEyes = 2; + StereoEye eyeTypes[2]; + + switch ( Mode ) { case Stereo_None: - { - EyeRenderParams[0].Init(StereoEye_Center, FullView, 0, projCenter, OrthoCenter); - } + numEyes = 1; + eyeTypes[0] = StereoEye_Center; break; case Stereo_LeftRight_Multipass: + numEyes = 2; + eyeTypes[0] = StereoEye_Left; + eyeTypes[1] = StereoEye_Right; + break; + + default: + OVR_ASSERT( false ); break; + } + + // If either of these fire, you've probably forgotten to call SetRendertargetSize() + OVR_ASSERT ( RendertargetSize.w > 0 ); + OVR_ASSERT ( RendertargetSize.h > 0 ); + + for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ ) + { + StereoEye eyeType = eyeTypes[eyeNum]; + LensConfig *pLensOverride = NULL; + if ( OverrideLens ) { - Matrix4f projLeft = Matrix4f::Translation(ProjectionCenterOffset, 0, 0) * projCenter, - projRight = Matrix4f::Translation(-ProjectionCenterOffset, 0, 0) * projCenter; - - EyeRenderParams[0].Init(StereoEye_Left, - Viewport(FullView.x, FullView.y, FullView.w/2, FullView.h), - +InterpupillaryDistance * 0.5f, // World view shift. - projLeft, OrthoCenter * Matrix4f::Translation(OrthoPixelOffset, 0, 0), - &Distortion); - EyeRenderParams[1].Init(StereoEye_Right, - Viewport(FullView.x + FullView.w/2, FullView.y, FullView.w/2, FullView.h), - -InterpupillaryDistance * 0.5f, - projRight, OrthoCenter * Matrix4f::Translation(-OrthoPixelOffset, 0, 0), - &Distortion); + if ( eyeType == StereoEye_Right ) + { + pLensOverride = &LensOverrideRight; + } + else + { + pLensOverride = &LensOverrideLeft; + } } - break; + + FovPort *pTanHalfFovOverride = NULL; + if ( OverrideTanHalfFov ) + { + if ( eyeType == StereoEye_Right ) + { + pTanHalfFovOverride = &FovOverrideRight; + } + else + { + pTanHalfFovOverride = &FovOverrideLeft; + } + } + + DistortionAndFov distortionAndFov = + CalculateDistortionAndFovInternal ( eyeType, Hmd, + pLensOverride, pTanHalfFovOverride, + ExtraEyeRotationInRadians ); + + EyeRenderParams[eyeNum].StereoEye.Distortion = distortionAndFov.Distortion; + EyeRenderParams[eyeNum].StereoEye.Fov = distortionAndFov.Fov; } + if ( OverrideZeroIpd ) + { + // Take the union of the calculated eye FOVs. + FovPort fov; + fov.UpTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.UpTan , EyeRenderParams[1].StereoEye.Fov.UpTan ); + fov.DownTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.DownTan , EyeRenderParams[1].StereoEye.Fov.DownTan ); + fov.LeftTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.LeftTan , EyeRenderParams[1].StereoEye.Fov.LeftTan ); + fov.RightTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.RightTan, EyeRenderParams[1].StereoEye.Fov.RightTan ); + EyeRenderParams[0].StereoEye.Fov = fov; + EyeRenderParams[1].StereoEye.Fov = fov; + } + + for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ ) + { + StereoEye eyeType = eyeTypes[eyeNum]; + + DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion; + FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov; + + // Use a placeholder - will be overridden later. + Recti tempViewport = Recti ( 0, 0, 1, 1 ); + + EyeRenderParams[eyeNum].StereoEye = CalculateStereoEyeParamsInternal ( + eyeType, Hmd, localDistortion, fov, + RendertargetSize, tempViewport, + RightHandedProjection, ZNear, ZFar, + OverrideZeroIpd ); + + // We want to create a virtual 2D surface we can draw debug text messages to. + // We'd like it to be a fixed distance (OrthoDistance) away, + // and to cover a specific FOV (Area2DFov). 
We need to find the projection matrix for this, + // and also to know how large it is in pixels to achieve a 1:1 mapping at the center of the screen. + float orthoDistance = 0.8f; + float orthoHalfFov = tanf ( Area2DFov * 0.5f ); + Vector2f unityOrthoPixelSize = localDistortion.PixelsPerTanAngleAtCenter * ( orthoHalfFov * 2.0f ); + float localInterpupillaryDistance = Hmd.EyeLeft.NoseToPupilInMeters + Hmd.EyeRight.NoseToPupilInMeters; + if ( OverrideZeroIpd ) + { + localInterpupillaryDistance = 0.0f; + } + Matrix4f ortho = CreateOrthoSubProjection ( true, eyeType, + orthoHalfFov, orthoHalfFov, + unityOrthoPixelSize.x, unityOrthoPixelSize.y, + orthoDistance, localInterpupillaryDistance, + EyeRenderParams[eyeNum].StereoEye.RenderedProjection ); + EyeRenderParams[eyeNum].OrthoProjection = ortho; + } + + // ...and now set up the viewport, scale & offset the way the app wanted. + setupViewportScaleAndOffsets(); + + if ( OverrideZeroIpd ) + { + // Monocular rendering has some fragile parts... don't break any by accident. + OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.UpTan == EyeRenderParams[1].StereoEye.Fov.UpTan ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.DownTan == EyeRenderParams[1].StereoEye.Fov.DownTan ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.LeftTan == EyeRenderParams[1].StereoEye.Fov.LeftTan ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.RightTan == EyeRenderParams[1].StereoEye.Fov.RightTan ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][0] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][0] ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][1] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][1] ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][2] ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][2] ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedViewport == EyeRenderParams[1].StereoEye.RenderedViewport ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Offset == EyeRenderParams[1].StereoEye.EyeToSourceUV.Offset ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Scale == EyeRenderParams[1].StereoEye.EyeToSourceUV.Scale ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Offset == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Offset ); + OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Scale == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Scale ); + OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][0] == EyeRenderParams[1].OrthoProjection.M[0][0] ); + OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][1] == EyeRenderParams[1].OrthoProjection.M[1][1] ); + OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][2] == EyeRenderParams[1].OrthoProjection.M[0][2] ); + OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][2] == EyeRenderParams[1].OrthoProjection.M[1][2] ); + } + + DirtyFlag = false; +} + + + +ViewportScaleAndOffsetBothEyes StereoConfig::setupViewportScaleAndOffsets() +{ + for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) + { + StereoEye eyeType = ( eyeNum == 0 ) ? 
StereoEye_Left : StereoEye_Right; + + DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion; + FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov; + + Recti renderedViewport; + switch ( SetViewportMode ) + { + case SVPM_Density: + renderedViewport = CalculateViewportDensityInternal ( + eyeType, localDistortion, fov, + RendertargetSize, IsRendertargetSharedByBothEyes, + SetViewportPixelsPerDisplayPixel, OverrideZeroIpd ); + break; + case SVPM_Size: + if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd ) + { + renderedViewport = CalculateViewportInternal ( + eyeType, RendertargetSize, + SetViewportSize[1], + IsRendertargetSharedByBothEyes, OverrideZeroIpd ); + } + else + { + renderedViewport = CalculateViewportInternal ( + eyeType, RendertargetSize, + SetViewportSize[0], + IsRendertargetSharedByBothEyes, OverrideZeroIpd ); + } + break; + case SVPM_Viewport: + if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd ) + { + renderedViewport = SetViewport[1]; + } + else + { + renderedViewport = SetViewport[0]; + } + break; + default: OVR_ASSERT ( false ); break; + } + + ViewportScaleAndOffset vpsao = CalculateViewportScaleAndOffsetInternal ( + EyeRenderParams[eyeNum].StereoEye.EyeToSourceNDC, + renderedViewport, + RendertargetSize ); + EyeRenderParams[eyeNum].StereoEye.RenderedViewport = vpsao.RenderedViewport; + EyeRenderParams[eyeNum].StereoEye.EyeToSourceUV = vpsao.EyeToSourceUV; + } + + ViewportScaleAndOffsetBothEyes result; + result.Left.EyeToSourceUV = EyeRenderParams[0].StereoEye.EyeToSourceUV; + result.Left.RenderedViewport = EyeRenderParams[0].StereoEye.RenderedViewport; + result.Right.EyeToSourceUV = EyeRenderParams[1].StereoEye.EyeToSourceUV; + result.Right.RenderedViewport = EyeRenderParams[1].StereoEye.RenderedViewport; + return result; +} + +// Specify a pixel density - how many rendered pixels per pixel in the physical display. +ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderDensity ( float pixelsPerDisplayPixel ) +{ + SetViewportMode = SVPM_Density; + SetViewportPixelsPerDisplayPixel = pixelsPerDisplayPixel; + return setupViewportScaleAndOffsets(); +} + +// Supply the size directly. Will be clamped to the physical rendertarget size. +ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderSize ( Sizei const &renderSizeLeft, Sizei const &renderSizeRight ) +{ + SetViewportMode = SVPM_Size; + SetViewportSize[0] = renderSizeLeft; + SetViewportSize[1] = renderSizeRight; + return setupViewportScaleAndOffsets(); +} + +// Supply the viewport directly. This is not clamped to the physical rendertarget - careful now! +ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderViewport ( Recti const &renderViewportLeft, Recti const &renderViewportRight ) +{ + SetViewportMode = SVPM_Viewport; + SetViewport[0] = renderViewportLeft; + SetViewport[1] = renderViewportRight; + return setupViewportScaleAndOffsets(); +} + +Matrix4f StereoConfig::GetProjectionWithZoom ( StereoEye eye, float fovZoom ) const +{ + int eyeNum = ( eye == StereoEye_Right ) ? 1 : 0; + float fovScale = 1.0f / fovZoom; + FovPort fovPort = EyeRenderParams[eyeNum].StereoEye.Fov; + fovPort.LeftTan *= fovScale; + fovPort.RightTan *= fovScale; + fovPort.UpTan *= fovScale; + fovPort.DownTan *= fovScale; + return CreateProjection ( RightHandedProjection, fovPort, ZNear, ZFar ); +} + + + + +//----------------------------------------------------------------------------------- +// ***** Distortion Mesh Rendering + + +// Pow2 for the Morton order to work! 
+// 4 is too low - it is easy to see the "wobbles" in the HMD. +// 5 is realllly close but you can see pixel differences with even/odd frame checking. +// 6 is indistinguishable on a monitor on even/odd frames. +static const int DMA_GridSizeLog2 = 6; +static const int DMA_GridSize = 1<<DMA_GridSizeLog2; +static const int DMA_NumVertsPerEye = (DMA_GridSize+1)*(DMA_GridSize+1); +static const int DMA_NumTrisPerEye = (DMA_GridSize)*(DMA_GridSize)*2; + + + +void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices ) +{ + OVR_FREE ( pVertices ); + OVR_FREE ( pTriangleMeshIndices ); +} + +void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices, + int *pNumVertices, int *pNumTriangles, + const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo ) +{ + bool rightEye = ( stereoParams.Eye == StereoEye_Right ); + int vertexCount = 0; + int triangleCount = 0; + + // Generate mesh into allocated data and return result. + DistortionMeshCreate(ppVertices, ppTriangleListIndices, &vertexCount, &triangleCount, + rightEye, hmdRenderInfo, stereoParams.Distortion, stereoParams.EyeToSourceNDC); + + *pNumVertices = vertexCount; + *pNumTriangles = triangleCount; +} + + +// Generate distortion mesh for a eye. +void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices, + int *pNumVertices, int *pNumTriangles, + bool rightEye, + const HmdRenderInfo &hmdRenderInfo, + const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC ) +{ + *pNumVertices = DMA_NumVertsPerEye; + *pNumTriangles = DMA_NumTrisPerEye; + + *ppVertices = (DistortionMeshVertexData*) + OVR_ALLOC( sizeof(DistortionMeshVertexData) * (*pNumVertices) ); + *ppTriangleListIndices = (UInt16*) OVR_ALLOC( sizeof(UInt16) * (*pNumTriangles) * 3 ); + + if (!*ppVertices || !*ppTriangleListIndices) + { + if (*ppVertices) + { + OVR_FREE(*ppVertices); + } + if (*ppTriangleListIndices) + { + OVR_FREE(*ppTriangleListIndices); + } + *ppVertices = NULL; + *ppTriangleListIndices = NULL; + *pNumTriangles = NULL; + *pNumVertices = NULL; + return; + } + + // When does the fade-to-black edge start? Chosen heuristically. + const float fadeOutBorderFraction = 0.075f; + + + // Populate vertex buffer info + float xOffset = 0.0f; + float uOffset = 0.0f; + + if (rightEye) + { + xOffset = 1.0f; + uOffset = 0.5f; + } + + // First pass - build up raw vertex data. + DistortionMeshVertexData* pcurVert = *ppVertices; + + for ( int y = 0; y <= DMA_GridSize; y++ ) + { + for ( int x = 0; x <= DMA_GridSize; x++ ) + { + + Vector2f sourceCoordNDC; + // NDC texture coords [-1,+1] + sourceCoordNDC.x = 2.0f * ( (float)x / (float)DMA_GridSize ) - 1.0f; + sourceCoordNDC.y = 2.0f * ( (float)y / (float)DMA_GridSize ) - 1.0f; + Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC ); + + // This is the function that does the really heavy lifting. + Vector2f screenNDC = TransformTanFovSpaceToScreenNDC ( distortion, tanEyeAngle, false ); + + // We then need RGB UVs. Since chromatic aberration is generated from screen coords, not + // directly from texture NDCs, we can't just use tanEyeAngle, we need to go the long way round. 
+ Vector2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB; + TransformScreenNDCToTanFovSpaceChroma ( &tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB, + distortion, screenNDC ); + + pcurVert->TanEyeAnglesR = tanEyeAnglesR; + pcurVert->TanEyeAnglesG = tanEyeAnglesG; + pcurVert->TanEyeAnglesB = tanEyeAnglesB; + + + HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type; + switch ( shutterType ) + { + case HmdShutter_Global: + pcurVert->TimewarpLerp = 0.0f; + break; + case HmdShutter_RollingLeftToRight: + // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0 + pcurVert->TimewarpLerp = screenNDC.x * 0.25f + 0.25f; + if (rightEye) + { + pcurVert->TimewarpLerp += 0.5f; + } + break; + case HmdShutter_RollingRightToLeft: + // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0 + pcurVert->TimewarpLerp = 0.75f - screenNDC.x * 0.25f; + if (rightEye) + { + pcurVert->TimewarpLerp -= 0.5f; + } + break; + case HmdShutter_RollingTopToBottom: + // Retrace is top to bottom on both eyes at the same time. + pcurVert->TimewarpLerp = screenNDC.y * 0.5f + 0.5f; + break; + default: OVR_ASSERT ( false ); break; + } + + // Fade out at texture edges. + float edgeFadeIn = ( 1.0f / fadeOutBorderFraction ) * + ( 1.0f - Alg::Max ( Alg::Abs ( sourceCoordNDC.x ), Alg::Abs ( sourceCoordNDC.y ) ) ); + // Also fade out at screen edges. + float edgeFadeInScreen = ( 2.0f / fadeOutBorderFraction ) * + ( 1.0f - Alg::Max ( Alg::Abs ( screenNDC.x ), Alg::Abs ( screenNDC.y ) ) ); + edgeFadeIn = Alg::Min ( edgeFadeInScreen, edgeFadeIn ); + + // Don't let verts overlap to the other eye. + screenNDC.x = Alg::Max ( -1.0f, Alg::Min ( screenNDC.x, 1.0f ) ); + screenNDC.y = Alg::Max ( -1.0f, Alg::Min ( screenNDC.y, 1.0f ) ); + + pcurVert->Shade = Alg::Max ( 0.0f, Alg::Min ( edgeFadeIn, 1.0f ) ); + pcurVert->ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset; + pcurVert->ScreenPosNDC.y = -screenNDC.y; + + pcurVert++; + } + } + + + // Populate index buffer info + UInt16 *pcurIndex = *ppTriangleListIndices; + + for ( int triNum = 0; triNum < DMA_GridSize * DMA_GridSize; triNum++ ) + { + // Use a Morton order to help locality of FB, texture and vertex cache. + // (0.325ms raster order -> 0.257ms Morton order) + OVR_ASSERT ( DMA_GridSize <= 256 ); + int x = ( ( triNum & 0x0001 ) >> 0 ) | + ( ( triNum & 0x0004 ) >> 1 ) | + ( ( triNum & 0x0010 ) >> 2 ) | + ( ( triNum & 0x0040 ) >> 3 ) | + ( ( triNum & 0x0100 ) >> 4 ) | + ( ( triNum & 0x0400 ) >> 5 ) | + ( ( triNum & 0x1000 ) >> 6 ) | + ( ( triNum & 0x4000 ) >> 7 ); + int y = ( ( triNum & 0x0002 ) >> 1 ) | + ( ( triNum & 0x0008 ) >> 2 ) | + ( ( triNum & 0x0020 ) >> 3 ) | + ( ( triNum & 0x0080 ) >> 4 ) | + ( ( triNum & 0x0200 ) >> 5 ) | + ( ( triNum & 0x0800 ) >> 6 ) | + ( ( triNum & 0x2000 ) >> 7 ) | + ( ( triNum & 0x8000 ) >> 8 ); + int FirstVertex = x * (DMA_GridSize+1) + y; + // Another twist - we want the top-left and bottom-right quadrants to + // have the triangles split one way, the other two split the other. + // +---+---+---+---+ + // | /| /|\ |\ | + // | / | / | \ | \ | + // |/ |/ | \| \| + // +---+---+---+---+ + // | /| /|\ |\ | + // | / | / | \ | \ | + // |/ |/ | \| \| + // +---+---+---+---+ + // |\ |\ | /| /| + // | \ | \ | / | / | + // | \| \|/ |/ | + // +---+---+---+---+ + // |\ |\ | /| /| + // | \ | \ | / | / | + // | \| \|/ |/ | + // +---+---+---+---+ + // This way triangle edges don't span long distances over the distortion function, + // so linear interpolation works better & we can use fewer tris. 
+ if ( ( x < DMA_GridSize/2 ) != ( y < DMA_GridSize/2 ) ) // != is logical XOR + { + *pcurIndex++ = (UInt16)FirstVertex; + *pcurIndex++ = (UInt16)FirstVertex+1; + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1; + + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1; + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1); + *pcurIndex++ = (UInt16)FirstVertex; + } + else + { + *pcurIndex++ = (UInt16)FirstVertex; + *pcurIndex++ = (UInt16)FirstVertex+1; + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1); + + *pcurIndex++ = (UInt16)FirstVertex+1; + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1; + *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1); + } + } +} + + + +//----------------------------------------------------------------------------------- +// ***** Prediction and timewarp. +// + +// Calculates the values from the HMD info. +PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo, + bool withTimewarp /*= true*/, + bool withVsync /*= true*/ ) +{ + PredictionValues result; + + result.WithTimewarp = withTimewarp; + result.WithVsync = withVsync; + + // For unclear reasons, most graphics systems add an extra frame of latency + // somewhere along the way. In time we'll debug this and figure it out, but + // for now this gets prediction a little bit better. + const float extraFramesOfBufferingKludge = 1.0f; + + if ( withVsync ) + { + // These are the times from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp) + // So if you had no timewarp, this, plus the time until the next vsync, is how much to predict by. + result.PresentFlushToRenderedScene = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; + // Predict to the middle of the screen being scanned out. + result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.VsyncToFirstScanline + 0.5f * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; + // Time for pixels to get half-way to settling. + result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; + // Predict to half-way through persistence + result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; + + // The time from the Present+Flush to when the first scanline is "averagely visible". + result.PresentFlushToTimewarpStart = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; + // Predict to the first line being scanned out. + result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.VsyncToFirstScanline; + // Time for pixels to get half-way to settling. + result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; + // Predict to half-way through persistence + result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; + + // Time to the the last scanline. + result.PresentFlushToTimewarpEnd = result.PresentFlushToTimewarpStart + hmdRenderInfo.Shutter.FirstScanlineToLastScanline; + + // Ideal framerate. + result.PresentFlushToPresentFlush = hmdRenderInfo.Shutter.VsyncToNextVsync; + } + else + { + // Timewarp without vsync is a little odd. + // Currently, we assume that without vsync, we have no idea which scanline + // is currently being sent to the display. So we can't do lerping timewarp, + // we can just do a full-screen late-stage fixup. 
+ + // "PresentFlushToRenderedScene" means the time from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp) + // So if you had no timewarp, this, plus the time until the next flush (which is usually the time to render the frame), is how much to predict by. + // Time for pixels to get half-way to settling. + result.PresentFlushToRenderedScene = hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; + // Predict to half-way through persistence + result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; + + // Without vsync, you don't know timings, and so can't do anything useful with lerped warping. + result.PresentFlushToTimewarpStart = result.PresentFlushToRenderedScene; + result.PresentFlushToTimewarpEnd = result.PresentFlushToRenderedScene; + + // There's no concept of "ideal" when vsync is off. + result.PresentFlushToPresentFlush = 0.0f; + } + + return result; +} + +Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld ) +{ + Matrix4f worldFromPredictedView = predictedViewFromWorld.InvertedHomogeneousTransform(); + Matrix4f matRenderFromNowStart = renderedViewFromWorld * worldFromPredictedView; + + // The sensor-predicted orientations have: X=right, Y=up, Z=backwards. + // The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards. + // So we need to perform a similarity transform on this delta matrix. + // The verbose code would look like this: + /* + Matrix4f matBasisChange; + matBasisChange.SetIdentity(); + matBasisChange.M[0][0] = 1.0f; + matBasisChange.M[1][1] = -1.0f; + matBasisChange.M[2][2] = -1.0f; + Matrix4f matBasisChangeInv = matBasisChange.Inverted(); + matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange; + */ + // ...but of course all the above is a constant transform and much more easily done. 
+ // We flip the signs of the Y&Z row, then flip the signs of the Y&Z column, + // and of course most of the flips cancel: + // +++ +-- +-- + // +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++ + // +++ +-- -++ + matRenderFromNowStart.M[0][1] = -matRenderFromNowStart.M[0][1]; + matRenderFromNowStart.M[0][2] = -matRenderFromNowStart.M[0][2]; + matRenderFromNowStart.M[1][0] = -matRenderFromNowStart.M[1][0]; + matRenderFromNowStart.M[2][0] = -matRenderFromNowStart.M[2][0]; + matRenderFromNowStart.M[1][3] = -matRenderFromNowStart.M[1][3]; + matRenderFromNowStart.M[2][3] = -matRenderFromNowStart.M[2][3]; + + return matRenderFromNowStart; +} + + +TimewarpMachine::TimewarpMachine() +{ + for ( int i = 0; i < 2; i++ ) + { + EyeRenderPoses[i] = Posef(); + } + DistortionTimeCount = 0; + VsyncEnabled = false; +} + +void TimewarpMachine::Reset(HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow) +{ + RenderInfo = renderInfo; + VsyncEnabled = vsyncEnabled; + CurrentPredictionValues = PredictionGetDeviceValues ( renderInfo, true, VsyncEnabled ); + PresentFlushToPresentFlushSeconds = 0.0f; + DistortionTimeCount = 0; + DistortionTimeAverage = 0.0f; + LastFramePresentFlushTime = timeNow; + AfterPresentAndFlush(timeNow); +} + +void TimewarpMachine::AfterPresentAndFlush(double timeNow) +{ + PresentFlushToPresentFlushSeconds = (float)(timeNow - LastFramePresentFlushTime); + LastFramePresentFlushTime = timeNow; + NextFramePresentFlushTime = timeNow + (double)PresentFlushToPresentFlushSeconds; +} + +double TimewarpMachine::GetViewRenderPredictionTime() +{ + // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us. + return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToRenderedScene; +} + +Posef TimewarpMachine::GetViewRenderPredictionPose(SensorFusion &sfusion) +{ + double predictionTime = GetViewRenderPredictionTime(); + return sfusion.GetPoseAtTime(predictionTime); +} + +double TimewarpMachine::GetVisiblePixelTimeStart() +{ + // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us. + return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpStart; +} +double TimewarpMachine::GetVisiblePixelTimeEnd() +{ + // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us. + return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpEnd; +} +Posef TimewarpMachine::GetPredictedVisiblePixelPoseStart(SensorFusion &sfusion) +{ + double predictionTime = GetVisiblePixelTimeStart(); + return sfusion.GetPoseAtTime(predictionTime); +} +Posef TimewarpMachine::GetPredictedVisiblePixelPoseEnd (SensorFusion &sfusion) +{ + double predictionTime = GetVisiblePixelTimeEnd(); + return sfusion.GetPoseAtTime(predictionTime); +} +Matrix4f TimewarpMachine::GetTimewarpDeltaStart(SensorFusion &sfusion, Posef const &renderedPose) +{ + Posef visiblePose = GetPredictedVisiblePixelPoseStart ( sfusion ); + Matrix4f visibleMatrix(visiblePose); + Matrix4f renderedMatrix(renderedPose); + return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix ); +} +Matrix4f TimewarpMachine::GetTimewarpDeltaEnd (SensorFusion &sfusion, Posef const &renderedPose) +{ + Posef visiblePose = GetPredictedVisiblePixelPoseEnd ( sfusion ); + Matrix4f visibleMatrix(visiblePose); + Matrix4f renderedMatrix(renderedPose); + return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix ); +} + + +// What time should the app wait until before starting distortion? 
+double TimewarpMachine::JustInTime_GetDistortionWaitUntilTime() +{ + if ( !VsyncEnabled || ( DistortionTimeCount < NumDistortionTimes ) ) + { + // Don't wait. + return LastFramePresentFlushTime; + } + + const float fudgeFactor = 0.002f; // Found heuristically - 1ms is too short because of timing granularity - may need further tweaking! + float howLongBeforePresent = DistortionTimeAverage + fudgeFactor; + // Subtlety here. Technically, the correct time is NextFramePresentFlushTime - howLongBeforePresent. + // However, if the app drops a frame, this then perpetuates it, + // i.e. if the display is running at 60fps, but the last frame was slow, + // (e.g. because of swapping or whatever), then NextFramePresentFlushTime is + // 33ms in the future, not 16ms. Since this function supplies the + // time to wait until, the app will indeed wait until 32ms, so the framerate + // drops to 30fps and never comes back up! + // So we return the *ideal* framerate, not the *actual* framerate. + return LastFramePresentFlushTime + (float)( CurrentPredictionValues.PresentFlushToPresentFlush - howLongBeforePresent ); +} + + +bool TimewarpMachine::JustInTime_NeedDistortionTimeMeasurement() const +{ + if (!VsyncEnabled) + { + return false; + } + return ( DistortionTimeCount < NumDistortionTimes ); +} + +void TimewarpMachine::JustInTime_BeforeDistortionTimeMeasurement(double timeNow) +{ + DistortionTimeCurrentStart = timeNow; +} + +void TimewarpMachine::JustInTime_AfterDistortionTimeMeasurement(double timeNow) +{ + float timeDelta = (float)( timeNow - DistortionTimeCurrentStart ); + if ( DistortionTimeCount < NumDistortionTimes ) + { + DistortionTimes[DistortionTimeCount] = timeDelta; + DistortionTimeCount++; + if ( DistortionTimeCount == NumDistortionTimes ) + { + // Median. + float distortionTimeMedian = 0.0f; + for ( int i = 0; i < NumDistortionTimes/2; i++ ) + { + // Find the maximum time of those remaining. + float maxTime = DistortionTimes[0]; + int maxIndex = 0; + for ( int j = 1; j < NumDistortionTimes; j++ ) + { + if ( maxTime < DistortionTimes[j] ) + { + maxTime = DistortionTimes[j]; + maxIndex = j; + } + } + // Zero that max time, so we'll find the next-highest time. + DistortionTimes[maxIndex] = 0.0f; + distortionTimeMedian = maxTime; + } + DistortionTimeAverage = distortionTimeMedian; + } + } + else + { + OVR_ASSERT ( !"Really didn't need more measurements, thanks" ); + } } diff --git a/LibOVR/Src/Util/Util_Render_Stereo.h b/LibOVR/Src/Util/Util_Render_Stereo.h index 492080d..2ad9103 100644 --- a/LibOVR/Src/Util/Util_Render_Stereo.h +++ b/LibOVR/Src/Util/Util_Render_Stereo.h @@ -4,18 +4,18 @@ PublicHeader: OVR.h Filename : Util_Render_Stereo.h Content : Sample stereo rendering configuration classes. Created : October 22, 2012 -Authors : Michael Antonov +Authors : Michael Antonov, Tom Forsyth -Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved. +Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved. -Licensed under the Oculus VR SDK License Version 2.0 (the "License"); -you may not use the Oculus VR SDK except in compliance with the License, +Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License"); +you may not use the Oculus VR Rift SDK except in compliance with the License, which is provided at the time of installation or download, or which otherwise accompanies this software in either electronic or hard copy form. 
You may obtain a copy of the License at -http://www.oculusvr.com/licenses/LICENSE-2.0 +http://www.oculusvr.com/licenses/LICENSE-3.1 Unless required by applicable law or agreed to in writing, the Oculus VR SDK distributed under the License is distributed on an "AS IS" BASIS, @@ -28,135 +28,75 @@ limitations under the License. #ifndef OVR_Util_Render_Stereo_h #define OVR_Util_Render_Stereo_h -#include "../OVR_Device.h" +#include "../OVR_Stereo.h" -namespace OVR { namespace Util { namespace Render { +namespace OVR { -//----------------------------------------------------------------------------------- -// ***** Stereo Enumerations +class SensorFusion; -// StereoMode describes rendering modes that can be used by StereoConfig. -// These modes control whether stereo rendering is used or not (Stereo_None), -// and how it is implemented. -enum StereoMode -{ - Stereo_None = 0, - Stereo_LeftRight_Multipass = 1 -}; +namespace Util { namespace Render { -// StereoEye specifies which eye we are rendering for; it is used to -// retrieve StereoEyeParams. -enum StereoEye -{ - StereoEye_Center, - StereoEye_Left, - StereoEye_Right -}; - //----------------------------------------------------------------------------------- -// ***** Viewport - -// Viewport describes a rectangular area used for rendering, in pixels. -struct Viewport -{ - int x, y; - int w, h; - - Viewport() {} - Viewport(int x1, int y1, int w1, int h1) : x(x1), y(y1), w(w1), h(h1) { } +// **** Useful debug functions. +// +// Purely for debugging - the results are not very end-user-friendly. +char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType ); +char const* GetDebugNameHmdType ( HmdTypeEnum hmdType ); - bool operator == (const Viewport& vp) const - { return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h); } - bool operator != (const Viewport& vp) const - { return !operator == (vp); } -}; //----------------------------------------------------------------------------------- -// ***** DistortionConfig - -// DistortionConfig Provides controls for the distortion shader. -// - K[0] - K[3] are coefficients for the distortion function. -// - XCenterOffset is the offset of lens distortion center from the -// center of one-eye screen half. [-1, 1] Range. -// - Scale is a factor of how much larger will the input image be, -// with a factor of 1.0f being no scaling. An inverse of this -// value is applied to sampled UV coordinates (1/Scale). -// - ChromaticAberration is an array of parameters for controlling -// additional Red and Blue scaling in order to reduce chromatic aberration -// caused by the Rift lenses. -class DistortionConfig -{ -public: - DistortionConfig(float k0 = 1.0f, float k1 = 0.0f, float k2 = 0.0f, float k3 = 0.0f) - : XCenterOffset(0), YCenterOffset(0), Scale(1.0f) - { - SetCoefficients(k0, k1, k2, k3); - SetChromaticAberration(); - } +// **** Higher-level utility functions. 
- void SetCoefficients(float k0, float k1 = 0.0f, float k2 = 0.0f, float k3 = 0.0f) - { K[0] = k0; K[1] = k1; K[2] = k2; K[3] = k3; } +Sizei CalculateRecommendedTextureSize ( HmdRenderInfo const &hmd, + bool bRendertargetSharedByBothEyes, + float pixelDensityInCenter = 1.0f ); - void SetChromaticAberration(float red1 = 1.0f, float red2 = 0.0f, float blue1 = 1.0f, float blue2 = 0.0f) - { ChromaticAberration[0] = red1; ChromaticAberration[1] = red2; ChromaticAberration[2] = blue1; ChromaticAberration[3] = blue2; } +FovPort CalculateRecommendedFov ( HmdRenderInfo const &hmd, + StereoEye eyeType, + bool bMakeFovSymmetrical = false); +StereoEyeParams CalculateStereoEyeParams ( HmdRenderInfo const &hmd, + StereoEye eyeType, + Sizei const &actualRendertargetSurfaceSize, + bool bRendertargetSharedByBothEyes, + bool bRightHanded = true, + float zNear = 0.01f, float zFar = 10000.0f, + Sizei const *pOverrideRenderedPixelSize = NULL, + FovPort const *pOverrideFovport = NULL, + float zoomFactor = 1.0f ); - // DistortionFn applies distortion equation to the argument. The returned - // value should match distortion equation used in shader. - float DistortionFn(float r) const - { - float rsq = r * r; - float scale = r * (K[0] + K[1] * rsq + K[2] * rsq * rsq + K[3] * rsq * rsq * rsq); - return scale; - } - - // DistortionFnInverse computes the inverse of the distortion function on an argument. - float DistortionFnInverse(float r); +Vector3f CalculateEyeVirtualCameraOffset(HmdRenderInfo const &hmd, + StereoEye eyeType, bool bMonoRenderingMode ); - float K[4]; - float XCenterOffset, YCenterOffset; - float Scale; - float ChromaticAberration[4]; // Additional per-channel scaling is applied after distortion: - // Index [0] - Red channel constant coefficient. - // Index [1] - Red channel r^2 coefficient. - // Index [2] - Blue channel constant coefficient. - // Index [3] - Blue channel r^2 coefficient. +// These are two components from StereoEyeParams that can be changed +// very easily without full recomputation of everything. +struct ViewportScaleAndOffset +{ + Recti RenderedViewport; + ScaleAndOffset2D EyeToSourceUV; }; +// Three ways to override the size of the render view dynamically. +// None of these require changing the distortion parameters or regenerating the distortion mesh, +// and can be called every frame if desired. +ViewportScaleAndOffset ModifyRenderViewport ( StereoEyeParams const &params, + Sizei const &actualRendertargetSurfaceSize, + Recti const &renderViewport ); -//----------------------------------------------------------------------------------- -// ***** StereoEyeParams +ViewportScaleAndOffset ModifyRenderSize ( StereoEyeParams const &params, + Sizei const &actualRendertargetSurfaceSize, + Sizei const &requestedRenderSize, + bool bRendertargetSharedByBothEyes = false ); -// StereoEyeParams describes RenderDevice configuration needed to render -// the scene for one eye. -class StereoEyeParams -{ -public: - StereoEye Eye; - Viewport VP; // Viewport that we are rendering to - const DistortionConfig* pDistortion; - - Matrix4f ViewAdjust; // Translation to be applied to view matrix. - Matrix4f Projection; // Projection matrix used with this eye. - Matrix4f OrthoProjection; // Orthographic projection used with this eye. 
- - void Init(StereoEye eye, const Viewport &vp, float vofs, - const Matrix4f& proj, const Matrix4f& orthoProj, - const DistortionConfig* distortion = 0) - { - Eye = eye; - VP = vp; - ViewAdjust = Matrix4f::Translation(Vector3f(vofs,0,0)); - Projection = proj; - OrthoProjection = orthoProj; - pDistortion = distortion; - } -}; +ViewportScaleAndOffset ModifyRenderDensity ( StereoEyeParams const &params, + Sizei const &actualRendertargetSurfaceSize, + float pixelDensity = 1.0f, + bool bRendertargetSharedByBothEyes = false ); //----------------------------------------------------------------------------------- @@ -169,143 +109,358 @@ public: // parameters are returned through StereoEyeParams for each eye. // // Beyond regular 3D projection, this class supports rendering a 2D orthographic -// surface for UI and text. The 2D surface will be defined as fitting within a 2D -// field of view (85 degrees by default) and used [-1,1] coordinate system with -// square pixels. The (0,0) coordinate corresponds to eye center location -// that is properly adjusted during rendering through SterepRenderParams::Adjust2D. -// Genreally speaking, text outside [-1,1] coordinate range will not be readable. +// surface for UI and text. The 2D surface will be defined by CreateOrthoSubProjection(). +// The (0,0) coordinate corresponds to eye center location. +// +// Applications are not required to use this class, but they should be doing very +// similar sequences of operations, and it may be useful to start with this class +// and modify it. + +struct StereoEyeParamsWithOrtho +{ + StereoEyeParams StereoEye; + Matrix4f OrthoProjection; +}; + +struct ViewportScaleAndOffsetBothEyes +{ + ViewportScaleAndOffset Left; + ViewportScaleAndOffset Right; +}; class StereoConfig { public: - StereoConfig(StereoMode mode = Stereo_LeftRight_Multipass, - const Viewport& fullViewport = Viewport(0,0, 1280,800)); + // StereoMode describes rendering modes that can be used by StereoConfig. + // These modes control whether stereo rendering is used or not (Stereo_None), + // and how it is implemented. + enum StereoMode + { + Stereo_None = 0, // Single eye + Stereo_LeftRight_Multipass = 1, // One frustum per eye + }; + + + StereoConfig(StereoMode mode = Stereo_LeftRight_Multipass); + //--------------------------------------------------------------------------------------------- + // *** Core functions - every app MUST call these functions at least once. + + // Sets HMD parameters; also initializes distortion coefficients. + void SetHmdRenderInfo(const HmdRenderInfo& hmd); + + // Set the physical size of the rendertarget surface the app created, + // and whether one RT is shared by both eyes, or each eye has its own RT: + // true: both eyes are rendered to the same RT. Left eye starts at top-left, right eye starts at top-middle. + // false: each eye is rendered to its own RT. Some GPU architectures prefer this arrangement. + // Typically, the app would call CalculateRecommendedTextureSize() to suggest the choice of RT size. + // This setting must be exactly the size of the actual RT created, or the UVs produced will be incorrect. + // If the app wants to render to a subsection of the RT, it should use SetRenderSize() + void SetRendertargetSize (Size<int> const rendertargetSize, + bool rendertargetIsSharedByBothEyes ); + + // Returns full set of Stereo rendering parameters for the specified eye. 
+ const StereoEyeParamsWithOrtho& GetEyeRenderParams(StereoEye eye); + + + + //--------------------------------------------------------------------------------------------- + // *** Optional functions - an app may call these to override default behaviours. + + const HmdRenderInfo& GetHmdRenderInfo() const { return Hmd; } - // *** Modifiable State Access + // Returns the recommended size of rendertargets. + // If rendertargetIsSharedByBothEyes is true, this is the size of the combined buffer. + // If rendertargetIsSharedByBothEyes is false, this is the size of each individual buffer. + // pixelDensityInCenter may be set to any number - by default it will match the HMD resolution in the center of the image. + // After creating the rendertargets, the application MUST call SetRendertargetSize() with the actual size created + // (which can be larger or smaller as the app wishes, but StereoConfig needs to know either way) + Sizei CalculateRecommendedTextureSize ( bool rendertargetSharedByBothEyes, + float pixelDensityInCenter = 1.0f ); // Sets a stereo rendering mode and updates internal cached // state (matrices, per-eye view) based on it. void SetStereoMode(StereoMode mode) { Mode = mode; DirtyFlag = true; } StereoMode GetStereoMode() const { return Mode; } - // Sets HMD parameters; also initializes distortion coefficients. - void SetHMDInfo(const HMDInfo& hmd); - const HMDInfo& GetHMDInfo() const { return HMD; } + // Sets the fieldOfView that the 2D coordinate area stretches to. + void Set2DAreaFov(float fovRadians); - // Query physical eye-to-screen distance in meters, which combines screen-to-lens and - // and lens-to-eye pupil distances. Modifying this value adjusts FOV. - float GetEyeToScreenDistance() const { return HMD.EyeToScreenDistance; } - void SetEyeToScreenDistance(float esd) { HMD.EyeToScreenDistance = esd; DirtyFlag = true; } + // Really only for science experiments - no normal app should ever need to override + // the HMD's lens descriptors. Passing NULL removes the override. + // Supply both = set left and right. + // Supply just left = set both to the same. + // Supply neither = remove override. + void SetLensOverride ( LensConfig const *pLensOverrideLeft = NULL, + LensConfig const *pLensOverrideRight = NULL ); + + // Override the rendered FOV in various ways. All angles in tangent units. + // This is not clamped to the physical FOV of the display - you'll need to do that yourself! + // Supply both = set left and right. + // Supply just left = set both to the same. + // Supply neither = remove override. + void SetFov ( FovPort const *pfovLeft = NULL, + FovPort const *pfovRight = NULL ); + + void SetFovPortRadians ( float horizontal, float vertical ) + { + FovPort fov = FovPort::CreateFromRadians(horizontal, vertical); + SetFov( &fov, &fov ); + } - // Interpupillary distance used for stereo, in meters. Default is 0.064m (64 mm). - void SetIPD(float ipd) { InterpupillaryDistance = ipd; IPDOverride = DirtyFlag = true; } - float GetIPD() const { return InterpupillaryDistance; } - // Set full render target viewport; for HMD this includes both eyes. - void SetFullViewport(const Viewport& vp); - const Viewport& GetFullViewport() const { return FullView; } + // This forces a "zero IPD" mode where there is just a single render with an FOV that + // is the union of the two calculated FOVs. + // The calculated render is for the left eye. Any size & FOV overrides for the right + // eye will be ignored. 
+ // If you query the right eye's size, you will get the same render + // size & position as the left eye - you should not actually do the render of course! + // The distortion values will be different, because it goes to a different place on the framebuffer. + // Note that if you do this, the rendertarget does not need to be twice the width of + // the render size any more. + void SetZeroVirtualIpdOverride ( bool enableOverride ); - // Aspect ratio defaults to ((w/h)*multiplier) computed per eye. - // Aspect multiplier allows adjusting aspect ratio consistently for Stereo/NoStereo. - void SetAspectMultiplier(float m) { AspectMultiplier = m; DirtyFlag = true; } - float GetAspectMultiplier() const { return AspectMultiplier; } + // Allows the app to specify near and far clip planes and the right/left-handedness of the projection matrix. + void SetZClipPlanesAndHandedness ( float zNear = 0.01f, float zFar = 10000.0f, + bool rightHandedProjection = true ); - - // For the distorted image to fill rendered viewport, input texture render target needs to be - // scaled by DistortionScale before sampling. The scale factor is computed by fitting a point - // on of specified radius from a distortion center, more easily specified as a coordinate. - // SetDistortionFitPointVP sets the (x,y) coordinate of the point that scale will be "fit" to, - // assuming [-1,1] coordinate range for full left-eye viewport. A fit point is a location - // where source (pre-distortion) and target (post-distortion) image match each other. - // For the right eye, the interpretation of 'u' will be inverted. - void SetDistortionFitPointVP(float x, float y); - // SetDistortionFitPointPixels sets the (x,y) coordinate of the point that scale will be "fit" to, - // specified in pixeld for full left-eye texture. - void SetDistortionFitPointPixels(float x, float y); - - // Changes all distortion settings. - // Note that setting HMDInfo also changes Distortion coefficients. - void SetDistortionConfig(const DistortionConfig& d) { Distortion = d; DirtyFlag = true; } - - // Modify distortion coefficients; useful for adjustment tweaking. - void SetDistortionK(int i, float k) { Distortion.K[i] = k; DirtyFlag = true; } - float GetDistortionK(int i) const { return Distortion.K[i]; } + // Allows the app to specify how much extra eye rotation to allow when determining the visible FOV. + void SetExtraEyeRotation ( float extraEyeRotationInRadians = 0.0f ); - // Sets the fieldOfView that the 2D coordinate area stretches to. - void Set2DAreaFov(float fovRadians); + // The dirty flag is set by any of the above calls. Just handy for the app to know + // if e.g. the distortion mesh needs regeneration. + void SetDirty() { DirtyFlag = true; } + bool IsDirty() { return DirtyFlag; } + + // An app never needs to call this - GetEyeRenderParams will call it internally if + // the state is dirty. However apps can call this explicitly to control when and where + // computation is performed (e.g. not inside critical loops) + void UpdateComputedState(); + + // This returns the projection matrix with a "zoom". Does not modify any internal state. + Matrix4f GetProjectionWithZoom ( StereoEye eye, float fovZoom ) const; + //--------------------------------------------------------------------------------------------- + // The SetRender* functions are special. + // + // They do not require a full recalculation of state, and they do not change anything but the + // ViewportScaleAndOffset data for the eyes (which they return), and do not set the dirty flag! 
+ // This means they can be called without regenerating the distortion mesh, and thus + // can happily be called every frame without causing performance problems. Dynamic rescaling + // of the rendertarget can help keep framerate up in demanding VR applications. + // See the documentation for more details on their use. + + // Specify a pixel density - how many rendered pixels per pixel in the physical display. + ViewportScaleAndOffsetBothEyes SetRenderDensity ( float pixelsPerDisplayPixel ); + + // Supply the size directly. Will be clamped to the physical rendertarget size. + ViewportScaleAndOffsetBothEyes SetRenderSize ( Sizei const &renderSizeLeft, Sizei const &renderSizeRight ); + + // Supply the viewport directly. This is not clamped to the physical rendertarget - careful now! + ViewportScaleAndOffsetBothEyes SetRenderViewport ( Recti const &renderViewportLeft, Recti const &renderViewportRight ); + +private: + + // *** Modifiable State + + StereoMode Mode; + HmdRenderInfo Hmd; + + float Area2DFov; // FOV range mapping to the 2D area. + + // Only one of these three overrides can be true! + enum SetViewportModeEnum + { + SVPM_Density, + SVPM_Size, + SVPM_Viewport, + } SetViewportMode; + // ...and depending which it is, one of the following is used. + float SetViewportPixelsPerDisplayPixel; + Sizei SetViewportSize[2]; + Recti SetViewport[2]; + + // Other overrides. + bool OverrideLens; + LensConfig LensOverrideLeft; + LensConfig LensOverrideRight; + Sizei RendertargetSize; + bool OverrideTanHalfFov; + FovPort FovOverrideLeft; + FovPort FovOverrideRight; + bool OverrideZeroIpd; + float ZNear; + float ZFar; + float ExtraEyeRotationInRadians; + bool IsRendertargetSharedByBothEyes; + bool RightHandedProjection; + + bool DirtyFlag; // Set when any of the modifiable state changed. Does NOT get set by SetRender*() + + // Utility function. + ViewportScaleAndOffsetBothEyes setupViewportScaleAndOffsets(); + // *** Computed State - // Return current aspect ratio. - float GetAspect() { updateIfDirty(); return Aspect; } - - // Return computed vertical FOV in radians/degrees. - float GetYFOVRadians() { updateIfDirty(); return YFov; } - float GetYFOVDegrees() { return RadToDegree(GetYFOVRadians()); } +public: // Small hack for the config tool. Normal code should never read EyeRenderParams directly - use GetEyeRenderParams() instead. + // 0/1 = left/right main views. + StereoEyeParamsWithOrtho EyeRenderParams[2]; +}; - // Query horizontal projection center offset as a distance away from the - // one-eye [-1,1] unit viewport. - // Positive return value should be used for left eye, negative for right eye. - float GetProjectionCenterOffset() { updateIfDirty(); return ProjectionCenterOffset; } - // GetDistortionConfig isn't const because XCenterOffset bay need to be recomputed. - const DistortionConfig& GetDistortionConfig() { updateIfDirty(); return Distortion; } +//----------------------------------------------------------------------------------- +// ***** Distortion Mesh Rendering +// - // Returns DistortionScale factor by which input texture size is increased to make - // post-distortion result distortion fit the viewport. - float GetDistortionScale() { updateIfDirty(); return Distortion.Scale; } +// Stores both texture UV coords, or tan(angle) values. +// Use whichever set of data the specific distortion algorithm requires. +// This struct *must* be binary compatible with CAPI ovrDistortionVertex. +struct DistortionMeshVertexData +{ + // [-1,+1],[-1,+1] over the entire framebuffer. 
+ Vector2f ScreenPosNDC; + // [0.0-1.0] interpolation value for timewarping - see documentation for details. + float TimewarpLerp; + // [0.0-1.0] fade-to-black at the edges to reduce peripheral vision noise. + float Shade; + // The red, green, and blue vectors in tan(angle) space. + // Scale and offset by the values in StereoEyeParams.EyeToSourceUV.Scale + // and StereoParams.EyeToSourceUV.Offset to get to real texture UV coords. + Vector2f TanEyeAnglesR; + Vector2f TanEyeAnglesG; + Vector2f TanEyeAnglesB; +}; - // Returns the size of a pixel within 2D coordinate system. - float Get2DUnitPixel() { updateIfDirty(); return (2.0f / (FovPixels * Distortion.Scale)); } - // Returns full set of Stereo rendering parameters for the specified eye. - const StereoEyeParams& GetEyeRenderParams(StereoEye eye); +void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices, + int *pNumVertices, int *pNumTriangles, + const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo ); + +// Generate distortion mesh for an eye. This version requires less data than stereoParams, supporting +// dynamic change in render target viewport. +void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices, + int *pNumVertices, int *pNumTriangles, + bool rightEye, + const HmdRenderInfo &hmdRenderInfo, + const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC ); + +void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices ); + + + + +//----------------------------------------------------------------------------------- +// ***** Prediction and timewarp. +// + +struct PredictionValues +{ + // All values in seconds. + // These are the times in seconds from a present+flush to the relevant display element. + // The time is measured to the middle of that element's visibility window, + // e.g. if the device is a full-persistence display, the element will be visible for + // an entire frame, so the time measures to the middle of that period, i.e. half the frame time. + float PresentFlushToRenderedScene; // To the overall rendered 3D scene being visible. + float PresentFlushToTimewarpStart; // To when the first timewarped scanline will be visible. + float PresentFlushToTimewarpEnd; // To when the last timewarped scanline will be visible. + float PresentFlushToPresentFlush; // To the next present+flush, i.e. the ideal framerate. + + bool WithTimewarp; + bool WithVsync; +}; + +// Calculates the values from the HMD info. +PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo, + bool withTimewarp = true, + bool withVsync = true ); + +// Pass in an orientation used to render the scene, and then the predicted orientation +// (which may have been computed later on, and thus is more accurate), and this +// will return the matrix to pass to the timewarp distortion shader. +// TODO: deal with different handedness? +Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld ); + + + +// TimewarpMachine helps keep track of rendered frame timing and +// handles predictions for time-warp rendering. +class TimewarpMachine +{ +public: + TimewarpMachine(); -private: + // Call this once, and every time something about the setup changes. 
+ void Reset ( HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow ); - void updateIfDirty() { if (DirtyFlag) updateComputedState(); } - void updateComputedState(); + // The only reliable time in most engines is directly after the frame-present and GPU flush-and-wait. + // This call should be done right after that to give this system the timing info it needs. + void AfterPresentAndFlush(double timeNow); - void updateDistortionOffsetAndScale(); - void updateProjectionOffset(); - void update2D(); - void updateEyeParams(); + // The "average" time the rendered frame will show up, + // and the predicted pose of the HMD at that time. + // You usually only need to call one of these functions. + double GetViewRenderPredictionTime(); + Posef GetViewRenderPredictionPose(SensorFusion &sfusion); - // *** Modifiable State + // Timewarp prediction functions. You usually only need to call one of these three sets of functions. + + // The predicted times that the first and last pixel will be visible on-screen. + double GetVisiblePixelTimeStart(); + double GetVisiblePixelTimeEnd(); + // Predicted poses of the HMD at those first and last pixels. + Posef GetPredictedVisiblePixelPoseStart(SensorFusion &sfusion); + Posef GetPredictedVisiblePixelPoseEnd (SensorFusion &sfusion); + // The delta matrices to feed to the timewarp distortion code, + // given the pose that was used for rendering. + // (usually the one returned by GetViewRenderPredictionPose() earlier) + Matrix4f GetTimewarpDeltaStart(SensorFusion &sfusion, Posef const &renderedPose); + Matrix4f GetTimewarpDeltaEnd (SensorFusion &sfusion, Posef const &renderedPose); + + + // Just-In-Time distortion aims to delay the second sensor reading & distortion + // until the very last moment to improve prediction. However, it is a little scary, + // since the delay might wait too long and miss the vsync completely! + // Use of the JustInTime_* functions is entirely optional, and we advise allowing + // users to turn it off in their video options to cope with odd machine configurations. + + // What time should the app wait until before starting distortion? + double JustInTime_GetDistortionWaitUntilTime(); + + // Used to time the distortion rendering + bool JustInTime_NeedDistortionTimeMeasurement() const; + void JustInTime_BeforeDistortionTimeMeasurement(double timeNow); + void JustInTime_AfterDistortionTimeMeasurement(double timeNow); + + +private: + + bool VsyncEnabled; + HmdRenderInfo RenderInfo; + PredictionValues CurrentPredictionValues; + + enum { NumDistortionTimes = 10 }; + int DistortionTimeCount; + double DistortionTimeCurrentStart; + float DistortionTimes[NumDistortionTimes]; + float DistortionTimeAverage; + + // Pose at which last time the eye was rendered. + Posef EyeRenderPoses[2]; + + // Absolute time of the last present+flush + double LastFramePresentFlushTime; + // Seconds between presentflushes + float PresentFlushToPresentFlushSeconds; + // Predicted absolute time of the next present+flush + double NextFramePresentFlushTime; - StereoMode Mode; - float InterpupillaryDistance; - float AspectMultiplier; // Multiplied into aspect ratio to change it. - HMDInfo HMD; - DistortionConfig Distortion; - float DistortionFitX, DistortionFitY; // In [-1,1] half-screen viewport units. - Viewport FullView; // Entire window viewport. - - float Area2DFov; // FOV range mapping to [-1, 1] 2D area. - - // *** Computed State - - bool DirtyFlag; // Set when any if the modifiable state changed. - bool IPDOverride; // True after SetIPD was called. 
- float YFov; // Vertical FOV. - float Aspect; // Aspect ratio: (w/h)*AspectMultiplier. - float ProjectionCenterOffset; - StereoEyeParams EyeRenderParams[2]; - - - // ** 2D Rendering - - // Number of 2D pixels in the FOV. This defines [-1,1] coordinate range for 2D. - float FovPixels; - Matrix4f OrthoCenter; - float OrthoPixelOffset; }; + }}} // OVR::Util::Render #endif
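
For orientation, here is a minimal usage sketch of the mandatory StereoConfig setup path declared above (SetHmdRenderInfo, SetRendertargetSize, GetEyeRenderParams). It is a sketch, not part of the SDK: the include path is assumed to resolve relative to LibOVR/Src, the HmdRenderInfo is assumed to come from the SDK's HMD enumeration elsewhere, and CreateAppRenderTarget() is a hypothetical application-side helper.

#include "Util/Util_Render_Stereo.h"   // path assumed relative to LibOVR/Src

using namespace OVR;
using namespace OVR::Util::Render;

void SetupStereo ( StereoConfig &config, const HmdRenderInfo &hmdInfo )
{
    // Core calls - every app must make these at least once (see header comments above).
    config.SetHmdRenderInfo ( hmdInfo );

    bool  sharedRendertarget = true;   // one RT holding both eyes side by side
    Sizei recommended = config.CalculateRecommendedTextureSize ( sharedRendertarget, 1.0f );
    // CreateAppRenderTarget ( recommended );   // hypothetical app-side RT creation
    // Report the size actually created, which may differ from the recommendation.
    config.SetRendertargetSize ( recommended, sharedRendertarget );

    // GetEyeRenderParams() recomputes the cached state internally if anything is dirty.
    const StereoEyeParamsWithOrtho &left  = config.GetEyeRenderParams ( StereoEye_Left );
    const StereoEyeParamsWithOrtho &right = config.GetEyeRenderParams ( StereoEye_Right );
    (void)left; (void)right;   // the app would feed these to its renderer
}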
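The header above stresses that the SetRender* calls only touch viewport/UV data and never dirty the distortion mesh, so they are safe to call every frame. Below is a sketch of per-frame dynamic resolution scaling built on that guarantee, using the same includes as the previous sketch; the gpuLoad value is an assumed application-side measurement, not an SDK feature.

void AdjustRenderDensityForLoad ( StereoConfig &config, float gpuLoad )
{
    // Render at reduced pixel density when the GPU is overloaded; 1.0 means one
    // rendered pixel per display pixel in the centre of the view.
    float pixelsPerDisplayPixel = ( gpuLoad > 0.9f ) ? 0.7f : 1.0f;
    ViewportScaleAndOffsetBothEyes vp = config.SetRenderDensity ( pixelsPerDisplayPixel );

    // Render this frame into the returned viewports; sample with the returned UV scale/offset.
    Recti leftViewport  = vp.Left.RenderedViewport;
    Recti rightViewport = vp.Right.RenderedViewport;
    (void)leftViewport; (void)rightViewport;
}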
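Finally, a sketch of one frame of the TimewarpMachine flow, assuming Reset() was already called during setup and the same includes are in place; GetTimeNow() stands in for whatever high-resolution clock the application uses and is declared here only to keep the sketch self-contained.

double GetTimeNow();   // hypothetical app clock, returning seconds as a double

void RenderOneFrameWithTimewarp ( TimewarpMachine &timewarp, SensorFusion &sfusion )
{
    // Render the scene with the pose predicted for when the frame will actually be visible.
    Posef renderPose = timewarp.GetViewRenderPredictionPose ( sfusion );
    // ... render both eyes using renderPose ...

    // Optional just-in-time path: delay distortion so the late sensor read is as fresh as possible.
    double waitUntil = timewarp.JustInTime_GetDistortionWaitUntilTime ();
    while ( GetTimeNow() < waitUntil ) { /* spin or sleep briefly */ }

    bool measure = timewarp.JustInTime_NeedDistortionTimeMeasurement ();
    if ( measure ) { timewarp.JustInTime_BeforeDistortionTimeMeasurement ( GetTimeNow() ); }

    // Start/end warp matrices for the distortion shader, relative to the rendered pose.
    Matrix4f warpStart = timewarp.GetTimewarpDeltaStart ( sfusion, renderPose );
    Matrix4f warpEnd   = timewarp.GetTimewarpDeltaEnd   ( sfusion, renderPose );
    // ... draw the distortion mesh using warpStart/warpEnd, then Present + flush ...
    (void)warpStart; (void)warpEnd;

    if ( measure ) { timewarp.JustInTime_AfterDistortionTimeMeasurement ( GetTimeNow() ); }

    // Tell the machine when the present+flush happened so its predictions stay calibrated.
    timewarp.AfterPresentAndFlush ( GetTimeNow() );
}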