author    Sven Gothel <[email protected]>    2015-03-21 23:01:12 +0100
committer Sven Gothel <[email protected]>    2015-03-21 23:01:12 +0100
commit    0c5c4be020c2d55540058a49b2a879f46d5a1e13 (patch)
tree      00f84c2ca18cc233b826014094b9cad0769a3ea5 /LibOVR/Src/Util/Util_Render_Stereo.cpp
parent    cbbd775b6c754927632c333ff01424a0d2048c7c (diff)
parent    e490c3c7f7bb5461cfa78a214827aa534fb43a3e (diff)
Merge branch 'vanilla_0.4.4' and resolve conflicts
TODO: Validate for removed patches due to relocation

Resolved Conflicts:
    LibOVR/Src/Kernel/OVR_ThreadsWinAPI.cpp
    LibOVR/Src/OVR_Linux_HMDDevice.cpp
    LibOVR/Src/OVR_OSX_HMDDevice.cpp
    LibOVR/Src/OVR_Profile.cpp
    LibOVR/Src/OVR_Sensor2Impl.cpp
    LibOVR/Src/OVR_SensorFusion.cpp
    LibOVR/Src/OVR_SensorImpl.cpp
    LibOVR/Src/OVR_Win32_DeviceStatus.cpp
    LibOVR/Src/OVR_Win32_HIDDevice.cpp
    LibOVR/Src/OVR_Win32_HIDDevice.h
    LibOVR/Src/OVR_Win32_HMDDevice.cpp
Diffstat (limited to 'LibOVR/Src/Util/Util_Render_Stereo.cpp')
-rw-r--r--  LibOVR/Src/Util/Util_Render_Stereo.cpp  | 446
1 file changed, 264 insertions(+), 182 deletions(-)
diff --git a/LibOVR/Src/Util/Util_Render_Stereo.cpp b/LibOVR/Src/Util/Util_Render_Stereo.cpp
index e84381e..aa82b35 100644
--- a/LibOVR/Src/Util/Util_Render_Stereo.cpp
+++ b/LibOVR/Src/Util/Util_Render_Stereo.cpp
@@ -5,18 +5,18 @@ Content : Stereo rendering configuration implementation
Created : October 22, 2012
Authors : Michael Antonov, Andrew Reisse, Tom Forsyth
-Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+Copyright : Copyright 2014 Oculus VR, LLC All Rights reserved.
-Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
-you may not use the Oculus VR Rift SDK except in compliance with the License,
-which is provided at the time of installation or download, or which
+Licensed under the Oculus VR Rift SDK License Version 3.2 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
-http://www.oculusvr.com/licenses/LICENSE-3.1
+http://www.oculusvr.com/licenses/LICENSE-3.2
-Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
@@ -25,10 +25,11 @@ limitations under the License.
*************************************************************************************/
#include "Util_Render_Stereo.h"
-#include "../OVR_SensorFusion.h"
namespace OVR { namespace Util { namespace Render {
+using namespace OVR::Tracking;
+
//-----------------------------------------------------------------------------------
// **** Useful debug functions.
@@ -37,21 +38,21 @@ char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType )
{
switch ( eyeCupType )
{
- case EyeCup_DK1A: return "DK1 A"; break;
- case EyeCup_DK1B: return "DK1 B"; break;
- case EyeCup_DK1C: return "DK1 C"; break;
- case EyeCup_DKHD2A: return "DKHD2 A"; break;
- case EyeCup_OrangeA: return "Orange A"; break;
- case EyeCup_RedA: return "Red A"; break;
- case EyeCup_PinkA: return "Pink A"; break;
- case EyeCup_BlueA: return "Blue A"; break;
- case EyeCup_Delilah1A: return "Delilah 1 A"; break;
- case EyeCup_Delilah2A: return "Delilah 2 A"; break;
- case EyeCup_JamesA: return "James A"; break;
- case EyeCup_SunMandalaA: return "Sun Mandala A"; break;
- case EyeCup_DK2A: return "DK2 A"; break;
- case EyeCup_LAST: return "LAST"; break;
- default: OVR_ASSERT ( false ); return "Error"; break;
+ case EyeCup_DK1A: return "DK1 A";
+ case EyeCup_DK1B: return "DK1 B";
+ case EyeCup_DK1C: return "DK1 C";
+ case EyeCup_DKHD2A: return "DKHD2 A";
+ case EyeCup_OrangeA: return "Orange A";
+ case EyeCup_RedA: return "Red A";
+ case EyeCup_PinkA: return "Pink A";
+ case EyeCup_BlueA: return "Blue A";
+ case EyeCup_Delilah1A: return "Delilah 1 A";
+ case EyeCup_Delilah2A: return "Delilah 2 A";
+ case EyeCup_JamesA: return "James A";
+ case EyeCup_SunMandalaA: return "Sun Mandala A";
+ case EyeCup_DK2A: return "DK2 A";
+ case EyeCup_LAST: return "LAST";
+ default: OVR_ASSERT ( false ); return "Error";
}
}
@@ -59,17 +60,17 @@ char const* GetDebugNameHmdType ( HmdTypeEnum hmdType )
{
switch ( hmdType )
{
- case HmdType_None: return "None"; break;
- case HmdType_DK1: return "DK1"; break;
- case HmdType_DKProto: return "DK1 prototype"; break;
- case HmdType_DKHDProto: return "DK HD prototype 1"; break;
- case HmdType_DKHDProto566Mi: return "DK HD prototype 566 Mi"; break;
- case HmdType_DKHD2Proto: return "DK HD prototype 585"; break;
- case HmdType_CrystalCoveProto: return "Crystal Cove"; break;
- case HmdType_DK2: return "DK2"; break;
- case HmdType_Unknown: return "Unknown"; break;
- case HmdType_LAST: return "LAST"; break;
- default: OVR_ASSERT ( false ); return "Error"; break;
+ case HmdType_None: return "None";
+ case HmdType_DK1: return "DK1";
+ case HmdType_DKProto: return "DK1 prototype";
+ case HmdType_DKHDProto: return "DK HD prototype 1";
+ case HmdType_DKHDProto566Mi: return "DK HD prototype 566 Mi";
+ case HmdType_DKHD2Proto: return "DK HD prototype 585";
+ case HmdType_CrystalCoveProto: return "Crystal Cove";
+ case HmdType_DK2: return "DK2";
+ case HmdType_Unknown: return "Unknown";
+ case HmdType_LAST: return "LAST";
+ default: OVR_ASSERT ( false ); return "Error";
}
}
@@ -80,7 +81,7 @@ char const* GetDebugNameHmdType ( HmdTypeEnum hmdType )
struct DistortionAndFov
{
DistortionRenderDesc Distortion;
- FovPort Fov;
+ FovPort Fov;
};
static DistortionAndFov CalculateDistortionAndFovInternal ( StereoEye eyeType, HmdRenderInfo const &hmd,
@@ -198,14 +199,14 @@ static StereoEyeParams CalculateStereoEyeParamsInternal ( StereoEye eyeType, Hmd
// this is the mapping of actual physical eye FOV (and our eyes do not zoom!)
// to screen space.
ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov ( fov );
-
+
// The size of the final FB, which is fixed and determined by the physical size of the device display.
Recti distortedViewport = GetFramebufferViewport ( eyeType, hmd );
Vector3f virtualCameraOffset = CalculateEyeVirtualCameraOffset(hmd, eyeType, bMonoRenderingMode);
StereoEyeParams result;
result.Eye = eyeType;
- result.ViewAdjust = Matrix4f::Translation(virtualCameraOffset);
+ result.HmdToEyeViewOffset = Matrix4f::Translation(virtualCameraOffset);
result.Distortion = distortion;
result.DistortionViewport = distortedViewport;
result.Fov = fov;
@@ -372,6 +373,8 @@ StereoConfig::StereoConfig(StereoMode mode)
Hmd.ResolutionInPixels = Sizei(1280, 800);
Hmd.ScreenSizeInMeters = Sizef(0.1498f, 0.0936f);
Hmd.ScreenGapSizeInMeters = 0.0f;
+ Hmd.PelOffsetR = Vector2f ( 0.0f, 0.0f );
+ Hmd.PelOffsetB = Vector2f ( 0.0f, 0.0f );
Hmd.CenterFromTopInMeters = 0.0468f;
Hmd.LensSeparationInMeters = 0.0635f;
Hmd.LensDiameterInMeters = 0.035f;
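
The two PelOffset fields added here are per-channel subpixel ("pel") offsets: the displacement of the red and blue pels from the green pel, in fractions of a pel. A conventional RGB-stripe panel such as DK1's needs none, hence the zero defaults above; a panel whose layout separates the colour channels would carry nonzero calibration. A purely illustrative value, not real calibration data:

    Hmd.PelOffsetR = Vector2f ( -0.5f, 0.0f );  // hypothetical: red pel half a pel left of green
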
@@ -442,7 +445,7 @@ const StereoEyeParamsWithOrtho& StereoConfig::GetEyeRenderParams(StereoEye eye)
UpdateComputedState();
}
- static const UByte eyeParamIndices[3] = { 0, 0, 1 };
+ static const uint8_t eyeParamIndices[3] = { 0, 0, 1 };
OVR_ASSERT(eye < sizeof(eyeParamIndices));
return EyeRenderParams[eyeParamIndices[eye]];
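
Assuming the StereoEye enum orders Center, Left, Right (its declaration is outside this diff), the lookup table collapses the three eye values onto the two stored parameter sets:

    // eyeParamIndices = { 0, 0, 1 }:
    //   StereoEye_Center -> EyeRenderParams[0]
    //   StereoEye_Left   -> EyeRenderParams[0]
    //   StereoEye_Right  -> EyeRenderParams[1]

The switch to uint8_t is part of this commit's wider move from OVR's UByte/UInt16 typedefs to the standard <stdint.h> types.
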
@@ -544,8 +547,10 @@ void StereoConfig::UpdateComputedState()
eyeTypes[1] = StereoEye_Right;
break;
- default:
- OVR_ASSERT( false ); break;
+ default:
+ numEyes = 0;
+ OVR_ASSERT( false );
+ break;
}
// If either of these fire, you've probably forgotten to call SetRendertargetSize()
@@ -787,13 +792,106 @@ static const int DMA_NumTrisPerEye = (DMA_GridSize)*(DMA_GridSize)*2;
-void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices )
+DistortionMeshVertexData DistortionMeshMakeVertex ( Vector2f screenNDC,
+ bool rightEye,
+ const HmdRenderInfo &hmdRenderInfo,
+ const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC )
+{
+ DistortionMeshVertexData result;
+
+ float xOffset = 0.0f;
+ if (rightEye)
+ {
+ xOffset = 1.0f;
+ }
+
+ Vector2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB;
+ TransformScreenNDCToTanFovSpaceChroma ( &tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB,
+ distortion, screenNDC );
+
+ result.TanEyeAnglesR = tanEyeAnglesR;
+ result.TanEyeAnglesG = tanEyeAnglesG;
+ result.TanEyeAnglesB = tanEyeAnglesB;
+
+ HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
+ switch ( shutterType )
+ {
+ case HmdShutter_Global:
+ result.TimewarpLerp = 0.0f;
+ break;
+ case HmdShutter_RollingLeftToRight:
+ // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
+ result.TimewarpLerp = screenNDC.x * 0.25f + 0.25f;
+ if (rightEye)
+ {
+ result.TimewarpLerp += 0.5f;
+ }
+ break;
+ case HmdShutter_RollingRightToLeft:
+ // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
+ result.TimewarpLerp = 0.75f - screenNDC.x * 0.25f;
+ if (rightEye)
+ {
+ result.TimewarpLerp -= 0.5f;
+ }
+ break;
+ case HmdShutter_RollingTopToBottom:
+ // Retrace is top to bottom on both eyes at the same time.
+ result.TimewarpLerp = screenNDC.y * 0.5f + 0.5f;
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+ // When does the fade-to-black edge start? Chosen heuristically.
+ float fadeOutBorderFractionTexture = 0.1f;
+ float fadeOutBorderFractionTextureInnerEdge = 0.1f;
+ float fadeOutBorderFractionScreen = 0.1f;
+ float fadeOutFloor = 0.6f; // the floor controls how much black is in the fade region
+
+ if (hmdRenderInfo.HmdType == HmdType_DK1)
+ {
+ fadeOutBorderFractionTexture = 0.3f;
+ fadeOutBorderFractionTextureInnerEdge = 0.075f;
+ fadeOutBorderFractionScreen = 0.075f;
+ fadeOutFloor = 0.25f;
+ }
+
+ // Fade out at texture edges.
+ // The furthest out will be the blue channel, because of chromatic aberration (true of any standard lens)
+ Vector2f sourceTexCoordBlueNDC = TransformTanFovSpaceToRendertargetNDC ( eyeToSourceNDC, tanEyeAnglesB );
+ if (rightEye)
+ {
+ // The inner edge of the eye texture is usually much more magnified, because it's right against the middle of the screen, not the FOV edge.
+ // So we want a different scaling factor for that. This code flips the texture NDC so that +1.0 is the inner edge
+ sourceTexCoordBlueNDC.x = -sourceTexCoordBlueNDC.x;
+ }
+ float edgeFadeIn = ( 1.0f / fadeOutBorderFractionTextureInnerEdge ) * ( 1.0f - sourceTexCoordBlueNDC.x ) ; // Inner
+ edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f + sourceTexCoordBlueNDC.x ) ); // Outer
+ edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f - sourceTexCoordBlueNDC.y ) ); // Upper
+ edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f + sourceTexCoordBlueNDC.y ) ); // Lower
+
+ // Also fade out at screen edges. Since this is in pixel space, no need to do inner specially.
+ float edgeFadeInScreen = ( 1.0f / fadeOutBorderFractionScreen ) *
+ ( 1.0f - Alg::Max ( Alg::Abs ( screenNDC.x ), Alg::Abs ( screenNDC.y ) ) );
+ edgeFadeIn = Alg::Min ( edgeFadeInScreen, edgeFadeIn ) + fadeOutFloor;
+
+ // Note - this is NOT clamped negatively.
+ // For rendering methods that interpolate over a coarse grid, we need the values to go negative for correct intersection with zero.
+ result.Shade = Alg::Min ( edgeFadeIn, 1.0f );
+ result.ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset;
+ result.ScreenPosNDC.y = -screenNDC.y;
+
+ return result;
+}
+
+
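
Worked numbers for the rolling left-to-right shutter case above (illustrative): screenNDC.x spans [-1, +1] across each eye, so screenNDC.x * 0.25f + 0.25f sweeps TimewarpLerp over [0.0, 0.5] for the left eye, and the extra +0.5f shifts the right eye to [0.5, 1.0]:

    // x = -1 (left edge)  : 0.25 * (-1) + 0.25 = 0.00
    // x = +1 (right edge) : 0.25 * (+1) + 0.25 = 0.50   (then +0.5 on the right eye)

The shade terms each evaluate to (distance from an edge) / (border fraction). With fadeOutBorderFractionScreen = 0.1 and fadeOutFloor = 0.6, a vertex exactly on a screen edge gets a minimum term of 0 and a Shade of 0.6; full brightness is reached 0.04 NDC units inside the edge (0.4 + 0.6 = 1.0); and Shade only crosses zero for positions extrapolated past the edge, which is why the value is deliberately left unclamped below zero.
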
+void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, uint16_t *pTriangleMeshIndices )
{
OVR_FREE ( pVertices );
OVR_FREE ( pTriangleMeshIndices );
}
-void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
int *pNumVertices, int *pNumTriangles,
const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo )
{
@@ -804,17 +902,17 @@ void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTr
// Generate mesh into allocated data and return result.
DistortionMeshCreate(ppVertices, ppTriangleListIndices, &vertexCount, &triangleCount,
rightEye, hmdRenderInfo, stereoParams.Distortion, stereoParams.EyeToSourceNDC);
-
+
*pNumVertices = vertexCount;
*pNumTriangles = triangleCount;
}
// Generate distortion mesh for an eye.
-void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
int *pNumVertices, int *pNumTriangles,
bool rightEye,
- const HmdRenderInfo &hmdRenderInfo,
+ const HmdRenderInfo &hmdRenderInfo,
const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC )
{
*pNumVertices = DMA_NumVertsPerEye;
@@ -822,7 +920,7 @@ void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTri
*ppVertices = (DistortionMeshVertexData*)
OVR_ALLOC( sizeof(DistortionMeshVertexData) * (*pNumVertices) );
- *ppTriangleListIndices = (UInt16*) OVR_ALLOC( sizeof(UInt16) * (*pNumTriangles) * 3 );
+ *ppTriangleListIndices = (uint16_t*) OVR_ALLOC( sizeof(uint16_t) * (*pNumTriangles) * 3 );
if (!*ppVertices || !*ppTriangleListIndices)
{
@@ -841,20 +939,9 @@ void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTri
return;
}
- // When does the fade-to-black edge start? Chosen heuristically.
- const float fadeOutBorderFraction = 0.075f;
-
-
- // Populate vertex buffer info
- float xOffset = 0.0f;
- float uOffset = 0.0f;
- OVR_UNUSED(uOffset);
- if (rightEye)
- {
- xOffset = 1.0f;
- uOffset = 0.5f;
- }
+
+ // Populate vertex buffer info
// First pass - build up raw vertex data.
DistortionMeshVertexData* pcurVert = *ppVertices;
@@ -870,72 +957,23 @@ void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTri
sourceCoordNDC.y = 2.0f * ( (float)y / (float)DMA_GridSize ) - 1.0f;
Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );
- // This is the function that does the really heavy lifting.
+ // Find a corresponding screen position.
+ // Note - this function does not have to be precise - we're just trying to match the mesh tessellation
+ // with the shape of the distortion to minimise the number of triangles needed.
Vector2f screenNDC = TransformTanFovSpaceToScreenNDC ( distortion, tanEyeAngle, false );
-
- // We then need RGB UVs. Since chromatic aberration is generated from screen coords, not
- // directly from texture NDCs, we can't just use tanEyeAngle, we need to go the long way round.
- Vector2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB;
- TransformScreenNDCToTanFovSpaceChroma ( &tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB,
- distortion, screenNDC );
-
- pcurVert->TanEyeAnglesR = tanEyeAnglesR;
- pcurVert->TanEyeAnglesG = tanEyeAnglesG;
- pcurVert->TanEyeAnglesB = tanEyeAnglesB;
-
-
- HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
- switch ( shutterType )
- {
- case HmdShutter_Global:
- pcurVert->TimewarpLerp = 0.0f;
- break;
- case HmdShutter_RollingLeftToRight:
- // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
- pcurVert->TimewarpLerp = screenNDC.x * 0.25f + 0.25f;
- if (rightEye)
- {
- pcurVert->TimewarpLerp += 0.5f;
- }
- break;
- case HmdShutter_RollingRightToLeft:
- // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
- pcurVert->TimewarpLerp = 0.75f - screenNDC.x * 0.25f;
- if (rightEye)
- {
- pcurVert->TimewarpLerp -= 0.5f;
- }
- break;
- case HmdShutter_RollingTopToBottom:
- // Retrace is top to bottom on both eyes at the same time.
- pcurVert->TimewarpLerp = screenNDC.y * 0.5f + 0.5f;
- break;
- default: OVR_ASSERT ( false ); break;
- }
-
- // Fade out at texture edges.
- float edgeFadeIn = ( 1.0f / fadeOutBorderFraction ) *
- ( 1.0f - Alg::Max ( Alg::Abs ( sourceCoordNDC.x ), Alg::Abs ( sourceCoordNDC.y ) ) );
- // Also fade out at screen edges.
- float edgeFadeInScreen = ( 2.0f / fadeOutBorderFraction ) *
- ( 1.0f - Alg::Max ( Alg::Abs ( screenNDC.x ), Alg::Abs ( screenNDC.y ) ) );
- edgeFadeIn = Alg::Min ( edgeFadeInScreen, edgeFadeIn );
-
- // Don't let verts overlap to the other eye.
+ // ...but don't let verts overlap to the other eye.
screenNDC.x = Alg::Max ( -1.0f, Alg::Min ( screenNDC.x, 1.0f ) );
screenNDC.y = Alg::Max ( -1.0f, Alg::Min ( screenNDC.y, 1.0f ) );
- pcurVert->Shade = Alg::Max ( 0.0f, Alg::Min ( edgeFadeIn, 1.0f ) );
- pcurVert->ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset;
- pcurVert->ScreenPosNDC.y = -screenNDC.y;
-
+ // From those screen positions, generate the vertex.
+ *pcurVert = DistortionMeshMakeVertex ( screenNDC, rightEye, hmdRenderInfo, distortion, eyeToSourceNDC );
pcurVert++;
}
}
- // Populate index buffer info
- UInt16 *pcurIndex = *ppTriangleListIndices;
+ // Populate index buffer info
+ uint16_t *pcurIndex = *ppTriangleListIndices;
for ( int triNum = 0; triNum < DMA_GridSize * DMA_GridSize; triNum++ )
{
@@ -982,23 +1020,23 @@ void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTri
// so linear interpolation works better & we can use fewer tris.
if ( ( x < DMA_GridSize/2 ) != ( y < DMA_GridSize/2 ) ) // != is logical XOR
{
- *pcurIndex++ = (UInt16)FirstVertex;
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
- *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex;
}
else
{
- *pcurIndex++ = (UInt16)FirstVertex;
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);
}
}
}
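
For reference, the vertex buffer is laid out row-major over a (DMA_GridSize+1) x (DMA_GridSize+1) grid, so, assuming FirstVertex = y * (DMA_GridSize+1) + x (its computation sits outside this hunk), the four corners of quad (x, y) are:

    // FirstVertex                          top-left
    // FirstVertex + 1                      top-right
    // FirstVertex + (DMA_GridSize+1)       bottom-left
    // FirstVertex + (DMA_GridSize+1) + 1   bottom-right

The quadrant test (x < DMA_GridSize/2) != (y < DMA_GridSize/2) flips which diagonal the two triangles share, so in each quadrant the shared edge runs roughly along the concentric iso-distortion contours rather than across them, which is where the linear interpolation mentioned in the comment above behaves best.
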
@@ -1013,13 +1051,13 @@ static const int HMA_NumVertsPerEye = (HMA_GridSize+1)*(HMA_GridSize+1);
static const int HMA_NumTrisPerEye = (HMA_GridSize)*(HMA_GridSize)*2;
-void HeightmapMeshDestroy ( HeightmapMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices )
+void HeightmapMeshDestroy ( HeightmapMeshVertexData *pVertices, uint16_t *pTriangleMeshIndices )
{
OVR_FREE ( pVertices );
OVR_FREE ( pTriangleMeshIndices );
}
-void HeightmapMeshCreate ( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+void HeightmapMeshCreate ( HeightmapMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
int *pNumVertices, int *pNumTriangles,
const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo )
{
@@ -1037,7 +1075,7 @@ void HeightmapMeshCreate ( HeightmapMeshVertexData **ppVertices, UInt16 **ppTria
// Generate heightmap mesh for one eye.
-void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
int *pNumVertices, int *pNumTriangles, bool rightEye,
const HmdRenderInfo &hmdRenderInfo,
const ScaleAndOffset2D &eyeToSourceNDC )
@@ -1046,7 +1084,7 @@ void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTrian
*pNumTriangles = HMA_NumTrisPerEye;
*ppVertices = (HeightmapMeshVertexData*) OVR_ALLOC( sizeof(HeightmapMeshVertexData) * (*pNumVertices) );
- *ppTriangleListIndices = (UInt16*) OVR_ALLOC( sizeof(UInt16) * (*pNumTriangles) * 3 );
+ *ppTriangleListIndices = (uint16_t*) OVR_ALLOC( sizeof(uint16_t) * (*pNumTriangles) * 3 );
if (!*ppVertices || !*ppTriangleListIndices)
{
@@ -1066,14 +1104,7 @@ void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTrian
}
// Populate vertex buffer info
- float xOffset = 0.0f;
- float uOffset = 0.0f;
-
- if (rightEye)
- {
- xOffset = 1.0f;
- uOffset = 0.5f;
- }
+ // float xOffset = (rightEye ? 1.0f : 0.0f); Currently disabled because its usage is disabled below.
// First pass - build up raw vertex data.
HeightmapMeshVertexData* pcurVert = *ppVertices;
@@ -1087,7 +1118,7 @@ void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTrian
sourceCoordNDC.x = 2.0f * ( (float)x / (float)HMA_GridSize ) - 1.0f;
sourceCoordNDC.y = 2.0f * ( (float)y / (float)HMA_GridSize ) - 1.0f;
Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );
-
+
pcurVert->TanEyeAngles = tanEyeAngle;
HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
@@ -1132,8 +1163,8 @@ void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTrian
}
- // Populate index buffer info
- UInt16 *pcurIndex = *ppTriangleListIndices;
+ // Populate index buffer info
+ uint16_t *pcurIndex = *ppTriangleListIndices;
for ( int triNum = 0; triNum < HMA_GridSize * HMA_GridSize; triNum++ )
{
@@ -1180,23 +1211,23 @@ void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTrian
// so linear interpolation works better & we can use fewer tris.
if ( ( x < HMA_GridSize/2 ) != ( y < HMA_GridSize/2 ) ) // != is logical XOR
{
- *pcurIndex++ = (UInt16)FirstVertex;
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
- *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex;
}
else
{
- *pcurIndex++ = (UInt16)FirstVertex;
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex;
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1);
- *pcurIndex++ = (UInt16)FirstVertex+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
- *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
+ *pcurIndex++ = (uint16_t)FirstVertex+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1;
+ *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1);
}
}
}
@@ -1272,10 +1303,10 @@ PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo,
return result;
}
-Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&eyeViewAdjust )
+Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&hmdToEyeViewOffset )
{
- Matrix4f worldFromPredictedView = (eyeViewAdjust * predictedViewFromWorld).InvertedHomogeneousTransform();
- Matrix4f matRenderFromNowStart = (eyeViewAdjust * renderedViewFromWorld) * worldFromPredictedView;
+ Matrix4f worldFromPredictedView = (hmdToEyeViewOffset * predictedViewFromWorld).InvertedHomogeneousTransform();
+ Matrix4f matRenderFromNowStart = (hmdToEyeViewOffset * renderedViewFromWorld) * worldFromPredictedView;
// The sensor-predicted orientations have: X=right, Y=up, Z=backwards.
// The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards.
@@ -1306,22 +1337,35 @@ Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matri
return matRenderFromNowStart;
}
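
In matrix terms, writing E = hmdToEyeViewOffset, R = renderedViewFromWorld and P = predictedViewFromWorld, the delta assembled above is

    matRenderFromNowStart = (E * R) * (E * P)^-1

a transform carrying predicted ("now") eye space into the eye space the frame was actually rendered with; the axis flips applied before the return adapt it to the mesh's NDC conventions (Y down, Z forward).
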
-Matrix4f TimewarpComputePoseDeltaPosition ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&eyeViewAdjust )
+Matrix4f TimewarpComputePoseDeltaPosition ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&hmdToEyeViewOffset )
{
- Matrix4f worldFromPredictedView = (eyeViewAdjust * predictedViewFromWorld).InvertedHomogeneousTransform();
- Matrix4f matRenderXform = (eyeViewAdjust * renderedViewFromWorld) * worldFromPredictedView;
+ Matrix4f worldFromPredictedView = (hmdToEyeViewOffset * predictedViewFromWorld).InvertedHomogeneousTransform();
+ Matrix4f matRenderXform = (hmdToEyeViewOffset * renderedViewFromWorld) * worldFromPredictedView;
return matRenderXform.Inverted();
}
TimewarpMachine::TimewarpMachine()
-{
+ : VsyncEnabled(false),
+ RenderInfo(),
+ CurrentPredictionValues(),
+ DistortionTimeCount(0),
+ DistortionTimeCurrentStart(0.0),
+ //DistortionTimes[],
+ DistortionTimeAverage(0.f),
+ //EyeRenderPoses[],
+ LastFramePresentFlushTime(0.0),
+ PresentFlushToPresentFlushSeconds(0.f),
+ NextFramePresentFlushTime(0.0)
+{
+ #if defined(OVR_BUILD_DEBUG)
+ memset(DistortionTimes, 0, sizeof(DistortionTimes));
+ #endif
+
for ( int i = 0; i < 2; i++ )
{
- EyeRenderPoses[i] = Transformf();
+ EyeRenderPoses[i] = Posef();
}
- DistortionTimeCount = 0;
- VsyncEnabled = false;
}
void TimewarpMachine::Reset(HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow)
@@ -1338,21 +1382,36 @@ void TimewarpMachine::Reset(HmdRenderInfo& renderInfo, bool vsyncEnabled, double
void TimewarpMachine::AfterPresentAndFlush(double timeNow)
{
+ AfterPresentWithoutFlush();
+ AfterPresentFinishes ( timeNow );
+}
+
+void TimewarpMachine::AfterPresentWithoutFlush()
+{
+ // We've only issued the Present - it hasn't actually finished (i.e. appeared)
+ // But we need to estimate when the next Present will appear, so extrapolate from previous data.
+ NextFramePresentFlushTime = LastFramePresentFlushTime + 2.0 * (double)PresentFlushToPresentFlushSeconds;
+}
+
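
A sanity check on the 2.0 factor, with illustrative numbers: if the previous frame's flush was measured at time T and the flush-to-flush interval is 16.7 ms (60 Hz), the Present just issued should complete near T + 16.7 ms, and the flush after that, which is what this estimate feeds, near T + 33.3 ms, i.e. LastFramePresentFlushTime + 2.0 * PresentFlushToPresentFlushSeconds. AfterPresentFinishes then replaces the estimate with the measured time once the flush actually lands.
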
+void TimewarpMachine::AfterPresentFinishes(double timeNow)
+{
+ // The present has now actually happened.
PresentFlushToPresentFlushSeconds = (float)(timeNow - LastFramePresentFlushTime);
LastFramePresentFlushTime = timeNow;
NextFramePresentFlushTime = timeNow + (double)PresentFlushToPresentFlushSeconds;
}
+
+
double TimewarpMachine::GetViewRenderPredictionTime()
{
// Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToRenderedScene;
}
-Transformf TimewarpMachine::GetViewRenderPredictionPose(SensorFusion &sfusion)
+bool TimewarpMachine::GetViewRenderPredictionPose(SensorStateReader* reader, Posef& pose)
{
- double predictionTime = GetViewRenderPredictionTime();
- return sfusion.GetPoseAtTime(predictionTime);
+ return reader->GetPoseAtTime(GetViewRenderPredictionTime(), pose);
}
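
The SensorFusion-based predecessors returned a pose unconditionally; these SensorStateReader variants signal through the return value whether tracking state was available. A minimal call-site sketch (the variable names are assumptions; only the method shown comes from this diff):

    Posef predictedPose;
    if ( timewarp.GetViewRenderPredictionPose ( pStateReader, predictedPose ) )
    {
        // Render the scene from predictedPose.
    }
    else
    {
        // No tracking data; reuse the last known pose.
    }
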
double TimewarpMachine::GetVisiblePixelTimeStart()
@@ -1365,31 +1424,43 @@ double TimewarpMachine::GetVisiblePixelTimeEnd()
// Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpEnd;
}
-Transformf TimewarpMachine::GetPredictedVisiblePixelPoseStart(SensorFusion &sfusion)
+bool TimewarpMachine::GetPredictedVisiblePixelPoseStart(SensorStateReader* reader, Posef& pose)
{
- double predictionTime = GetVisiblePixelTimeStart();
- return sfusion.GetPoseAtTime(predictionTime);
+ return reader->GetPoseAtTime(GetVisiblePixelTimeStart(), pose);
}
-Transformf TimewarpMachine::GetPredictedVisiblePixelPoseEnd (SensorFusion &sfusion)
+bool TimewarpMachine::GetPredictedVisiblePixelPoseEnd(SensorStateReader* reader, Posef& pose)
{
- double predictionTime = GetVisiblePixelTimeEnd();
- return sfusion.GetPoseAtTime(predictionTime);
+ return reader->GetPoseAtTime(GetVisiblePixelTimeEnd(), pose);
}
-Matrix4f TimewarpMachine::GetTimewarpDeltaStart(SensorFusion &sfusion, Transformf const &renderedPose)
+bool TimewarpMachine::GetTimewarpDeltaStart(SensorStateReader* reader, Posef const &renderedPose, Matrix4f& transform)
{
- Transformf visiblePose = GetPredictedVisiblePixelPoseStart ( sfusion );
+ Posef visiblePose;
+ if (!GetPredictedVisiblePixelPoseStart(reader, visiblePose))
+ {
+ return false;
+ }
+
Matrix4f visibleMatrix(visiblePose);
Matrix4f renderedMatrix(renderedPose);
Matrix4f identity; // doesn't matter for orientation-only timewarp
- return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+ transform = TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+
+ return true;
}
-Matrix4f TimewarpMachine::GetTimewarpDeltaEnd (SensorFusion &sfusion, Transformf const &renderedPose)
+bool TimewarpMachine::GetTimewarpDeltaEnd(SensorStateReader* reader, Posef const &renderedPose, Matrix4f& transform)
{
- Transformf visiblePose = GetPredictedVisiblePixelPoseEnd ( sfusion );
+ Posef visiblePose;
+ if (!GetPredictedVisiblePixelPoseEnd(reader, visiblePose))
+ {
+ return false;
+ }
+
Matrix4f visibleMatrix(visiblePose);
Matrix4f renderedMatrix(renderedPose);
Matrix4f identity; // doesn't matter for orientation-only timewarp
- return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+ transform = TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+
+ return true;
}
@@ -1402,19 +1473,30 @@ double TimewarpMachine::JustInTime_GetDistortionWaitUntilTime()
return LastFramePresentFlushTime;
}
- const float fudgeFactor = 0.002f; // Found heuristically - 1ms is too short because of timing granularity - may need further tweaking!
- float howLongBeforePresent = DistortionTimeAverage + fudgeFactor;
+ // Note - 1-2ms fudge factor (because Windows timer granularity etc) is NOT added here,
+ // because otherwise you end up adding multiple fudge factors!
+ // So it's left for the calling app to add just one fudge factor.
+
+ float howLongBeforePresent = DistortionTimeAverage;
// Subtlety here. Technically, the correct time is NextFramePresentFlushTime - howLongBeforePresent.
// However, if the app drops a frame, this then perpetuates it,
// i.e. if the display is running at 60fps, but the last frame was slow,
// (e.g. because of swapping or whatever), then NextFramePresentFlushTime is
- // 33ms in the future, not 16ms. Since this function supplies the
+ // 33ms in the future, not 16ms. Since this function supplies the
// time to wait until, the app will indeed wait until 32ms, so the framerate
// drops to 30fps and never comes back up!
// So we return the *ideal* framerate, not the *actual* framerate.
return LastFramePresentFlushTime + (float)( CurrentPredictionValues.PresentFlushToPresentFlush - howLongBeforePresent );
}
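
Illustrative numbers: at 60 Hz, PresentFlushToPresentFlush is about 16.7 ms. With a measured DistortionTimeAverage of 3 ms the function returns LastFramePresentFlushTime + 13.7 ms; even if the previous frame slipped and NextFramePresentFlushTime now sits a full 33 ms out, the wait target stays anchored to the ideal frame period, so the app can climb back to 60 fps instead of locking at 30.
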
+double TimewarpMachine::JustInTime_AverageDistortionTime()
+{
+ if ( JustInTime_NeedDistortionTimeMeasurement() )
+ {
+ return 0.0;
+ }
+ return DistortionTimeAverage;
+}
bool TimewarpMachine::JustInTime_NeedDistortionTimeMeasurement() const
{