This article collects typical usage examples of the C++ PVRTVec3 class. If you are unsure how PVRTVec3 is used in C++, or are looking for practical PVRTVec3 examples, the hand-picked class code examples below may help.
The following presents 15 code examples of the PVRTVec3 class.
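Before the examples, here is a minimal orientation sketch of the PVRTVec3 operations that recur throughout this page: construction, normalization, dot and cross products, squared length, and ptr() for feeding data to GL uniforms. It is not taken from any example below; it assumes only the PVRTools math header (PVRTVector.h), and the function name PVRTVec3Basics is ours.
#include "PVRTVector.h"

void PVRTVec3Basics()
{
    PVRTVec3 vA(1.0f, 2.0f, 3.0f);
    PVRTVec3 vB(0.0f, 1.0f, 0.0f);

    // Normalized copy vs. in-place normalization
    PVRTVec3 vDir = vA.normalized();     // vA is left untouched
    vB.normalize();                      // vB is normalized in place

    // Dot and cross products
    float fCos  = vDir.dot(vB);          // cosine of the angle between unit vectors
    PVRTVec3 vN = vA.cross(vB);          // vector perpendicular to vA and vB

    // Squared length avoids a sqrt when only comparing distances
    float fLenSq = vA.lenSqr();

    // ptr() exposes the raw float array, e.g. for glUniform3fv(loc, 1, vDir.ptr())
    const float* pf = vDir.ptr();
    (void)fCos; (void)vN; (void)fLenSq; (void)pf;
}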
Example 1: glUseProgram
/*!****************************************************************************
@Function DrawBalloons
@Input psProgram Program to use
mProjection Projection matrix to use
mView View matrix to use
pmModels A pointer to an array of model matrices
iNum Number of balloons to draw
@Description Draws balloons.
******************************************************************************/
void OGLES2Glass::DrawBalloons(Program* psProgram, PVRTMat4 mProjection, PVRTMat4 mView, PVRTMat4* pmModels, int iNum) {
// Use shader program
glUseProgram(psProgram->uiId);
// Bind texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_uiBalloonTex);
PVRTMat4 mModelView, mMVP;
for (int i = 0; i < iNum; ++i)
{
mModelView = mView * pmModels[i];
mMVP = mProjection * mModelView;
glUniformMatrix4fv(psProgram->auiLoc[eMVMatrix], 1, GL_FALSE, mModelView.ptr());
glUniformMatrix4fv(psProgram->auiLoc[eMVPMatrix], 1, GL_FALSE, mMVP.ptr());
// Calculate and set the model space light direction
PVRTVec3 vLightDir = pmModels[i].inverse() * PVRTVec4(19, 22, -50, 0);
vLightDir = vLightDir.normalize();
glUniform3fv(psProgram->auiLoc[eLightDir], 1, vLightDir.ptr());
// Calculate and set the model space eye position
PVRTVec3 vEyePos = mModelView.inverse() * PVRTVec4(0.0f, 0.0f, 0.0f, 1.0f);
glUniform3fv(psProgram->auiLoc[eEyePos], 1, vEyePos.ptr());
// Now that the uniforms are set, call another function to actually draw the mesh.
DrawMesh(0, &m_Balloon, &m_puiBalloonVbo, &m_puiBalloonIndexVbo, 3);
}
}
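The key PVRTVec3 work in Example 1 is transforming the world-space light direction and the eye position into model space with inverse matrices. The sketch below isolates just that step; mModel and mView are hypothetical stand-ins for pmModels[i] and the view matrix above. Note the w component: 0 for a direction (translation ignored), 1 for a position.
static void ComputeModelSpaceLightAndEye()
{
    // Hypothetical matrices, standing in for pmModels[i] and mView above
    PVRTMat4 mModel = PVRTMat4::RotationY(0.5f);
    PVRTMat4 mView  = PVRTMat4::LookAtRH(PVRTVec3(0.f, 0.f, 150.f), PVRTVec3(0.f), PVRTVec3(0.f, 1.f, 0.f));
    PVRTMat4 mModelView = mView * mModel;

    // World-space light direction -> model space; w = 0 so translation is ignored
    PVRTVec3 vLightDir = mModel.inverse() * PVRTVec4(19, 22, -50, 0);
    vLightDir = vLightDir.normalize();

    // Eye position (the view-space origin) -> model space; w = 1 so translation applies
    PVRTVec3 vEyePos = mModelView.inverse() * PVRTVec4(0.0f, 0.0f, 0.0f, 1.0f);
    (void)vLightDir; (void)vEyePos;
}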
Example 2: FlyState
void BulletNode::FlyState()
{
mBulletPos += mDir * mSpeed;
mBulletPos.y -= mGravity;
mSpeed -= mFriction;
if (mHit)
{
mState = state_hit;
return;
}
if (mSpeed < 0.0f || mBulletPos.y < 0.0f)
{
mState = state_missed;
}
else
{
PVRTVec3 fromStart = mBulletPos - mBulletStartingPos;
float fromStartDist = fromStart.lenSqr();
if (fromStartDist > MM(mRange))
{
mState = state_missed;
}
}
}
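The range test above works on squared distances: lenSqr() avoids a per-frame sqrt, so the threshold it is compared against should be squared too. A minimal sketch of that pattern (IsOutOfRange and fMaxDist are illustrative names, not part of the example):
static bool IsOutOfRange(const PVRTVec3& vPos, const PVRTVec3& vStart, float fMaxDist)
{
    PVRTVec3 vFromStart = vPos - vStart;
    // Compare squared length against squared threshold: no sqrt needed
    return vFromStart.lenSqr() > fMaxDist * fMaxDist;
}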
Example 3: eglSwapBuffers
/*!****************************************************************************
@Function RenderScene
@Return bool true if no error occurred
@Description Main rendering loop function of the program. The shell will
call this function every frame.
eglSwapBuffers() will be performed by PVRShell automatically.
PVRShell will also manage relevant OS events. The user has access to
these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES2Fog::RenderScene()
{
// Clear the color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Keyboard input (cursor to change fog function)
if (PVRShellIsKeyPressed(PVRShellKeyNameLEFT))
{
m_eFogMode = EFogMode((m_eFogMode + eNumFogModes - 1) % eNumFogModes);
}
if (PVRShellIsKeyPressed(PVRShellKeyNameRIGHT))
{
m_eFogMode = EFogMode((m_eFogMode + 1) % eNumFogModes);
}
// Use the loaded shader program
glUseProgram(m_ShaderProgram.uiId);
// Bind texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_uiTexture);
// Set uniforms
glUniform1i(m_ShaderProgram.uiFogFuncLoc, m_eFogMode);
// Rotate and translate the model matrix
PVRTMat4 mModel = PVRTMat4::RotationY(m_fAngleY);
m_fAngleY += PVRT_PI / 90;
mModel.preTranslate(0, 0, 500 * cos(m_fPositionZ) - 450);
m_fPositionZ += (2*PVRT_PI)*0.0008f;
// Feed Projection and Model View matrices to the shaders
PVRTMat4 mModelView = m_mView * mModel;
PVRTMat4 mMVP = m_mProjection * mModelView;
glUniformMatrix4fv(m_ShaderProgram.uiModelViewLoc, 1, GL_FALSE, mModelView.ptr());
glUniformMatrix4fv(m_ShaderProgram.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.ptr());
// Pass the light direction transformed with the inverse of the ModelView matrix
// This saves the transformation of the normals per vertex. A simple dot3 between this direction
// and the un-transformed normal will allow proper smooth shading.
PVRTVec3 vMsLightDir = (PVRTMat3(mModel).inverse() * PVRTVec3(1, 1, 1)).normalized();
glUniform3fv(m_ShaderProgram.uiLightDirLoc, 1, vMsLightDir.ptr());
/*
Now that the model-view matrix is set and the materials ready,
call another function to actually draw the mesh.
*/
DrawMesh(0);
// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
m_Print3D.DisplayDefaultTitle("Fog", "", ePVRTPrint3DLogoIMG);
m_Print3D.Print3D(0.3f, 7.5f, 0.75f, PVRTRGBA(255,255,255,255), "Fog Mode: %s", g_FogFunctionList[m_eFogMode]);
m_Print3D.Flush();
return true;
}
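The cursor-key handling above cycles through the fog modes with modular arithmetic; adding eNumFogModes before the decrement keeps the intermediate value non-negative. The same pattern in isolation (the EMode enum here is hypothetical):
enum EMode { eModeA, eModeB, eModeC, eNumModes };

static EMode NextMode(EMode eMode)     { return EMode((eMode + 1) % eNumModes); }
static EMode PreviousMode(EMode eMode) { return EMode((eMode + eNumModes - 1) % eNumModes); }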
Example 4: RenderStatue
// ---------------------------------------------------------------
void MyPVRDemo::RenderStatue(const PVRTMat4& mxModel, const PVRTMat4& mxCam, const PVRTVec3& vLightPos, const StatueShader* pShader)
{
PVRTMat4 mxModelView = mxCam * mxModel;
PVRTMat4 mxMVP = m_mxProjection * mxModelView;
PVRTVec3 vLightPosModel = vLightPos; // Light position in World space
glUniform3fv(pShader->uiLightPos, 1, vLightPosModel.ptr());
glUniformMatrix4fv(pShader->uiMVP, 1, GL_FALSE, mxMVP.ptr());
glUniformMatrix4fv(pShader->uiModelView, 1, GL_FALSE, mxModelView.ptr());
DrawMesh(enumMODEL_Statue, FLAG_VRT | FLAG_TEX0 | FLAG_NRM | FLAG_TAN);
}
Example 5: f2vt
void SimpleCamera::updatePosition()
{
// Most of this code smooths the movement when the camera is driven by the primitive keyboard input available
PVRTVec3 vDec = m_vVelocity * f2vt(TimeController::inst().getDeltaTime()) * m_fMoveSpeed * f2vt(0.1f);
while(vDec.lenSqr()>m_vVelocity.lenSqr())
{
vDec /= f2vt(2.0f);
}
m_vVelocity -= vDec;
if(m_vVelocity.lenSqr()>m_fMoveSpeed*m_fMoveSpeed)
{
m_vVelocity = m_vVelocity.normalized()*m_fMoveSpeed;
}
m_vPosition += m_vVelocity * f2vt((float)TimeController::inst().getDeltaTime());
}
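Two PVRTVec3 idioms appear above: halving the deceleration until it no longer exceeds the current velocity, and clamping the speed by comparing squared lengths before renormalizing. A sketch of the clamping step alone, ignoring the f2vt() fixed-point wrappers (ClampSpeed is an illustrative name):
static void ClampSpeed(PVRTVec3& vVelocity, float fMaxSpeed)
{
    // lenSqr() avoids a sqrt: only normalize when the limit is actually exceeded
    if (vVelocity.lenSqr() > fMaxSpeed * fMaxSpeed)
    {
        vVelocity = vVelocity.normalized() * fMaxSpeed;
    }
}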
Example 6: eglSwapBuffers
/*!****************************************************************************
@Function RenderScene
@Return bool true if no error occurred
@Description Main rendering loop function of the program. The shell will
call this function every frame.
eglSwapBuffers() will be performed by PVRShell automatically.
PVRShell will also manage relevant OS events. The user has access to
these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES2FastTnL::RenderScene()
{
// Clear the color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use shader program
glUseProgram(m_ShaderProgram.uiId);
// Bind texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_uiTexture);
// Rotate the model matrix
PVRTMat4 mModel = PVRTMat4::RotationY(m_fAngleY);
m_fAngleY += 0.02f;
// Calculate model view projection matrix
PVRTMat4 mMVP = m_mViewProj * mModel;
// Feed the model view projection matrix to the shaders
glUniformMatrix4fv(m_ShaderProgram.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.ptr());
/*
The inverse of a rotation matrix is its transpose.
Because v * M == transpose(M) * v, this means:
v * R == inverse(R) * v
So we don't have to actually invert or transpose the matrix
to transform back from world space to model space.
*/
PVRTVec3 vMsLightDir = (PVRTVec3(1, 1, 1) * PVRTMat3(mModel)).normalized();
glUniform3fv(m_ShaderProgram.uiLightDirLoc, 1, vMsLightDir.ptr());
/*
Now that the uniforms are set, call another function to actually draw the mesh.
*/
DrawMesh(0);
// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
m_Print3D.DisplayDefaultTitle("FastTnL", "", ePVRTPrint3DLogoIMG);
m_Print3D.Flush();
return true;
}
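The comment block in Example 6 relies on the fact that, for a pure rotation R, multiplying a row vector on the left (v * R) equals multiplying by the inverse on the right (R.inverse() * v), so no explicit inverse is needed. A sketch spelling out both forms with the PVRTools types, assuming the model matrix is rotation-only as in the example (RotationInverseCheck is ours):
static void RotationInverseCheck()
{
    PVRTMat4 mModel = PVRTMat4::RotationY(0.02f);   // pure rotation
    PVRTMat3 mRot(mModel);                          // upper 3x3 block

    PVRTVec3 vWorldLight(1, 1, 1);

    // Row-vector form used by the example: no explicit inverse required
    PVRTVec3 vMsLightDirA = (vWorldLight * mRot).normalized();

    // Equivalent column-vector form with an explicit inverse
    PVRTVec3 vMsLightDirB = (mRot.inverse() * vWorldLight).normalized();

    // For a pure rotation, vMsLightDirA and vMsLightDirB are the same direction
    (void)vMsLightDirA; (void)vMsLightDirB;
}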
Example 7: eglSwapBuffers
/*!****************************************************************************
@Function RenderScene
@Return bool true if no error occurred
@Description Main rendering loop function of the program. The shell will
call this function every frame.
eglSwapBuffers() will be performed by PVRShell automatically.
PVRShell will also manage relevant OS events. The user has access to
these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES3CellShading::RenderScene()
{
// Clears the color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use the loaded shader program
glUseProgram(m_ShaderProgram.uiId);
// Bind textures
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_uiShadingTex);
// Calculate the model matrix
PVRTMat4 mModel = PVRTMat4::RotationY(m_fAngleY);
m_fAngleY += PVRT_PI / 210;
// Set model view projection matrix
PVRTMat4 mMVP = m_mViewProj * mModel;
glUniformMatrix4fv(m_ShaderProgram.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.ptr());
// Set eye position in model space
PVRTVec4 vMsEyePos = PVRTVec4(0, 0, 125, 1) * mModel;
glUniform3fv(m_ShaderProgram.uiEyePosLoc, 1, vMsEyePos.ptr());
// transform directional light from world space to model space
PVRTVec3 vMsLightDir = PVRTVec3(PVRTVec4(1, 2, 1, 0) * mModel).normalized();
glUniform3fv(m_ShaderProgram.uiLightDirLoc, 1, vMsLightDir.ptr());
DrawMesh(0);
// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
m_Print3D.DisplayDefaultTitle("CellShading", "", ePVRTPrint3DSDKLogo);
m_Print3D.Flush();
return true;
}
示例8: vecA
void SceneManager::Update()
{
RenderLayerManager & renderManager = RenderLayerManager::GetRenderLayerManager();
const PVRTVec3 center = renderManager.GetCenter();
float occlusionRadius = renderManager.GetOcclusionRadius();
PVRTVec4 vecA( mLookMtx->f[12], 0.0f, mLookMtx->f[14], 1);
PVRTVec4 vecB( GLOBAL_SCALE * FRUSTUM_W, 0.0f, GLOBAL_SCALE * FRUSTUM_D, 1);
PVRTVec4 vecC( GLOBAL_SCALE * -FRUSTUM_W, 0.0f, GLOBAL_SCALE * FRUSTUM_D, 1);
vecB = *mLookMtx * vecB;
vecC = *mLookMtx * vecC;
PVRTVec2 A(vecA.x, vecA.z);
PVRTVec2 B(vecB.x, vecB.z);
PVRTVec2 C(vecC.x, vecC.z);
mToApplyCount = 0;
if (mQuadTree)
{
static QuadNode * quadNodes[256]={0};
int quadNodeCount = 0;
//mQuadTree->GetQuads(center.x, center.z, occlusionRadius, quadNodes, quadNodeCount);
mQuadTree->GetQuadsCameraFrustum(quadNodes, quadNodeCount, mLookMtx);
quadNodeCount--;
bool useFrustumCulling = true; // NOTE: frustum culling is forced on here
for (int quad = quadNodeCount ; quad >=0 ; quad--)
{
QuadNode * pQuadNode = quadNodes[quad];
List & dataList = pQuadNode->GetDataList();
ListIterator listIter(dataList);
while( Node * pRootNode = (Node*)listIter.GetPtr() )
{
if (!pRootNode->IsVisible())
continue;
//pRootNode->UpdateWithoutChildren();
bool useOcclusionRadius = pRootNode->GetUseOcclusionCulling();
PVRTVec3 worldPos = pRootNode->GetWorldTranslation();
if (!useFrustumCulling && useOcclusionRadius)
{
PVRTVec3 distVec = worldPos - center;
if ( distVec.lenSqr() < MM(occlusionRadius) )
{
pRootNode->SetInFrustum(true);
pRootNode->Update();
mToApply[mToApplyCount] = pRootNode;
mToApplyCount++;
}
else
{
pRootNode->SetInFrustum(false);
}
}
else if (useFrustumCulling)
{
PVRTVec2 P(worldPos.x, worldPos.z);
PVRTVec2 v0 = C - A;
PVRTVec2 v1 = B - A;
PVRTVec2 v2 = P - A;
// Compute dot products
float dot00 = v0.dot(v0);
float dot01 = v0.dot(v1);
float dot02 = v0.dot(v2);
float dot11 = v1.dot(v1);
float dot12 = v1.dot(v2);
// Compute barycentric coordinates
float invDenom = 1.0f / (dot00 * dot11 - dot01 * dot01);
float u = (dot11 * dot02 - dot01 * dot12) * invDenom;
float v = (dot00 * dot12 - dot01 * dot02) * invDenom;
bool addToList = false;
// Check if point is in triangle
//PVRTVec3 distVec = worldPos - center;
//if ( distVec.lenSqr() < MM(occlusionRadius) )
{
if ( (u > 0) && (v > 0) && (u + v < 1))
{
addToList = true;
}
else if ( Collision::CircleTriangleEdgeIntersection(A,B,P, pRootNode->GetRadius() ) )
{
addToList = true;
}
else if ( Collision::CircleTriangleEdgeIntersection(A,C,P, pRootNode->GetRadius() ))
{
addToList = true;
//......... (the rest of this example is omitted) .........
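The in-frustum test above is a 2D point-in-triangle check using barycentric coordinates on the XZ plane (A is the camera position, B and C the far frustum corners). Extracted into a standalone helper as a sketch, mirroring the dot-product math above:
// Returns true if P lies strictly inside triangle ABC (all points on the XZ plane)
static bool PointInTriangle(const PVRTVec2& A, const PVRTVec2& B, const PVRTVec2& C, const PVRTVec2& P)
{
    PVRTVec2 v0 = C - A;
    PVRTVec2 v1 = B - A;
    PVRTVec2 v2 = P - A;

    float dot00 = v0.dot(v0);
    float dot01 = v0.dot(v1);
    float dot02 = v0.dot(v2);
    float dot11 = v1.dot(v1);
    float dot12 = v1.dot(v2);

    // Barycentric coordinates of P with respect to the triangle
    float invDenom = 1.0f / (dot00 * dot11 - dot01 * dot01);
    float u = (dot11 * dot02 - dot01 * dot12) * invDenom;
    float v = (dot00 * dot12 - dot01 * dot02) * invDenom;

    return (u > 0) && (v > 0) && (u + v < 1);
}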
Example 9: eglSwapBuffers
/*!****************************************************************************
@Function RenderScene
@Return bool true if no error occurred
@Description Main rendering loop function of the program. The shell will
call this function every frame.
eglSwapBuffers() will be performed by PVRShell automatically.
PVRShell will also manage relevant OS events. The user has access to
these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES2AnisotropicLighting::RenderScene()
{
// Clear the color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Keyboard input (cursor to change render mode)
if (PVRShellIsKeyPressed(PVRShellKeyNameLEFT))
{
m_eRenderMode = ERenderMode((m_eRenderMode + eNumRenderModes - 1) % eNumRenderModes);
}
if (PVRShellIsKeyPressed(PVRShellKeyNameRIGHT))
{
m_eRenderMode = ERenderMode((m_eRenderMode + 1) % eNumRenderModes);
}
// Rotate the model matrix
PVRTMat4 mModel = PVRTMat4::RotationY(m_fAngleY);
m_fAngleY += 0.02f;
// Calculate model view projection matrix
PVRTMat4 mMVP = m_mViewProj * mModel;
if (m_eRenderMode == eTexLookup)
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_uiTexture);
glUseProgram(m_FastShader.uiId);
glUniformMatrix4fv(m_FastShader.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.ptr());
/*
The inverse of a rotation matrix is the transposed matrix
Because of v * M = transpose(M) * v, this means:
v * R == inverse(R) * v
So we don't have to actually invert or transpose the matrix
to transform back from world space to model space
*/
PVRTVec3 vMsEyePos = PVRTVec3(PVRTVec4(0, 0, 150, 1) * mModel);
glUniform3fv(m_FastShader.uiMsEyePosLoc, 1, vMsEyePos.ptr());
PVRTVec3 vMsLightDir = PVRTVec3(PVRTVec4(1, 1, 1, 1) * mModel).normalized();
glUniform3fv(m_FastShader.uiMsLightDirLoc, 1, vMsLightDir.ptr());
}
else
{
glUseProgram(m_SlowShader.uiId);
glUniformMatrix4fv(m_SlowShader.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.ptr());
PVRTVec3 vMsEyeDir = PVRTVec3(PVRTVec4(0, 0, 150, 1) * mModel).normalized();
glUniform3fv(m_SlowShader.uiMsEyeDirLoc, 1, vMsEyeDir.ptr());
PVRTVec3 vMsLightDir = PVRTVec3(PVRTVec4(1, 1, 1, 1) * mModel).normalized();
glUniform3fv(m_SlowShader.uiMsLightDirLoc, 1, vMsLightDir.ptr());
}
/*
Now that the uniforms are set, call another function to actually draw the mesh.
*/
DrawMesh(0);
// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
m_Print3D.DisplayDefaultTitle("AnisotropicLighting", "", ePVRTPrint3DLogoIMG);
m_Print3D.Print3D(0.3f, 7.5f, 0.75f, PVRTRGBA(255,255,255,255), c_aszRenderModes[m_eRenderMode]);
m_Print3D.Flush();
return true;
}
Example 10: InitView
/*!****************************************************************************
@Function InitView
@Return bool true if no error occurred
@Description Code in InitView() will be called by PVRShell upon
initialization or after a change in the rendering context.
Used to initialize variables that are dependent on the rendering
context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES2AnisotropicLighting::InitView()
{
CPVRTString ErrorStr;
/*
Initialize VBO data
*/
LoadVbos();
/*
Load textures
*/
if (!LoadTextures(&ErrorStr))
{
PVRShellSet(prefExitMessage, ErrorStr.c_str());
return false;
}
/*
Load and compile the shaders & link programs
*/
if (!LoadShaders(&ErrorStr))
{
PVRShellSet(prefExitMessage, ErrorStr.c_str());
return false;
}
// Is the screen rotated?
bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
/*
Initialize Print3D
*/
if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
{
PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
return false;
}
/*
Calculate the projection and view matrices
*/
float fAspect = PVRShellGet(prefWidth) / (float)PVRShellGet(prefHeight);
m_mViewProj = PVRTMat4::PerspectiveFovFloatDepthRH(CAM_FOV, fAspect, CAM_NEAR, PVRTMat4::OGL, bRotate);
m_mViewProj *= PVRTMat4::LookAtRH(PVRTVec3(0.f, 0.f, 150.f), PVRTVec3(0.f), PVRTVec3(0.f, 1.f, 0.f));
/*
Set uniforms that are constant throughout this training course
*/
// Set the sampler2D variable to the first texture unit
glUseProgram(m_FastShader.uiId);
glUniform1i(glGetUniformLocation(m_FastShader.uiId, "sTexture"), 0);
// Define material properties
glUseProgram(m_SlowShader.uiId);
float afMaterial[4] = {
0.4f, // Diffuse intensity scale
0.6f, // Diffuse intensity bias
0.82f, // Specular intensity scale
0.0f, // Specular bias
};
glUniform4fv(glGetUniformLocation(m_SlowShader.uiId, "Material"), 1, afMaterial);
// Set surface grain direction
PVRTVec3 vMsGrainDir = PVRTVec3(2, 1, 0).normalized();
glUniform3fv(glGetUniformLocation(m_SlowShader.uiId, "GrainDir"), 1, vMsGrainDir.ptr());
/*
Set OpenGL ES render states needed for this training course
*/
// Enable backface culling and depth test
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
// Enable z-buffer test
// We are using a projection matrix optimized for a floating point depth buffer,
// so the depth test and clear value need to be inverted (1 becomes near, 0 becomes far).
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_GEQUAL);
glClearDepthf(0.0f);
// Use a nice bright blue as clear colour
glClearColor(0.6f, 0.8f, 1.0f, 1.0f);
m_fAngleY = 0;
m_eRenderMode = eTexLookup;
return true;
}
Example 11: PVRShellOutputDebug
//==========================================================================================================================================
bool OGLES2Water::GenerateNormalisationCubeMap(int uiTextureSize)
{
// variables
float fOffset = 0.5f;
float fHalfSize = uiTextureSize *0.5f;
PVRTVec3 vTemp;
unsigned char* pByte;
unsigned char* pData = new unsigned char[uiTextureSize*uiTextureSize*3];
if(!pData)
{
PVRShellOutputDebug("Unable to allocate memory for texture data for cube map\n");
return false;
}
// Positive X
pByte = pData;
for(int j = 0; j < uiTextureSize; ++j)
{
for(int i = 0; i < uiTextureSize; ++i)
{
vTemp.x = fHalfSize;
vTemp.y = -(j + fOffset - fHalfSize);
vTemp.z = -(i + fOffset - fHalfSize);
// normalize, then pack from [-1, 1] into [0, 1]
vTemp = vTemp.normalize() *0.5 + 0.5;
pByte[0] = (unsigned char)(vTemp.x * 255);
pByte[1] = (unsigned char)(vTemp.y * 255);
pByte[2] = (unsigned char)(vTemp.z * 255);
pByte += 3;
}
}
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGB /*GL_RGBA8*/, uiTextureSize, uiTextureSize, 0, GL_RGB, GL_UNSIGNED_BYTE, pData);
// Negative X
pByte = pData;
for(int j = 0; j < uiTextureSize; ++j)
{
for(int i = 0; i < uiTextureSize; ++i)
{
vTemp.x = -fHalfSize;
vTemp.y = -(j + fOffset - fHalfSize);
vTemp.z = (i + fOffset - fHalfSize);
// normalize, then pack from [-1, 1] into [0, 1]
vTemp = vTemp.normalize() *0.5 + 0.5;
pByte[0] = (unsigned char)(vTemp.x * 255);
pByte[1] = (unsigned char)(vTemp.y * 255);
pByte[2] = (unsigned char)(vTemp.z * 255);
pByte += 3;
}
}
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGB /*GL_RGBA8*/, uiTextureSize, uiTextureSize, 0, GL_RGB, GL_UNSIGNED_BYTE, pData);
// Positive Y
pByte = pData;
for(int j = 0; j < uiTextureSize; ++j)
{
for(int i = 0; i < uiTextureSize; ++i)
{
vTemp.x = i + fOffset - fHalfSize;
vTemp.y = fHalfSize;
vTemp.z = j + fOffset - fHalfSize;
// normalize, then pack from [-1, 1] into [0, 1]
vTemp = vTemp.normalize() *0.5 + 0.5;
pByte[0] = (unsigned char)(vTemp.x * 255);
pByte[1] = (unsigned char)(vTemp.y * 255);
pByte[2] = (unsigned char)(vTemp.z * 255);
pByte += 3;
}
}
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGB /*GL_RGBA8*/, uiTextureSize, uiTextureSize, 0, GL_RGB, GL_UNSIGNED_BYTE, pData);
// Negative Y
pByte = pData;
for(int j = 0; j < uiTextureSize; ++j)
{
for(int i = 0; i < uiTextureSize; ++i)
{
vTemp.x = i + fOffset - fHalfSize;
vTemp.y = -fHalfSize;
vTemp.z = -(j + fOffset - fHalfSize);
// normalize, then pack from [-1, 1] into [0, 1]
vTemp = vTemp.normalize() *0.5 + 0.5;
pByte[0] = (unsigned char)(vTemp.x * 255);
//......... (the rest of this example is omitted) .........
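Each texel of the normalization cube map stores a direction packed into RGB bytes: normalize, remap from [-1, 1] to [0, 1], then scale to 0-255. The packing step isolated into a small helper (PackNormalToRGB is an illustrative name; the per-face direction setup above is unchanged):
// Packs a direction into three bytes: [-1, 1] -> [0, 1] -> [0, 255]
static void PackNormalToRGB(PVRTVec3 vDir, unsigned char* pByte)
{
    vDir = vDir.normalize() * 0.5f + 0.5f;
    pByte[0] = (unsigned char)(vDir.x * 255);
    pByte[1] = (unsigned char)(vDir.y * 255);
    pByte[2] = (unsigned char)(vDir.z * 255);
}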
示例12: function
void ParametricSurface::ComputeVertexAndNormals(PFUNCTION function, float dMinU, float dMaxU, float dMinV, float dMaxV)
{
int nVertex = nSampleU * nSampleV;
pVertex = new float[nVertex*3];
pNormal = new float[nVertex*3];
pUV = new float[nVertex*2];
fMinU = dMinU;
fMaxU = dMaxU;
fMinV = dMinV;
fMaxV = dMaxV;
for (int i=0; i<nSampleU; i++)
{
for (int j=0; j<nSampleV; j++)
{
float u = fMinU + i * (fMaxU-fMinU) / (float)(nSampleU-1);
float v = fMinV + j * (fMaxV-fMinV) / (float)(nSampleV-1);
float x,y,z;
function(u,v, &x,&y,&z);
pVertex[(j*nSampleU+i)*3 + 0] = x;
pVertex[(j*nSampleU+i)*3 + 1] = y;
pVertex[(j*nSampleU+i)*3 + 2] = z;
}
}
for (int i=0; i<nSampleU; i++)
{
for (int j=0; j<nSampleV; j++)
{
pUV[ (j*nSampleU+i)*2 + 0 ] = (float)i / (float)(nSampleU-1);
pUV[ (j*nSampleU+i)*2 + 1 ] = (float)j / (float)(nSampleV-1);
}
}
for (int i=0; i<nSampleU-1; i++)
{
for (int j=0; j<nSampleV-1; j++)
{
PVRTVec3 ptA = PVRTVec3(pVertex[(j*nSampleU+i)*3+0],pVertex[(j*nSampleU+i)*3+1],pVertex[(j*nSampleU+i)*3+2]);
PVRTVec3 ptB = PVRTVec3(pVertex[(j*nSampleU+i+1)*3+0],pVertex[(j*nSampleU+i+1)*3+1],pVertex[(j*nSampleU+i+1)*3+2]);
PVRTVec3 ptC = PVRTVec3(pVertex[((j+1)*nSampleU+i)*3+0],pVertex[((j+1)*nSampleU+i)*3+1],pVertex[((j+1)*nSampleU+i)*3+2]);
PVRTVec3 AB = PVRTVec3(ptB.x-ptA.x, ptB.y-ptA.y, ptB.z-ptA.z);
PVRTVec3 AC = PVRTVec3(ptC.x-ptA.x, ptC.y-ptA.y, ptC.z-ptA.z);
PVRTVec3 normal;
normal = AB.cross(AC);
normal.normalize();
pNormal[(j*nSampleU+i)*3 + 0] = -normal.x;
pNormal[(j*nSampleU+i)*3 + 1] = -normal.y;
pNormal[(j*nSampleU+i)*3 + 2] = -normal.z;
}
}
for (int i=0; i<nSampleU-1; i++)
{
pNormal[((nSampleV-1)*nSampleU+i)*3+0] = pNormal[(i)*3+0];
pNormal[((nSampleV-1)*nSampleU+i)*3+1] = pNormal[(i)*3+1];
pNormal[((nSampleV-1)*nSampleU+i)*3+2] = pNormal[(i)*3+2];
}
for (int j=0; j<nSampleV-1; j++)
{
pNormal[(j*nSampleU+nSampleU-1)*3+0] = pNormal[(j*nSampleU)*3+0];
pNormal[(j*nSampleU+nSampleU-1)*3+1] = pNormal[(j*nSampleU)*3+1];
pNormal[(j*nSampleU+nSampleU-1)*3+2] = pNormal[(j*nSampleU)*3+2];
}
pNormal[((nSampleV-1)*nSampleU + (nSampleU-1))*3+0]= pNormal[((nSampleV-2)*nSampleU + (nSampleU-2))*3+0];
pNormal[((nSampleV-1)*nSampleU + (nSampleU-1))*3+1]= pNormal[((nSampleV-2)*nSampleU + (nSampleU-2))*3+1];
pNormal[((nSampleV-1)*nSampleU + (nSampleU-1))*3+2]= pNormal[((nSampleV-2)*nSampleU + (nSampleU-2))*3+2];
// Insert generated data into vertex buffer objects.
glBindBuffer(GL_ARRAY_BUFFER, iVertexVBO);
glBufferData(GL_ARRAY_BUFFER, nVertex * 3 * sizeof (float), pVertex, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, iUvVBO);
glBufferData(GL_ARRAY_BUFFER, nVertex * 2 * sizeof (float), pUV, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, iNormalVBO);
glBufferData(GL_ARRAY_BUFFER, nVertex * 3 * sizeof (float), pNormal, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0); // Unbind the last buffer used.
delete[] pVertex;
delete[] pNormal;
delete[] pUV;
}
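The normal generation above builds two edge vectors per grid cell and crosses them; the argument order (AB x AC versus AC x AB) decides which way the normal faces, which is why the example stores the negated result. The same computation as a compact helper (FaceNormal is an illustrative name):
// Face normal of triangle (A, B, C); swap B and C (or negate) to flip the facing
static PVRTVec3 FaceNormal(const PVRTVec3& ptA, const PVRTVec3& ptB, const PVRTVec3& ptC)
{
    PVRTVec3 vAB = ptB - ptA;
    PVRTVec3 vAC = ptC - ptA;
    PVRTVec3 vN = vAB.cross(vAC);
    return vN.normalized();
}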
示例13: PVRTMat3
/*!****************************************************************************
@Function UpdateFurShells
@Description Update the fur shells. This is only called when the number of
shells changes.
******************************************************************************/
void OGLESFur::UpdateFurShells()
{
PVRTVec3 *pvSrcN, *pvSrcV;
PVRTVec3 vTransNorm;
PVRTVec4 vTransPos;
SVertex *pvData;
int i;
unsigned int j;
float fDepth, *pUV;
int i32MeshIndex = m_Scene.pNode[eDuckBody].nIdx;
SPODMesh* pMesh = &m_Scene.pMesh[i32MeshIndex];
PVRTMat4 mModel;
PVRTMat3 mModel3;
m_Scene.GetWorldMatrix(mModel, m_Scene.pNode[eDuckBody]);
mModel3 = PVRTMat3(mModel);
pvData = new SVertex[pMesh->nNumVertex];
if(!pvData)
return;
for(i = 0; i < m_i32FurShellNo; ++i)
{
fDepth = (c_fFurDepth * (float)(i+1) / (float)m_i32FurShellNo);
for(j = 0; j < pMesh->nNumVertex; ++j)
{
pvSrcN = (PVRTVec3*) (pMesh->pInterleaved + (size_t) pMesh->sNormals.pData + (j * pMesh->sNormals.nStride));
pvSrcV = (PVRTVec3*) (pMesh->pInterleaved + (size_t) pMesh->sVertex.pData + (j * pMesh->sVertex.nStride));
pUV = (float*) (pMesh->pInterleaved + (size_t) pMesh->psUVW[0].pData + (j * pMesh->psUVW[0].nStride));
// Transform the vertex position so it is in world space
PVRTVec4 vPos4 = PVRTVec4(*pvSrcV, 1.0f);
PVRTTransform(&vTransPos, &vPos4, &mModel);
// Transform the vertex normal so it is in world space
vTransNorm.x = mModel.f[0] * pvSrcN->x + mModel.f[4] * pvSrcN->y + mModel.f[8] * pvSrcN->z;
vTransNorm.y = mModel.f[1] * pvSrcN->x + mModel.f[5] * pvSrcN->y + mModel.f[9] * pvSrcN->z;
vTransNorm.z = mModel.f[2] * pvSrcN->x + mModel.f[6] * pvSrcN->y + mModel.f[10]* pvSrcN->z;
vTransNorm.normalize();
pvData[j].x = vTransPos.x + (vTransNorm.x * fDepth);
pvData[j].y = vTransPos.y + (vTransNorm.y * fDepth);
pvData[j].z = vTransPos.z + (vTransNorm.z * fDepth);
pvData[j].nx = vTransNorm.x;
pvData[j].ny = vTransNorm.y;
pvData[j].nz = vTransNorm.z;
pvData[j].tu = pUV[0];
pvData[j].tv = pUV[1];
}
glBindBuffer(GL_ARRAY_BUFFER, m_uiShellVbo[i]);
unsigned int uiSize = pMesh->nNumVertex * sizeof(SVertex);
glBufferData(GL_ARRAY_BUFFER, uiSize, pvData, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
delete[] pvData;
}
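The three explicit lines above multiply each normal by the upper 3x3 block of the model matrix (mModel3 is built for exactly this, though the example multiplies by hand). For a rotation or uniform-scale model matrix the same result can be written with PVRTMat3; for non-uniform scale the inverse-transpose would be needed instead. A sketch:
// Equivalent to the manual multiply above for rotation / uniform-scale model matrices
static PVRTVec3 TransformNormalToWorld(const PVRTMat4& mModel, const PVRTVec3& vNormal)
{
    PVRTVec3 vWorldNormal = PVRTMat3(mModel) * vNormal;   // upper 3x3 only: no translation
    vWorldNormal.normalize();
    return vWorldNormal;
}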
Example 14: eglSwapBuffers
/*!****************************************************************************
@Function RenderScene
@Return bool true if no error occurred
@Description Main rendering loop function of the program. The shell will
call this function every frame.
eglSwapBuffers() will be performed by PVRShell automatically.
PVRShell will also manage relevant OS events. The user has access to
these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES2ChameleonMan::RenderScene()
{
// Clear the color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use shader program
glUseProgram(m_SkinnedShaderProgram.uiId);
if(PVRShellIsKeyPressed(PVRShellKeyNameACTION1))
{
m_bEnableDOT3 = !m_bEnableDOT3;
glUniform1i(m_SkinnedShaderProgram.auiLoc[ebUseDot3], m_bEnableDOT3);
}
/*
Calculates the frame number to animate in a time-based manner.
Uses the shell function PVRShellGetTime() to get the time in milliseconds.
*/
unsigned long iTime = PVRShellGetTime();
if(iTime > m_iTimePrev)
{
float fDelta = (float) (iTime - m_iTimePrev);
m_fFrame += fDelta * g_fDemoFrameRate;
// Increment the counters to make sure our animation works
m_fLightPos += fDelta * 0.0034f;
m_fWallPos += fDelta * 0.00027f;
m_fBackgroundPos += fDelta * -0.000027f;
// Wrap the Animation back to the Start
if(m_fLightPos >= PVRT_TWO_PI)
m_fLightPos -= PVRT_TWO_PI;
if(m_fWallPos >= PVRT_TWO_PI)
m_fWallPos -= PVRT_TWO_PI;
if(m_fBackgroundPos <= 0)
m_fBackgroundPos += 1.0f;
if(m_fFrame > m_Scene.nNumFrame - 1)
m_fFrame = 0;
}
m_iTimePrev = iTime;
// Set the scene animation to the current frame
m_Scene.SetFrame(m_fFrame);
// Set up camera
PVRTVec3 vFrom, vTo, vUp(0.0f, 1.0f, 0.0f);
PVRTMat4 mView, mProjection;
PVRTVec3 LightPos;
float fFOV;
int i;
bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
// Get the camera position, target and field of view (fov)
if(m_Scene.pCamera[0].nIdxTarget != -1) // Does the camera have a target?
fFOV = m_Scene.GetCameraPos( vFrom, vTo, 0); // vTo is taken from the target node
else
fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, 0); // vTo is calculated from the rotation
fFOV *= bRotate ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) : (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth);
/*
We can build the model view matrix from the camera position, target and an up vector.
For this we use PVRTMat4::LookAtRH().
*/
mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);
// Calculate the projection matrix
mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);
// Update Light Position and related VGP Program constant
LightPos.x = 200.0f;
LightPos.y = 350.0f;
LightPos.z = 200.0f * PVRTABS(sin((PVRT_PI / 4.0f) + m_fLightPos));
glUniform3fv(m_SkinnedShaderProgram.auiLoc[eLightPos], 1, LightPos.ptr());
// Set up the View * Projection Matrix
PVRTMat4 mViewProjection;
mViewProjection = mProjection * mView;
glUniformMatrix4fv(m_SkinnedShaderProgram.auiLoc[eViewProj], 1, GL_FALSE, mViewProjection.ptr());
// Enable the vertex attribute arrays
for(i = 0; i < eNumAttribs; ++i) glEnableVertexAttribArray(i);
//......... (the rest of this example is omitted) .........
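The animation above is time based: each counter advances by the frame delta multiplied by a rate, then wraps so the phase stays within [0, 2*PI) and precision does not drift over time. A reduced sketch of that pattern using the same PVRT constant (AdvancePhase and its parameters are illustrative):
static void AdvancePhase(float& fPhase, float fDeltaMs, float fRadiansPerMs)
{
    fPhase += fDeltaMs * fRadiansPerMs;
    // Wrap back to the start of the cycle so the value does not grow unbounded
    if (fPhase >= PVRT_TWO_PI)
        fPhase -= PVRT_TWO_PI;
}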
Example 15: ReleaseView
/*!****************************************************************************
@Function ReleaseView
@Return bool true if no error occurred
@Description Code in ReleaseView() will be called by PVRShell when the
application quits or before a change in the rendering context.
******************************************************************************/
bool OGLES3EdgeDetection::ReleaseView()
{
// Delete the color texture
glDeleteTextures(1, &m_uiColorTexture);
// Delete the depth render buffer
glDeleteRenderbuffers(1, &m_uiDepthRenderbuffer);
// Delete the shader programs and shaders
glDeleteProgram(m_PreShader.uiId);
glDeleteShader(m_uiPreVertShader);
glDeleteShader(m_uiPreFragShader);
for (int i=0; i<eNumPostShaders; ++i)
{
glDeleteProgram(m_PostShaders[i].uiId);
glDeleteShader(m_uiPostVertShaders[i]);
glDeleteShader(m_uiPostFragShaders[i]);
}
// Delete the stored color data.
delete [] m_pvColorData->ptr();
m_pvColorData=NULL;
// Release Print3D Textures
m_Print3D.ReleaseTextures();
// Delete frame buffer objects
glDeleteFramebuffers(1, &m_uiFramebufferObject);
return true;
}