

C++ CPVRTModelPOD::GetCameraPos method code examples

This article collects typical usage examples of the C++ method CPVRTModelPOD::GetCameraPos. If you are wondering what CPVRTModelPOD::GetCameraPos does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, CPVRTModelPOD.


The following presents 13 code examples of the CPVRTModelPOD::GetCameraPos method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
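
Before the individual examples, here is a minimal sketch, distilled from the examples that follow rather than taken from any single project, of the pattern most of them share: locate the camera node in a loaded CPVRTModelPOD scene, read its position and target with GetCameraPos() when the camera has a target node (or GetCamera() when it does not), and build view and projection matrices from the result. The helper name BuildCameraMatrices and the near/far clip values are illustrative assumptions; only the CPVRTModelPOD and PVRTMat4 calls are the ones that appear in the examples below.

#include "PVRTModelPOD.h"   // CPVRTModelPOD (PowerVR SDK tools; header layout may vary between SDK versions)
#include "PVRTVector.h"     // PVRTVec3, PVRTMat4

// Hypothetical helper (not part of the SDK): fetch the data of camera number
// uiCamera from a loaded POD scene and derive the view/projection matrices.
void BuildCameraMatrices(CPVRTModelPOD& scene, unsigned int uiCamera,
                         float fAspect, bool bRotate,
                         PVRTMat4& mView, PVRTMat4& mProjection)
{
	PVRTVec3 vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are stored after the mesh and light nodes in pNode[]
	int i32CamID = scene.pNode[scene.nNumMeshNode + scene.nNumLight + uiCamera].nIdx;

	// GetCameraPos() fills in the eye position and target (taken from the target
	// node) and returns the field of view; it applies when the camera has a target.
	// Otherwise GetCamera() derives the target from the camera's rotation.
	if(scene.pCamera[i32CamID].nIdxTarget != -1)
		fFOV = scene.GetCameraPos(vFrom, vTo, uiCamera);
	else
		fFOV = scene.GetCamera(vFrom, vTo, vUp, uiCamera);

	// Build a right-handed view matrix and a perspective projection from the result.
	const float fNear = 4.0f, fFar = 5000.0f; // illustrative clip planes
	mView       = PVRTMat4::LookAtRH(vFrom, vTo, vUp);
	mProjection = PVRTMat4::PerspectiveFovRH(fFOV, fAspect, fNear, fFar,
	                                         PVRTMat4::OGL, bRotate);
}

A caller would typically pass the shell's aspect ratio and rotation flag, as the examples do via PVRShellGet(prefWidth), PVRShellGet(prefHeight) and PVRShellGet(prefIsRotated), and recompute the matrices once per frame (or once at initialization for a static camera).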

Example 1:

/*!****************************************************************************
 @Function		SetupView()
 @Return		N/A
 @Description	Sets up the view matrices required for the training course
******************************************************************************/
void OGLES3EdgeDetection::SetupView(bool bRotate)
{
	PVRTVec3 vEyePos, vLookAt, vCamUp=PVRTVec3(0.00f, 1.0001f, 0.00f);

	// Check whether a camera exists in the scene; if so, use it, otherwise create a default one.
	if(m_Scene.nNumCamera>0)
	{
		// vLookAt is taken from the target node, or..
		if(m_Scene.pCamera[0].nIdxTarget != -1) m_Scene.GetCameraPos(vEyePos, vLookAt, 0);
		// ..it is calculated from the rotation
		else m_Scene.GetCamera(vEyePos, vLookAt, vCamUp, 0);
	}
	else
	{
		//Create a default camera since none exists.
		vEyePos = PVRTVec3(0, 0, 200);
		vLookAt = PVRTVec3(0, 0, 0);
	}

	// Set the view and projection matrix for rendering to texture.
	m_mR2TView = PVRTMat4::LookAtRH(vEyePos, vLookAt, vCamUp);
	m_mR2TProjection = PVRTMat4::PerspectiveFovRH(PVRT_PI*0.125, (float)m_i32TexWidth/(float)m_i32TexHeight, g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	// The textured quad this program renders to will be rendered full screen, in orthographic mode, so doesn't need camera variables to be set.
}
Developer: joyfish, Project: GameThirdPartyLibs, Lines: 30, Source: OGLES3EdgeDetection.cpp

Example 2: PVRShellGetTime

/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage important OS events. The user has access
				to these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES3TextureStreaming::RenderScene()
{
	// Clears the color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Time-based animation, advanced at a rate equivalent to 60 frames per second.
	// Uses the shell function PVRShellGetTime() to get the time in milliseconds.
	unsigned long ulTime = PVRShellGetTime();
	unsigned long ulDeltaTime = ulTime - m_ulTimePrev;
	m_ulTimePrev = ulTime;
	m_fFrame      += (float)ulDeltaTime * (60.0f/1000.0f);
	m_fBandScroll += (float)ulDeltaTime * (60.0f/1000.0f) * c_fBandScrollSpeed;
	if(m_fFrame > m_Scene.nNumFrame - 1)
		m_fFrame = 0.0f;

	if(m_fBandScroll > 1.0f)
		m_fBandScroll = -c_fBandWidth;

	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	m_Scene.SetFrame(m_fFrame);

	// Setup the main camera
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + c_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, c_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, c_ui32Camera); // vTo is calculated from the rotation

    float fTargetAspect = 960.0f/640.0f;
    float fAspect       = (float)PVRShellGet(prefWidth) / (float)PVRShellGet(prefHeight);
    fFOV               *= fTargetAspect / fAspect;

	PVRTMat4 mView           = PVRTMat4::LookAtRH(vFrom, vTo, vUp);
	PVRTMat4 mProjection     = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), c_fCameraNear,
														  c_fCameraFar, PVRTMat4::OGL, bRotate);
	PVRTMat4 mViewProjection = mProjection * mView;

	DrawPODScene(mViewProjection);

	// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
	m_Print3D.DisplayDefaultTitle("Texture Streaming", c_pszDescription, ePVRTPrint3DSDKLogo);
	m_Print3D.Flush();

	++m_i32Frame;
	return true;
}
Developer: joyfish, Project: GameThirdPartyLibs, Lines: 63, Source: OGLES3TextureStreaming.cpp

Example 3: PVRShellGetTime

/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage important OS events. The user has access
				to these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES3IntroducingPOD::RenderScene()
{
	// Clear the color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Use shader program
	glUseProgram(m_ShaderProgram.uiId);

	/*
		Calculates the frame number to animate in a time-based manner.
		Uses the shell function PVRShellGetTime() to get the time in milliseconds.
	*/
	unsigned long ulTime = PVRShellGetTime();

	if(m_ulTimePrev > ulTime)
		m_ulTimePrev = ulTime;

	unsigned long ulDeltaTime = ulTime - m_ulTimePrev;
	m_ulTimePrev	= ulTime;
	m_fFrame += (float)ulDeltaTime * g_fDemoFrameRate;
	if (m_fFrame > m_Scene.nNumFrame - 1) m_fFrame = 0;

	// Sets the scene animation to this frame
	m_Scene.SetFrame(m_fFrame);

	/*
		Get the direction of the first light from the scene.
	*/
	PVRTVec4 vLightDirection;
	vLightDirection = m_Scene.GetLightDirection(0);
	// For direction vectors, w should be 0
	vLightDirection.w = 0.0f;

	/*
		Set up the view and projection matrices from the camera
	*/
	PVRTMat4 mView, mProjection;
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Setup the camera

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	// We can build the model view matrix from the camera position, target and an up vector.
	// For this we use PVRTMat4::LookAtRH()
	mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
	mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	/*
		A scene is composed of nodes. There are 3 types of nodes:
		- MeshNodes :
			references a mesh in the pMesh[].
			These nodes are at the beginning of the pNode[] array.
			And there are nNumMeshNode number of them.
			This way the .pod format can instantiate the same mesh several times
			with different attributes.
		- lights
		- cameras
		To draw a scene, you must go through all the MeshNodes and draw the referenced meshes.
	*/
	for (unsigned int i = 0; i < m_Scene.nNumMeshNode; ++i)
	{
		SPODNode& Node = m_Scene.pNode[i];

		// Get the node model matrix
		PVRTMat4 mWorld;
		mWorld = m_Scene.GetWorldMatrix(Node);

		// Pass the model-view-projection matrix (MVP) to the shader to transform the vertices
		PVRTMat4 mModelView, mMVP;
		mModelView = mView * mWorld;
		mMVP = mProjection * mModelView;
		glUniformMatrix4fv(m_ShaderProgram.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.f);

		// Pass the light direction in model space to the shader
		PVRTVec4 vLightDir;
		vLightDir = mWorld.inverse() * vLightDirection;

		PVRTVec3 vLightDirModel = *(PVRTVec3*)&vLightDir;
//......... some code omitted here .........
Developer: , Project: , Lines: 101, Source:

Example 4: RenderScene

/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage important OS events. The user has access
				to these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLESIntroducingPFX::RenderScene()
{
	// Clears the color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Use the loaded effect
	m_pEffect->Activate();

	/*
		Calculates the frame number to animate in a time-based manner.
		Uses the shell function PVRShellGetTime() to get the time in milliseconds.
	*/
	int iTime = PVRShellGetTime();
	int iDeltaTime = iTime - m_iTimePrev;
	m_iTimePrev	= iTime;
	m_fFrame	+= (float)iDeltaTime * DEMO_FRAME_RATE;
	if (m_fFrame > m_Scene.nNumFrame-1)
		m_fFrame = 0;

	// Sets the scene animation to this frame
	m_Scene.SetFrame(m_fFrame);

	{
		PVRTVec3	vFrom, vTo, vUp;
		VERTTYPE	fFOV;
		vUp.x = 0.0f;
		vUp.y = 1.0f;
		vUp.z = 0.0f;

		// We can get the camera position, target and field of view (fov) with GetCameraPos()
		fFOV = m_Scene.GetCameraPos(vFrom, vTo, 0) * 0.4f;

		/*
			We can build the world view matrix from the camera position, target and an up vector.
			For this we use PVRTMat4::LookAtRH().
		*/
		m_mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

		// Calculates the projection matrix
		bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
		m_mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), CAM_NEAR, CAM_FAR, PVRTMat4::OGL, bRotate);
	}

	/*
		A scene is composed of nodes. There are 3 types of nodes:
		- MeshNodes :
			references a mesh in the pMesh[].
			These nodes are at the beginning of the pNode[] array.
			And there are nNumMeshNode number of them.
			This way the .pod format can instantiate the same mesh several times
			with different attributes.
		- lights
		- cameras
		To draw a scene, you must go through all the MeshNodes and draw the referenced meshes.
	*/
	for (int i=0; i<(int)m_Scene.nNumMeshNode; i++)
	{
		SPODNode* pNode = &m_Scene.pNode[i];

		// Gets pMesh referenced by the pNode
		SPODMesh* pMesh = &m_Scene.pMesh[pNode->nIdx];

		glBindBuffer(GL_ARRAY_BUFFER, m_aiVboID[i]);

		// Gets the node model matrix
		PVRTMat4 mWorld;
		mWorld = m_Scene.GetWorldMatrix(*pNode);

		PVRTMat4 mWorldView;
		mWorldView = m_mView * mWorld;

		for(unsigned int j = 0; j < m_nUniformCnt; ++j)
		{
			switch(m_psUniforms[j].nSemantic)
			{
			case eUsPOSITION:
				{
					glVertexAttribPointer(m_psUniforms[j].nLocation, 3, GL_FLOAT, GL_FALSE, pMesh->sVertex.nStride, pMesh->sVertex.pData);
					glEnableVertexAttribArray(m_psUniforms[j].nLocation);
				}
				break;
			case eUsNORMAL:
				{
					glVertexAttribPointer(m_psUniforms[j].nLocation, 3, GL_FLOAT, GL_FALSE, pMesh->sNormals.nStride, pMesh->sNormals.pData);
					glEnableVertexAttribArray(m_psUniforms[j].nLocation);
				}
				break;
			case eUsUV:
				{
					glVertexAttribPointer(m_psUniforms[j].nLocation, 2, GL_FLOAT, GL_FALSE, pMesh->psUVW[0].nStride, pMesh->psUVW[0].pData);
//......... some code omitted here .........
Developer: anonymousjustice, Project: pvr-pi, Lines: 101, Source: OGLES2IntroducingPFX.cpp

Example 5: DoAnimation

/*!****************************************************************************
 @Function		DoAnimation
 @Description	Calculate the duck and camera animation as well as the cloud and
				water.
******************************************************************************/
void OGLESFur::DoAnimation()
{
	PVRTMat4		mCamera, mTmp;
	PVRTVec3		pvPlane[5];
	unsigned long	ui32Time;
	float			fDeltaTime;
	int i;

	if(!m_bPause)
	{
		ui32Time = PVRShellGetTime();

		fDeltaTime = (float) (ui32Time < m_ui32PrevTime ? m_ui32PrevTime - ui32Time : ui32Time - m_ui32PrevTime) + 0.1f;

		if(fDeltaTime > 50.0f) // Cap delta time
			fDeltaTime = 50.0f;

		m_ui32PrevTime = ui32Time;

		m_fCameraRot += 0.00006f * fDeltaTime;
		fDeltaTime = fDeltaTime * 0.001f;
	}
	else
		fDeltaTime = 0.0f;

	if(m_bViewMode)
	{
		// Viewing duck alone
		mCamera = PVRTMat4::Translation(0, 0, 160.0f);
		mTmp = PVRTMat4::RotationX(0.35f * (float) sin(0.0003f * m_ui32PrevTime) + 0.2f);
		mCamera = mTmp * mCamera;
		mTmp = PVRTMat4::RotationY(m_fCameraRot);
		mCamera = mTmp * mCamera;
		mTmp = PVRTMat4::Translation(m_mDuckWorld.f[12], m_mDuckWorld.f[13], m_mDuckWorld.f[14]);
		mCamera = mTmp * mCamera;

		m_vCamFrom.x += fDeltaTime * (mCamera.f[12] - m_vCamFrom.x);
		m_vCamFrom.y += fDeltaTime * (mCamera.f[13] - m_vCamFrom.y);
		m_vCamFrom.z += fDeltaTime * (mCamera.f[14] - m_vCamFrom.z);

		m_vCamTo.x	+= fDeltaTime * (m_mDuckWorld.f[12] - m_vCamTo.x);
		m_vCamTo.y	+= fDeltaTime * (m_mDuckWorld.f[13] + 25.0f - m_vCamTo.y);
		m_vCamTo.z	+= fDeltaTime * (m_mDuckWorld.f[14] - m_vCamTo.z);

		// Build view matrix
		m_mView = PVRTMat4::LookAtRH(m_vCamFrom, m_vCamTo, c_vUp);
	}
	else
	{
		//	Viewing duck in a wee river
		m_fDuckRot -= 0.1f * fDeltaTime;

		// Duck world transform
		m_mDuckWorld = PVRTMat4::Translation(140.0f, 0, 0);

		mTmp = PVRTMat4::RotationY(m_fDuckRot);
		m_mDuckWorld = mTmp * m_mDuckWorld;

		PVRTVec3	vFrom, vTo;

		// We can get the camera position and target with GetCameraPos()
		m_Scene.GetCameraPos(vFrom, vTo, 0);

		// Position camera
		mCamera = PVRTMat4::Translation(vFrom.x, vFrom.y, vFrom.z);

		mTmp = PVRTMat4::RotationY(m_fCameraRot);
		mCamera = mTmp * mCamera;

		m_vCamFrom.x += fDeltaTime * (mCamera.f[12] - m_vCamFrom.x);
		m_vCamFrom.y += fDeltaTime * (mCamera.f[13] - m_vCamFrom.y);
		m_vCamFrom.z += fDeltaTime * (mCamera.f[14] - m_vCamFrom.z);

		m_vCamTo.x	+= fDeltaTime * (2.0f * (m_mDuckWorld.f[12] - m_vCamTo.x));
		m_vCamTo.y	+= fDeltaTime * (2.0f * (m_mDuckWorld.f[13] + 25.0f - m_vCamTo.y));
		m_vCamTo.z	+= fDeltaTime * (2.0f * (m_mDuckWorld.f[14] - m_vCamTo.z));

		// Build view matrix
		m_mView = PVRTMat4::LookAtRH(m_vCamFrom, m_vCamTo, c_vUp);

		// Calc ViewProjInv matrix
		mTmp = m_mProj * m_mView;
		mTmp = mTmp.inverseEx();

		// Calculate the ground plane
		m_i32WaterPlaneNo = PVRTMiscCalculateInfinitePlane(&pvPlane->x, sizeof(*pvPlane), &c_vPlaneWater, &mTmp, &m_vCamFrom, g_fFar);

		for(i = 0; i < m_i32WaterPlaneNo; ++i)
		{
			m_pvPlaneWater[i].x		= pvPlane[i].x;
			m_pvPlaneWater[i].y		= pvPlane[i].y;
			m_pvPlaneWater[i].z		= pvPlane[i].z;
			m_pvPlaneWater[i].nx	= c_vPlaneWater.x;
			m_pvPlaneWater[i].ny	= c_vPlaneWater.y;
			m_pvPlaneWater[i].nz	= c_vPlaneWater.z;
//......... some code omitted here .........
Developer: deepbansal15, Project: Native_SDK, Lines: 101, Source: OGLESFur.cpp

Example 6: InitView

/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLESFur::InitView()
{
	// Setup the projection matrix
	glMatrixMode(GL_PROJECTION);

	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	m_mProj = PVRTMat4::PerspectiveFovRH(g_fFOV, (float)PVRShellGet(prefWidth) / (float)PVRShellGet(prefHeight), g_fNear, g_fFar, PVRTMat4::OGL, bRotate);
	glLoadMatrixf(m_mProj.f);

	// Set clear colour
	glClearColor(c_vFogColour.x, c_vFogColour.y, c_vFogColour.z, c_vFogColour.w);

	// Enable Smooth Color Shading
	glShadeModel(GL_SMOOTH);

	// Enable the depth buffer
	glEnable(GL_DEPTH_TEST);

	// Load fur data
	glGenBuffers(g_ui32MaxNoOfFurShells, m_uiShellVbo);
	UpdateFurShells();

	// Initialise 3D text
	if(m_Print3D.SetTextures(NULL, PVRShellGet(prefWidth), PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
		return false;

	// Load textures
	CPVRTString ErrorStr;

	if(!LoadTextures(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	// Create VBOs
	if(!LoadVbos())
	{
		PVRShellSet(prefExitMessage, "Failed to create VBOs");
		return false;
	}

	// Initialise camera
	m_Scene.GetCameraPos(m_vCamFrom, m_vCamTo, 0);

	m_vCamFrom = PVRTVec3(0.0f, 400.0f, 0.0f);

	// Enable fog
	glFogf(GL_FOG_MODE, GL_EXP2);
	glFogf(GL_FOG_DENSITY, c_fFogDensity);
	glFogfv(GL_FOG_COLOR, &c_vFogColour.x);
	glEnable(GL_FOG);

	// Enable lighting
	glLightfv(GL_LIGHT0, GL_POSITION, &c_vLightPosition.x);
	glLightfv(GL_LIGHT0, GL_DIFFUSE, &c_vLightColour.x);
	glLightfv(GL_LIGHT0, GL_AMBIENT, &c_vLightAmbient.x);
	glLightfv(GL_LIGHT0, GL_SPECULAR, &c_vLightColour.x);
	glEnable(GL_LIGHT0);
	glEnable(GL_LIGHTING);

	// Disable culling
	glDisable(GL_CULL_FACE);

	// Initialise time
	m_ui32PrevTime = PVRShellGetTime();
	return true;
}
Developer: deepbansal15, Project: Native_SDK, Lines: 77, Source: OGLESFur.cpp

Example 7: InitView


//......... some code omitted here .........
	if(!LoadShaders(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Initialize Print3D
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	//Set OpenGL ES render states needed for this demo

	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);

	glEnable(GL_DEPTH_TEST);

	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

	// Find the largest square power of two texture that fits into the viewport
	m_i32TexSize = 1;
	int iSize = PVRT_MIN(PVRShellGet(prefWidth), PVRShellGet(prefHeight));
	while (m_i32TexSize * 2 < iSize) m_i32TexSize *= 2;

	// Get the currently bound frame buffer object. On most platforms this just gives 0.
	glGetIntegerv(GL_FRAMEBUFFER_BINDING, &m_i32OriginalFB);

	for(int i = 0; i < 2; ++i)
	{
		// Create texture for the FBO
		glGenTextures(1, &m_uiTexture[i]);
		glBindTexture(GL_TEXTURE_2D, m_uiTexture[i]);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_i32TexSize, m_i32TexSize, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, 0);

		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

		// Create FBO
		glGenFramebuffers(1, &m_uiFbo[i]);
		glBindFramebuffer(GL_FRAMEBUFFER, m_uiFbo[i]);
		glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_uiTexture[i], 0);

		glGenRenderbuffers(1, &m_uiDepthBuffer[i]);
		glBindRenderbuffer(GL_RENDERBUFFER, m_uiDepthBuffer[i]);

		glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, m_i32TexSize, m_i32TexSize);
		glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_uiDepthBuffer[i]);

        // Check that our FBO creation was successful
        GLuint uStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);

        if(uStatus != GL_FRAMEBUFFER_COMPLETE)
        {
            m_bFBOsCreated = false;
            PVRShellOutputDebug("ERROR: Failed to initialise FBO");
            break;
        }

		// Clear the colour buffer for this FBO
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	}

	glBindFramebuffer(GL_FRAMEBUFFER, m_i32OriginalFB);

	// Setup the main camera
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	m_View = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	PVRTMat4 mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);
	m_ViewProjection = mProjection * m_View;

	// Check to see if the GL_EXT_discard_framebuffer extension is supported
    if(m_bFBOsCreated && (m_bDiscard = CPVRTgles2Ext::IsGLExtensionSupported("GL_EXT_discard_framebuffer")) != false)
	{
		m_Extensions.LoadExtensions();
		m_bDiscard = m_Extensions.glDiscardFramebufferEXT != 0;
	}

	return true;
}
Developer: klokik, Project: Examples, Lines: 101, Source: OGLES2FilmTV.cpp

Example 8: if


//......... some code omitted here .........

	for(unsigned int i = 0; i < m_Scene.nNumMaterial; ++i)
	{
		m_puiTextures[i] = 0;
		SPODMaterial* pMaterial = &m_Scene.pMaterial[i];

		if (!strcmp(pMaterial->pszName, "background"))
		{
			m_puiTextures[i] = m_uiBackgroundTex;
		}
		else if (!strcmp(pMaterial->pszName, "rust"))
		{
			m_puiTextures[i] = m_uiRustTex;
		}
	}

	// Go through the object type and find out how many shadows we are going to need
	m_ui32NoOfShadows = 0;

	for (int i = 0; i < eNumMeshes; ++i)
	{
		if(m_i32ObjectType[i] != eDoesntCast) ++m_ui32NoOfShadows;
	}

	// Build the shadow volumes and meshes

	// Create the number of shadow meshes and volumes we require
	m_pShadowMesh = new PVRTShadowVolShadowMesh[m_ui32NoOfShadows];
	m_pShadowVol  = new PVRTShadowVolShadowVol [m_ui32NoOfShadows];

	// Create the array that stores the SPODNode ID for each shadow
	m_pui32MeshIndex = new unsigned int[m_ui32NoOfShadows];

	// Go through the meshes and initialise the shadow meshes, volumes and mesh index for each required shadow
	int i32Index = 0;
	for (int i = 0; i < eNumMeshes; ++i)
	{
		if(m_i32ObjectType[i] != eDoesntCast)
		{
			m_pui32MeshIndex[i32Index] = i;

			SPODNode* pNode = &m_Scene.pNode[i];

			/*
				This function will take the POD mesh referenced by the current node and generate a
				new mesh suitable for creating shadow volumes and the shadow volume itself.
			*/
			BuildShadowVolume(&m_pShadowMesh[i32Index], &m_pShadowVol[i32Index], &m_Scene.pMesh[pNode->nIdx]);

			/*
				The function will initialise the shadow volume with regard to the mesh's current transformation
				and the light position.

				As the light position is fixed, this is only done once for static objects, whereas dynamic objects
				are updated every frame.
			*/
			BuildVolume(i32Index, &m_vLightPosWorld);

			++i32Index;
		}
	}

	// Is the screen rotated?
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	/*
		Initialize Print3D
	*/
	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	// Calculate the view matrix
	PVRTVec3	vFrom, vTo;
	float fFOV;

	// We can get the camera position, target and field of view (fov) with GetCameraPos()
	fFOV = m_Scene.GetCameraPos( vFrom, vTo, 0);
	m_mView = PVRTMat4::LookAtRH(vFrom, vTo, PVRTVec3(0, 1, 0));

	// Calculate the projection matrix
	m_mProjection = PVRTMat4::PerspectiveFovRH(fFOV,  (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), CAM_NEAR, CAM_FAR, PVRTMat4::OGL, bRotate);

	/*
		Set OpenGL ES render states needed for this training course
	*/
	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);
	glEnable(GL_DEPTH_TEST);

	// Use a nice bright blue as clear colour
	glClearColor(0.6f, 0.8f, 1.0f, 1.0f);
	glClearStencil(0);

	m_ulTimePrev = PVRShellGetTime();
	return true;
}
Developer: , Project: , Lines: 101, Source:

Example 9: PVRShellGetTime

/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage important OS events. The user has access
				to these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES2ChameleonMan::RenderScene()
{
	// Clear the color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Use shader program
	glUseProgram(m_SkinnedShaderProgram.uiId);

	if(PVRShellIsKeyPressed(PVRShellKeyNameACTION1))
	{
		m_bEnableDOT3 = !m_bEnableDOT3;
		glUniform1i(m_SkinnedShaderProgram.auiLoc[ebUseDot3], m_bEnableDOT3);
	}

	/*
		Calculates the frame number to animate in a time-based manner.
		Uses the shell function PVRShellGetTime() to get the time in milliseconds.
	*/
	unsigned long iTime = PVRShellGetTime();

	if(iTime > m_iTimePrev)
	{
		float fDelta = (float) (iTime - m_iTimePrev);
		m_fFrame += fDelta * g_fDemoFrameRate;

		// Increment the counters to make sure our animation works
		m_fLightPos	+= fDelta * 0.0034f;
		m_fWallPos	+= fDelta * 0.00027f;
		m_fBackgroundPos += fDelta * -0.000027f;

		// Wrap the Animation back to the Start
		if(m_fLightPos >= PVRT_TWO_PI)
			m_fLightPos -= PVRT_TWO_PI;

		if(m_fWallPos >= PVRT_TWO_PI)
			m_fWallPos -= PVRT_TWO_PI;

		if(m_fBackgroundPos <= 0)
			m_fBackgroundPos += 1.0f;

		if(m_fFrame > m_Scene.nNumFrame - 1)
			m_fFrame = 0;
	}

	m_iTimePrev	= iTime;

	// Set the scene animation to the current frame
	m_Scene.SetFrame(m_fFrame);

	// Set up camera
	PVRTVec3	vFrom, vTo, vUp(0.0f, 1.0f, 0.0f);
	PVRTMat4 mView, mProjection;
	PVRTVec3	LightPos;
	float fFOV;
	int i;

	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[0].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, 0); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, 0); // vTo is calculated from the rotation

	fFOV *= bRotate ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) : (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth);

	/*
		We can build the model view matrix from the camera position, target and an up vector.
		For this we use PVRTMat4::LookAtRH().
	*/
	mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	mProjection = PVRTMat4::PerspectiveFovRH(fFOV,  (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	// Update Light Position and related VGP Program constant
	LightPos.x = 200.0f;
	LightPos.y = 350.0f;
	LightPos.z = 200.0f * PVRTABS(sin((PVRT_PI / 4.0f) + m_fLightPos));

	glUniform3fv(m_SkinnedShaderProgram.auiLoc[eLightPos], 1, LightPos.ptr());

	// Set up the View * Projection Matrix
	PVRTMat4 mViewProjection;

	mViewProjection = mProjection * mView;
	glUniformMatrix4fv(m_SkinnedShaderProgram.auiLoc[eViewProj], 1, GL_FALSE, mViewProjection.ptr());

	// Enable the vertex attribute arrays
	for(i = 0; i < eNumAttribs; ++i) glEnableVertexAttribArray(i);
//......... some code omitted here .........
Developer: Abraham2591, Project: Swiftshader, Lines: 101, Source: OGLES2ChameleonMan.cpp

Example 10: glClear

/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage important OS events. The user has access
				to these events through an abstraction layer provided by PVRShell.
******************************************************************************/
bool OGLES3PhantomMask::RenderScene()
{
    if(PVRShellIsKeyPressed(PVRShellKeyNameACTION1))
        m_bEnableSH = !m_bEnableSH;

    // Clear the colour and depth buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Draw the background
    m_Background.Draw(m_ui32TexBackground);

    // Enable culling
    glEnable(GL_CULL_FACE);

    // Enable depth testing
    glEnable(GL_DEPTH_TEST);

    // Use shader program
    GLuint ProgramID, MVPLoc, ModelLoc;

    if(m_bEnableSH)
    {
        ProgramID = m_SHShaderProgram.uiId;
        MVPLoc	  = m_SHShaderProgram.auiLoc[eSHMVPMatrix];
        ModelLoc  = m_SHShaderProgram.auiLoc[eSHModel];
    }
    else
    {
        ProgramID = m_DiffuseShaderProgram.uiId;
        MVPLoc	  = m_DiffuseShaderProgram.auiLoc[eDifMVPMatrix];
        ModelLoc  = m_DiffuseShaderProgram.auiLoc[eDifModel];
    }

    glUseProgram(ProgramID);

    /*
    	Calculates the frame number to animate in a time-based manner.
    	Uses the shell function PVRShellGetTime() to get the time in milliseconds.
    */
    unsigned long ulTime = PVRShellGetTime();

    if(ulTime > m_ulTimePrev)
    {
        unsigned long ulDeltaTime = ulTime - m_ulTimePrev;
        m_fFrame += (float)ulDeltaTime * g_fDemoFrameRate;

        if(m_fFrame > m_Scene.nNumFrame - 1)
            m_fFrame = 0;

        // Sets the scene animation to this frame
        m_Scene.SetFrame(m_fFrame);
    }

    m_ulTimePrev = ulTime;

    /*
    	Set up the view and projection matrices from the camera
    */
    PVRTMat4 mView, mProjection;
    PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
    float fFOV;

    // Setup the camera
    bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

    // Camera nodes are after the mesh and light nodes in the array
    int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

    // Get the camera position, target and field of view (fov)
    if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
        fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
    else
        fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

    fFOV *= bRotate ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) : (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth);

    // We can build the model view matrix from the camera position, target and an up vector.
    // For this we use PVRTMat4::LookAtRH()
    mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

    // Calculate the projection matrix
    mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

    SPODNode& Node = m_Scene.pNode[0];

    // Get the node model matrix
    PVRTMat4 mWorld;
    mWorld = m_Scene.GetWorldMatrix(Node);

    // Set the model inverse transpose matrix
//......... some code omitted here .........
Developer: , Project: , Lines: 101, Source:

Example 11: InitView

/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES3MagicLantern::InitView()
{
	CPVRTString ErrorStr;

	// At this point m_Scene should have been already processed by InitApplication()
	// and all the POD data properly loaded, but lets do a little test just in case.
	 if (!m_Scene.IsLoaded())
	{
		PVRShellSet(prefExitMessage, "ERROR: POD file has not been loaded correctly. Cannot continue. \n");
		return false;
	}

	// Initialize VBO data
	if(!LoadVbos())
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Load and compile the shaders, link programs and load textures.
	if(!LoadPFX())
	{
		return false;
	}

	// Initialize Print3D
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);
	glEnable(GL_DEPTH_TEST);

	// Black as clear colour
	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

	// Disable blending
	glDisable(GL_BLEND);
	
	// Set up the view and projection matrices from the camera.
	// The camera does not move, so these matrices only need to be
	// calculated once.
	// If you want to make the camera dynamic, re-calculate the view matrix 
	// every frame.
	PVRTVec3 vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are after the mesh and light nodes in the array.
	// We grab camera num 0 (the only one in the scene)
	const int g_ui32Camera = 0;
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position and target 
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node.
	else
		m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation.

	// Calculate the FOV depending on the screen dimensions so everything fits in view
	// regardless of whether the screen is rotated or not.
	// If the aspect ratio is different from 640x480, adapt the FOV so the scene still looks correct.
	float fRatioWoverH = (480.0f/640.0f) * ((!bRotate) ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) :  (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth));
	
	fFOV = m_Scene.pCamera[i32CamID].fFOV / fRatioWoverH;

	// We can build the model view matrix from the camera position, target and an up vector.
	m_mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix.
	m_mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), 	m_Scene.pCamera[i32CamID].fNear, m_Scene.pCamera[i32CamID].fFar, PVRTMat4::OGL, bRotate);

	return true;
}
Developer: klokik, Project: Examples, Lines: 87, Source: OGLES3MagicLantern.cpp

Example 12: InitView

/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES3DisplacementMap::InitView()
{
	CPVRTString ErrorStr;

	/*
		Initialize VBO data
	*/
	if(!LoadVbos(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Load textures
	*/
	if(!LoadTextures(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Load and compile the shaders & link programs
	*/
	if(!LoadShaders(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Initialize Print3D
	*/
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	/*
		Set OpenGL ES render states needed for this training course
	*/
	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);

	glEnable(GL_DEPTH_TEST);

	// Use a nice bright blue as clear colour
	glClearColor(0.6f, 0.8f, 1.0f, 1.0f);

	//Get the direction of the first light from the scene.
	m_LightDir = m_Scene.GetLightDirection(0);

	// For direction vectors, w should be 0
	m_LightDir.w = 0.0f;


	//	Set up the view and projection matrices from the camera
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Setup the camera

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	// We can build the model view matrix from the camera position, target and an up vector.
	// For this we use PVRTMat4::LookAtRH()
	m_View = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	m_Projection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	// Initialize variables used for the animation
	m_ulTimePrev = PVRShellGetTime();

	return true;
}
Developer: , Project: , Lines: 96, Source:

Example 13: InitView

/*******************************************************************************
 * Function Name  : InitView
 * Inputs		  : uWidth, uHeight
 * Returns        : true if no error occurred
 * Description    : Code in InitView() will be called by the Shell upon a change
 *					in the rendering context.
 *					Used to initialize variables that are dependent on the rendering
 *					context (e.g. textures, vertex buffers, etc.)
 *******************************************************************************/
bool OGLESPolybump::InitView()
{
	CPVRTString  ErrorStr;
	SPVRTContext sContext;

	// Is the screen rotated?
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	// Initialise Print3D textures
	if(m_Print3D.SetTextures(&sContext, PVRShellGet(prefWidth), PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "Error: Failed to initialise Print3D\n");
		return false;
	}

	// Load textures
	if(!LoadTextures(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Initialise VBO data
	LoadVbos();

	// Retrieve OpenGL ES driver version
	int		i32OGLESVersionMajor = 1, i32OGLESVersionMinor = 0;
	char	*pszVersionNumber;

	pszVersionNumber = (char*) strchr((const char *)glGetString(GL_VERSION), '.');

	if(pszVersionNumber)
	{
		i32OGLESVersionMajor = pszVersionNumber[-1] - '0';
		i32OGLESVersionMinor = pszVersionNumber[+1] - '0';
	}

	// Check for support of required extensions
	if(i32OGLESVersionMajor > 1 || (i32OGLESVersionMajor == 1 && i32OGLESVersionMinor > 0))
	{
		m_bCombinersPresent = true;
	}
	else
	{
        m_bCombinersPresent = CPVRTglesExt::IsGLExtensionSupported("GL_ARB_texture_env_combine");

		if(!m_bCombinersPresent)
		{
			m_bIMGTextureFFExtPresent = CPVRTglesExt::IsGLExtensionSupported("GL_IMG_texture_env_enhanced_fixed_function");

			if(!m_bIMGTextureFFExtPresent)
			{
				PVRShellSet(prefExitMessage, "Error: Can't run this demo without support for GL_ARB_texture_env_combine or GL_IMG_texture_env_enhanced_fixed_function.\n");
				return false;
			}
		}
	}

	// Calculates the projection matrix
	m_mProjection = PVRTMat4::PerspectiveFovRH(30.0f*(3.14f/180.0f), (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), 10.0f, 8000.0f, PVRTMat4::OGL, bRotate);

	glMatrixMode(GL_PROJECTION);
	glLoadMatrixf(m_mProjection.f);

	// Set up the view matrix from the camera's position, target and up vector using PVRTMat4::LookAtRH.
	PVRTVec3	vFrom, vTo, vUp(0.0f,1.0f,0.0f);

	// We can get the camera position and target with GetCameraPos()
	m_Scene.GetCameraPos( vFrom, vTo, 0);
	m_mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// setup clear colour
	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

	// Enable states
	glActiveTexture(GL_TEXTURE0);
	glEnable(GL_TEXTURE_2D);

	glEnable(GL_DEPTH_TEST);
	glEnable(GL_CULL_FACE);
	return true;
}
Developer: joyfish, Project: GameThirdPartyLibs, Lines: 91, Source: OGLESPolyBump.cpp


Note: The CPVRTModelPOD::GetCameraPos method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.