This page collects typical usage examples of the C++ method qcar::State::getNumTrackableResults. If you have been wondering how State::getNumTrackableResults is used in practice and what it is good for, the hand-picked code examples below should help. You can also read more about the containing class, qcar::State.
In total, 14 code examples of State::getNumTrackableResults are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
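All 14 examples share the same skeleton: begin a rendering section, iterate over the trackable results, then end the section. Distilled here for reference, using only calls that appear in the examples below:
QCAR::State state = QCAR::Renderer::getInstance().begin(); // begin the rendering section
for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
    // Each result pairs a Trackable with its pose in the current frame
    const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
    const QCAR::Trackable& trackable = result->getTrackable();
    QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
    // ... render an augmentation or report the trackable here ...
}
QCAR::Renderer::getInstance().end(); // always balance begin() with end()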
Example 1: glClear
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_Dominoes_DominoesRenderer_renderFrame(JNIEnv* , jobject)
{
// Clear the color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
// Collect the names of trackables found this frame, to know what to hide
std::vector<const char*> found;
// Did we find any trackables this frame?
if (state.getNumTrackableResults() > 0) {
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); ++tIdx) {
// Get this trackable result
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = trackableResult->getTrackable();
found.push_back(trackable.getName());
// The image target specific result:
assert(trackableResult->getType() == QCAR::TrackableResult::IMAGE_TARGET_RESULT);
const QCAR::ImageTargetResult* imageTargetResult =
static_cast<const QCAR::ImageTargetResult*>(trackableResult);
// If this is our first time seeing the target, display a tip
if (!displayedMessage) {
displayMessage("Find marker man!");
displayedMessage = true;
}
//const QCAR::TrackerManager& trackerManager = QCAR::TrackerManager::getInstance();
//const QCAR::Tracker* tracker = trackerManager.getTracker(QCAR::Tracker::IMAGE_TRACKER);
const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::Vec2F cameraPoint = QCAR::Tool::projectPoint(cameraCalibration, trackableResult->getPose(), QCAR::Vec3F(0,0,0));
QCAR::Vec2F xyPoint = cameraPointToScreenPoint(cameraPoint);
showTrackerButton(xyPoint.data[0], xyPoint.data[1], trackable.getName());
}
} else {
hideTrackerButton(found);
}
QCAR::Renderer::getInstance().end();
}
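Example 1 (and several later examples) calls a cameraPointToScreenPoint helper that is not shown in the listing. A plausible implementation, assembled from the inline equivalent in Example 9 (screenWidth, screenHeight, and isActivityInPortraitMode are assumed globals, as in that example):
QCAR::Vec2F cameraPointToScreenPoint(QCAR::Vec2F cameraPoint)
{
    QCAR::VideoMode videoMode = QCAR::CameraDevice::getInstance().getVideoMode(QCAR::CameraDevice::MODE_DEFAULT);
    QCAR::VideoBackgroundConfig config = QCAR::Renderer::getInstance().getVideoBackgroundConfig();
    int xOffset = ((int) screenWidth - config.mSize.data[0]) / 2.0f + config.mPosition.data[0];
    int yOffset = ((int) screenHeight - config.mSize.data[1]) / 2.0f - config.mPosition.data[1];
    if (isActivityInPortraitMode)
    {
        // The camera image is rotated 90 degrees in portrait mode
        int rotatedX = videoMode.mHeight - cameraPoint.data[1];
        int rotatedY = cameraPoint.data[0];
        return QCAR::Vec2F(rotatedX * config.mSize.data[0] / (float) videoMode.mHeight + xOffset,
                           rotatedY * config.mSize.data[1] / (float) videoMode.mWidth + yOffset);
    }
    return QCAR::Vec2F(cameraPoint.data[0] * config.mSize.data[0] / (float) videoMode.mWidth + xOffset,
                       cameraPoint.data[1] * config.mSize.data[1] / (float) videoMode.mHeight + yOffset);
}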
Example 2: glClear
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_ImageTargets_ImageTargetsRenderer_renderFrame(JNIEnv *, jobject)
{
//LOG("Java_com_qualcomm_QCARSamples_ImageTargets_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
#ifdef USE_OPENGL_ES_1_1
// Set GL11 flags:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
#endif
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::Vec2F cameraPoint = QCAR::Tool::projectPoint(cameraCalibration, result->getPose(), QCAR::Vec3F(0,0,0));
QCAR::Vec2F xyPoint = cameraPointToScreenPoint(cameraPoint);
// LOG("xyPoint %f, %f ", xyPoint.data[0], xyPoint.data[1] );
if (xyPoint.data[1] > (screenHeight / 2) + tolerancy || xyPoint.data[1] < (screenHeight / 2) - tolerancy) {
continue;
}
// Choose the texture based on the target name:
int textureIndex = 0;
// LOG("texture = %s", trackable->getName());
// LOG("textureCount %d", textureCount);
char trackJpg[strlen(trackable.getName()) + 5]; // +5: ".jpg" (4 chars) plus the terminating '\0'
strcpy(trackJpg, trackable.getName());
strcat(trackJpg, ".jpg");
// LOG("trackJpg %s", trackJpg);
char trackPng[strlen(trackable.getName()) + 5]; // +5: ".png" (4 chars) plus the terminating '\0'
strcpy(trackPng, trackable.getName());
strcat(trackPng, ".png");
// LOG("trackPng %s", trackPng);
for(int i = 0; i < textureCount; i++) {
// LOG("textures[i]->mName %s", textures[i]->mName);
if (strcmp(textures[i]->mName, trackPng) == 0 ||
strcmp(textures[i]->mName, trackJpg) == 0) {
textureIndex = i;
}
}
const Texture* const thisTexture = textures[textureIndex];
// LOG("thisTexture->mName %s", textures[textureIndex]->mName);
#ifdef USE_OPENGL_ES_1_1
// Load projection matrix:
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMatrix.data);
// Load model view matrix:
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelViewMatrix.data);
glTranslatef(0.f, 0.f, kObjectScale);
glScalef(kObjectScale, kObjectScale, kObjectScale);
// Draw object:
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &teapotNormals[0]);
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &teapotIndices[0]);
#else
/*
QCAR::Matrix44F modelViewProjection;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0] ,
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
//......... rest of the code omitted .........
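The OpenGL ES 2.0 branch above is commented out and the listing is truncated. When enabled, it would presumably continue like the complete draw sequence in Example 8; a sketch, not part of the original listing:
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                      (const GLvoid*) &teapotVertices[0]);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                      (const GLvoid*) &teapotNormals[0]);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                      (const GLvoid*) &teapotTexCoords[0]);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glUniform1i(texSampler2DHandle, 0);
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (GLfloat*) &modelViewProjection.data[0]);
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
               (const GLvoid*) &teapotIndices[0]);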
Example 3: glClear
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv *, jobject)
{
//LOG("Java_com_qualcomm_QCARSamples_VideoPlayback_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
for (int i=0; i<NUM_TARGETS; i++)
{
isTracking[i] = false;
targetPositiveDimensions[i].data[0] = 0.0;
targetPositiveDimensions[i].data[1] = 0.0;
}
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
const QCAR::ImageTarget& imageTarget = (const QCAR::ImageTarget&) trackableResult->getTrackable();
int currentTarget;
// We store the modelview matrix to be used later by the tap calculation
if (strcmp(imageTarget.getName(), "stones") == 0)
currentTarget=STONES;
else
currentTarget=CHIPS;
modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
isTracking[currentTarget] = true;
targetPositiveDimensions[currentTarget] = imageTarget.getSize();
// The pose delivers the center of the target, thus the dimensions
// go from -width/2 to width/2, same for height
targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
targetPositiveDimensions[currentTarget].data[1] /= 2.0f;
// If the movie is ready to start playing or it has reached the end
// of playback we render the keyframe
if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
(currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
{
QCAR::Matrix44F modelViewMatrixKeyframe =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F modelViewProjectionKeyframe;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
// Here we use the aspect ratio of the keyframe since it
// is likely that it is not a perfect square
float ratio=1.0;
if (textures[currentTarget]->mSuccess)
ratio = keyframeQuadAspectRatio[currentTarget];
else
ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];
SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
targetPositiveDimensions[currentTarget].data[0]*ratio,
targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrixKeyframe.data[0] ,
&modelViewProjectionKeyframe.data[0]);
glUseProgram(keyframeShaderID);
// Prepare for rendering the keyframe
glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadVertices[0]);
glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadNormals[0]);
glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadTexCoords[0]);
//......... rest of the code omitted .........
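The keyframe draw call is cut off above. Based on the attribute setup already shown and the complete draw sequences elsewhere on this page, it presumably continues along these lines (keyframeMVPMatrixHandle, keyframeTexSampler2DHandle, NUM_QUAD_INDEX, and quadIndices are assumed names, in keeping with the quad buffers already referenced):
glEnableVertexAttribArray(keyframeVertexHandle);
glEnableVertexAttribArray(keyframeNormalHandle);
glEnableVertexAttribArray(keyframeTexCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[currentTarget]->mTextureID); // keyframe image
glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                   (GLfloat*) &modelViewProjectionKeyframe.data[0]);
glUniform1i(keyframeTexSampler2DHandle, 0);
glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
               (const GLvoid*) &quadIndices[0]);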
Example 4: if
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_ImageTargets_ImageTargetsRenderer_renderFrame(JNIEnv *env, jobject obj)
{
//LOG("Java_com_qualcomm_QCARSamples_ImageTargets_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
#ifdef USE_OPENGL_ES_1_1
// Set GL11 flags:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
#endif
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW);//Front camera
else
glFrontFace(GL_CCW);//Back camera
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
// Choose the texture based on the target name:
int textureIndex;
if (strcmp(trackable.getName(), "chips") == 0)
{
textureIndex = 0;
}
else if (strcmp(trackable.getName(), "stones") == 0)
{
textureIndex = 1;
}
else
{
textureIndex = 2;
}
const Texture* const thisTexture = textures[textureIndex];
#ifdef USE_OPENGL_ES_1_1
// Load projection matrix:
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMatrix.data);
// Load model view matrix:
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelViewMatrix.data);
glTranslatef(0.f, 0.f, kObjectScale);
glScalef(kObjectScale, kObjectScale, kObjectScale);
// Draw object:
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
//glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &cubeVerts[0]);
glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &cubeNormals[0]);
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &teapotIndices[0]);
#else
QCAR::Matrix44F modelViewProjection;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0] ,
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, bananaVerts);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, bananaNormals);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, bananaTexCoords);
// glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
// (const GLvoid*) &teapotVertices[0]);
// glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
//......... rest of the code omitted .........
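A small aside on the name-to-texture branching in this example: the strcmp chain can be factored into a helper, which keeps the render loop short as more targets are added (hypothetical helper, not part of the original):
// Hypothetical helper encapsulating the strcmp chain above
int textureIndexForTarget(const char* name)
{
    if (strcmp(name, "chips") == 0) return 0;
    if (strcmp(name, "stones") == 0) return 1;
    return 2; // fallback texture
}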
Example 5: glClear
// ----------------------------------------------------------------------------
// renderFrame Method - Takes care of drawing in the different render states
// ----------------------------------------------------------------------------
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_CloudRecognition_CloudRecoRenderer_renderFrame(JNIEnv *, jobject)
{
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
if (deleteCurrentProductTexture)
{
// Deletes the product texture if necessary
if (productTexture != 0)
{
glDeleteTextures(1, &(productTexture->mTextureID));
delete productTexture;
productTexture = 0;
}
deleteCurrentProductTexture = false;
}
// If the render state indicates that the texture is generated it generates
// the OpenGL texture for start drawing the plane with the book data
if (renderState == RS_TEXTURE_GENERATED)
{
generateProductTextureInOpenGL();
}
// Did we find any trackables this frame?
if (state.getNumTrackableResults() > 0)
{
trackingStarted = true;
// If we are already tracking something we don't need
// to wait any frame before starting the 2D transition
// when the target gets lost
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition = 0;
pthread_mutex_unlock(&framesToSkipMutex);
// Gets current trackable result
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(0);
if (trackableResult == NULL)
{
return;
}
modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
// Get the size of the ImageTarget
QCAR::ImageTargetResult *imageResult = (QCAR::ImageTargetResult *)trackableResult;
targetSize = imageResult->getTrackable().getSize();
// Renders the Augmentation View with the 3D Book data Panel
renderAugmentation(trackableResult);
}
else
{
// Manages the 3D to 2D Transition initialization
if (!scanningMode && showAnimation3Dto2D && renderState == RS_NORMAL
&& framesToSkipBeforeRenderingTransition == 0)
{
startTransitionTo2D();
}
// Reduces the number of frames to wait before triggering
// the transition by 1
if( framesToSkipBeforeRenderingTransition > 0 && renderState == RS_NORMAL)
{
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition -= 1;
pthread_mutex_unlock(&framesToSkipMutex);
}
}
// Logic for rendering Transition to 2D
if (renderState == RS_TRANSITION_TO_2D && showAnimation3Dto2D)
{
renderTransitionTo2D();
}
// Logic for rendering Transition to 3D
if (renderState == RS_TRANSITION_TO_3D )
{
renderTransitionTo3D();
}
//......... rest of the code omitted .........
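The render states tested in this example (RS_NORMAL, RS_TEXTURE_GENERATED, RS_TRANSITION_TO_2D, RS_TRANSITION_TO_3D) are application-defined. A declaration consistent with how the code uses them would look like this (the ordering is an assumption):
enum RenderState
{
    RS_NORMAL,            // regular tracking and rendering
    RS_TEXTURE_GENERATED, // product texture ready to be uploaded to OpenGL
    RS_TRANSITION_TO_2D,  // animating from the AR view to the 2D view
    RS_TRANSITION_TO_3D   // animating back to the AR view
};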
Example 6: if
JNIEXPORT void JNICALL
Java_rajawali_vuforia_RajawaliVuforiaRenderer_renderFrame(JNIEnv* env,
jobject object, jint frameBufferId, int frameBufferTextureId) {
//LOG("Java_com_qualcomm_QCARSamples_FrameMarkers_GLRenderer_renderFrame");
jclass ownerClass = env->GetObjectClass(object);
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
glBindFramebuffer(GL_FRAMEBUFFER, frameBufferId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
frameBufferTextureId, 0);
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
jfloatArray modelViewMatrixOut = env->NewFloatArray(16);
// Did we find any trackables this frame?
for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++) {
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(
tIdx);
const QCAR::Trackable& trackable = trackableResult->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(
trackableResult->getPose());
if (isActivityInPortraitMode)
Utils::rotatePoseMatrix(90.0f, 0, 1.0f, 0,
&modelViewMatrix.data[0]);
Utils::rotatePoseMatrix(-90.0f, 1.0f, 0, 0, &modelViewMatrix.data[0]);
if (trackable.isOfType(QCAR::Marker::getClassType())) {
jmethodID foundFrameMarkerMethod = env->GetMethodID(ownerClass,
"foundFrameMarker", "(I[F)V");
env->SetFloatArrayRegion(modelViewMatrixOut, 0, 16,
modelViewMatrix.data);
env->CallVoidMethod(object, foundFrameMarkerMethod,
(jint) trackable.getId(), modelViewMatrixOut);
} else if (trackable.isOfType(QCAR::CylinderTarget::getClassType())
|| trackable.isOfType(QCAR::ImageTarget::getClassType())
|| trackable.isOfType(QCAR::MultiTarget::getClassType())) {
jmethodID foundImageMarkerMethod = env->GetMethodID(ownerClass,
"foundImageMarker", "(Ljava/lang/String;[F)V");
env->SetFloatArrayRegion(modelViewMatrixOut, 0, 16,
modelViewMatrix.data);
const char* trackableName = trackable.getName();
jstring trackableNameJava = env->NewStringUTF(trackableName);
env->CallVoidMethod(object, foundImageMarkerMethod,
trackableNameJava, modelViewMatrixOut);
}
}
env->DeleteLocalRef(modelViewMatrixOut);
if (state.getNumTrackableResults() == 0) {
jmethodID noFrameMarkersFoundMethod = env->GetMethodID(ownerClass,
"noFrameMarkersFound", "()V");
env->CallVoidMethod(object, noFrameMarkersFoundMethod);
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
QCAR::Renderer::getInstance().end();
}
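A performance note on this example: it resolves the jmethodIDs on every frame. Method IDs remain valid as long as the class stays loaded, so they can be looked up once and reused (a sketch of a hypothetical refactoring):
// Cache the JNI method lookups instead of repeating them per frame
static jmethodID foundFrameMarkerMethod = 0;
static jmethodID foundImageMarkerMethod = 0;
if (foundFrameMarkerMethod == 0)
{
    jclass ownerClass = env->GetObjectClass(object);
    foundFrameMarkerMethod = env->GetMethodID(ownerClass, "foundFrameMarker", "(I[F)V");
    foundImageMarkerMethod = env->GetMethodID(ownerClass, "foundImageMarker", "(Ljava/lang/String;[F)V");
}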
Example 7: glClear
JNIEXPORT void JNICALL
Java_com_codered_ared_TextRecoRenderer_renderFrame(JNIEnv * env, jobject obj)
{
//LOG("JJava_com_codered_ared_TextRecoRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We need front face CW for the back camera and front face CCW for the front camera...
// or, more accurately, CW for 0 or 2 reflections and CCW for 1 reflection
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
{
glFrontFace(GL_CCW); //Front camera
}
else
{
glFrontFace(GL_CW); //Back camera
}
// Enable blending to support transparency
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
jclass rendererJavaClass = env->GetObjectClass(obj);
env->CallVoidMethod(obj, env->GetMethodID(rendererJavaClass, "wordsStartLoop", "()V"));
NbWordsFound = 0;
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
QCAR::Vec2F wordBoxSize(0, 0);
if (result->getType() == QCAR::TrackableResult::WORD_RESULT)
{
const QCAR::WordResult* wordResult = (const QCAR::WordResult*) result;
// Get the word
const QCAR::Word& word = wordResult->getTrackable();
const QCAR::Obb2D& obb = wordResult->getObb();
wordBoxSize = word.getSize();
if (word.getStringU())
{
// in portrait, the obb coordinate is based on
// a 0,0 position being in the upper right corner
// with :
// X growing from top to bottom and
// Y growing from right to left
//
// we convert those coordinates to be more natural
// with our application:
// - 0,0 is the upper left corner
// - X grows from left to right
// - Y grows from top to bottom
float wordx = - obb.getCenter().data[1];
float wordy = obb.getCenter().data[0];
// For debugging purposes convert the string to 7bit ASCII
// (if possible) and log it.
char* stringA = 0;
if (unicodeToAscii(word, stringA))
{
// we store the word
if (NbWordsFound < MAX_NB_WORDS)
{
struct WordDesc* wordDesc = &WordsFound[NbWordsFound]; // renamed from "word" to avoid shadowing the QCAR::Word above
NbWordsFound++;
strncpy(wordDesc->text, stringA, MAX_WORD_LENGTH - 1);
wordDesc->text[MAX_WORD_LENGTH - 1] = '\0';
wordDesc->Ax = wordx - (int)(wordBoxSize.data[0] / 2);
wordDesc->Ay = wordy - (int)(wordBoxSize.data[1] / 2);
wordDesc->Bx = wordx + (int)(wordBoxSize.data[0] / 2);
wordDesc->By = wordy + (int)(wordBoxSize.data[1] / 2);
}
delete[] stringA;
}
}
}
else
{
//......... rest of the code omitted .........
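The unicodeToAscii helper used above is not shown. A minimal sketch of what it is assumed to do, converting the word's UTF-16 string to 7-bit ASCII and allocating a buffer the caller frees with delete[] (the null-terminated getStringU() buffer is an assumption):
bool unicodeToAscii(const QCAR::Word& word, char*& asciiString)
{
    const QCAR::UInt16* ustr = word.getStringU(); // assumed null-terminated UTF-16
    if (ustr == 0)
        return false;
    int length = 0;
    while (ustr[length] != 0)
        length++;
    asciiString = new char[length + 1];
    for (int i = 0; i < length; ++i)
    {
        if (ustr[i] > 127) // not representable as 7-bit ASCII
        {
            delete[] asciiString;
            asciiString = 0;
            return false;
        }
        asciiString[i] = (char) ustr[i];
    }
    asciiString[length] = '\0';
    return true;
}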
Example 8: glDisableVertexAttribArray
JNIEXPORT void JNICALL
Java_com_tvc_supastriker_SupaStrikerRenderer_renderFrame(JNIEnv* env, jobject obj){
LOG("Java_com_tvc_supastriker_SupaStrikerRenderer_renderFrame");
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QCAR::State state = QCAR::Renderer::getInstance().begin();
QCAR::Renderer::getInstance().drawVideoBackground();
#ifdef USE_OPENGL_ES_1_1
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
//glDisable(GL_LIGHTING);
glEnable(GL_LIGHTING);
#endif
glEnable(GL_DEPTH_TEST);
//glEnable(GL_CULL_FACE);
glDisable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
    glFrontFace(GL_CCW); //Front camera
else
    glFrontFace(GL_CCW); //Back camera (note: both branches set CCW, so the reflection test has no effect; winding is moot anyway since GL_CULL_FACE is disabled above)
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++){
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
int textureIndex = 0; // initialize: only "SupaStrika" is checked below, so avoid reading an indeterminate value
if(strcmp(trackable.getName(), "SupaStrika") == 0){
textureIndex = 0;
}
const Texture* const thisTexture = textures[textureIndex];
#ifdef USE_OPENGL_ES_1_1
//load projection matrix
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMatrix.data);
//load model view matrix
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelViewMatrix.data);
glTranslatef(0.f, 0.f, kObjectScale);
glScalef(kObjectScale, kObjectScale, kObjectScale);
//draw object
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &teapotNormals[0]);
//glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
// (const GLvoid*) &teapotIndices[0]);
glDrawArrays(GL_TRIANGLES, 0, NUM_TEAPOT_OBJECT_VERTEX);
#else
QCAR::Matrix44F modelViewProjection;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0],
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotVertices[0]);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotNormals[0]);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotTexCoords[0]);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glUniform1i(texSampler2DHandle, 0);
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
(GLfloat*) &modelViewProjection.data[0]);
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &teapotIndices[0]);
SampleUtils::checkGlError("SupaStriker renderFrame");
#endif
}
glDisable(GL_DEPTH_TEST);
#ifdef USE_OPENGL_ES_1_1
glDisable(GL_TEXTURE_2D);
//......... rest of the code omitted .........
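Example 8 is cut off inside the OpenGL ES 1.1 cleanup. The missing tail presumably mirrors the setup flags at the top of the function and closes the rendering section; a sketch under that assumption:
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
#else
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
#endif
QCAR::Renderer::getInstance().end(); // balance the begin() at the top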
Example 9: assert
JNIEXPORT void JNICALL
Java_com_wheelphone_targetNavigation_WheelphoneTargetNavigation_getTrackInfo(JNIEnv *env, jobject obj)
{
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
//QCAR::Vec2F markerSize;
jint jx[4] = {0};
jint jy[4] = {0};
jfloat distance[4] = {0};
jfloat cam_x[4] = {0};
jfloat cam_y[4] = {0};
jfloat cam_z[4] = {0};
jfloat target_pose_x[4] = {0}; // x, y, z coordinates of the targets with respect to the camera frame
jfloat target_pose_y[4] = {0};
jfloat target_pose_z[4] = {0};
jboolean detected[4] = {false};
jclass javaClass = env->GetObjectClass(obj); // obj is the Java object on which this native method was called, i.e. a FrameMarkersRenderer instance
//jclass javaClass = env->FindClass("Lcom/wheelphone/targetNavigation/WheelphoneTargetNavigation;"); // doesn't work!
jmethodID method = env->GetMethodID(javaClass, "updateMarkersInfo", "(IZIIFFFF)V");
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
if(trackableResult == NULL) {
continue;
}
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
//if(modelViewMatrix == NULL) {
// continue;
//}
// Choose the texture based on the target name:
int textureIndex = 0;
// Check the type of the trackable:
assert(trackableResult->getType() == QCAR::TrackableResult::MARKER_RESULT);
const QCAR::MarkerResult* markerResult = static_cast<
const QCAR::MarkerResult*>(trackableResult);
if(markerResult == NULL) {
continue;
}
const QCAR::Marker& marker = markerResult->getTrackable();
//if(marker == NULL) {
// continue;
//}
textureIndex = marker.getMarkerId();
//markerSize = marker.getSize(); // this is the size specified during marker creation! Not the current size!
assert(textureIndex < textureCount);
// Select which model to draw:
const GLvoid* vertices = 0;
const GLvoid* normals = 0;
const GLvoid* indices = 0;
const GLvoid* texCoords = 0;
int numIndices = 0;
QCAR::Vec2F result(0,0);
const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::Vec2F cameraPoint = QCAR::Tool::projectPoint(cameraCalibration, trackableResult->getPose(), QCAR::Vec3F(0, 0, 0));
QCAR::VideoMode videoMode = QCAR::CameraDevice::getInstance().getVideoMode(QCAR::CameraDevice::MODE_OPTIMIZE_QUALITY); //MODE_DEFAULT);
QCAR::VideoBackgroundConfig config = QCAR::Renderer::getInstance().getVideoBackgroundConfig();
//if(config == NULL) {
// continue;
//}
int xOffset = ((int) screenWidth - config.mSize.data[0]) / 2.0f + config.mPosition.data[0];
int yOffset = ((int) screenHeight - config.mSize.data[1]) / 2.0f - config.mPosition.data[1];
if (isActivityInPortraitMode)
{
// camera image is rotated 90 degrees
int rotatedX = videoMode.mHeight - cameraPoint.data[1];
int rotatedY = cameraPoint.data[0];
result = QCAR::Vec2F(rotatedX * config.mSize.data[0] / (float) videoMode.mHeight + xOffset,
rotatedY * config.mSize.data[1] / (float) videoMode.mWidth + yOffset);
}
else
{
result = QCAR::Vec2F(cameraPoint.data[0] * config.mSize.data[0] / (float) videoMode.mWidth + xOffset,
cameraPoint.data[1] * config.mSize.data[1] / (float) videoMode.mHeight + yOffset);
}
jx[textureIndex] = (int)result.data[0];
jy[textureIndex] = (int)result.data[1];
// get position and orientation of the target respect to the camera reference frame
QCAR::Matrix34F pose = trackableResult->getPose();
target_pose_x[textureIndex] = pose.data[3];
target_pose_y[textureIndex] = pose.data[7];
target_pose_z[textureIndex] = pose.data[11];
QCAR::Vec3F position(pose.data[3], pose.data[7], pose.data[11]);
// dist = magnitude of the translation vector = sqrt(x*x + y*y + z*z)
//......... rest of the code omitted .........
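The listing stops right after the translated comment. The computation it describes, written the way Example 13 below does it, would be (a sketch, not the original code):
// distance = magnitude of the translation vector
distance[textureIndex] = sqrt(position.data[0] * position.data[0] +
                              position.data[1] * position.data[1] +
                              position.data[2] * position.data[2]);
detected[textureIndex] = true;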
Example 10: atan
JNIEXPORT void JNICALL
Java_com_ar4android_rayPickingJME_RayPickingJME_updateTracking(JNIEnv *env, jobject obj)
{
//LOG("Java_com_ar4android_rayPickingJME_RayPickingJMEActivity_GLRenderer_renderFrame");
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
// QCAR::Renderer::getInstance().drawVideoBackground();
// if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
//const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
//get the camera transformation
QCAR::Matrix44F inverseMV = MathUtil::Matrix44FInverse(modelViewMatrix);
//QCAR::Matrix44F invTranspMV = modelViewMatrix;
QCAR::Matrix44F invTranspMV = MathUtil::Matrix44FTranspose(inverseMV);
//get position
float cam_x = invTranspMV.data[12];
float cam_y = invTranspMV.data[13];
float cam_z = invTranspMV.data[14];
//get rotation
float cam_right_x = invTranspMV.data[0];
float cam_right_y = invTranspMV.data[1];
float cam_right_z = invTranspMV.data[2];
float cam_up_x = invTranspMV.data[4];
float cam_up_y = invTranspMV.data[5];
float cam_up_z = invTranspMV.data[6];
float cam_dir_x = invTranspMV.data[8];
float cam_dir_y = invTranspMV.data[9];
float cam_dir_z = invTranspMV.data[10];
//get perspective transformation
float nearPlane = 1.0f;
float farPlane = 1000.0f;
const QCAR::CameraCalibration& cameraCalibration =
QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::VideoBackgroundConfig config = QCAR::Renderer::getInstance().getVideoBackgroundConfig();
float viewportWidth = config.mSize.data[0];
float viewportHeight = config.mSize.data[1];
QCAR::Vec2F size = cameraCalibration.getSize();
QCAR::Vec2F focalLength = cameraCalibration.getFocalLength();
float fovRadians = 2 * atan(0.5f * size.data[1] / focalLength.data[1]);
float fovDegrees = fovRadians * 180.0f / M_PI;
float aspectRatio=size.data[0]/size.data[1];
//adjust for screen vs camera size distortion
float viewportDistort=1.0;
if (viewportWidth != screenWidth)
{
viewportDistort = viewportWidth / (float) screenWidth;
fovDegrees=fovDegrees*viewportDistort;
aspectRatio=aspectRatio/viewportDistort;
}
if (viewportHeight != screenHeight)
{
viewportDistort = viewportHeight / (float) screenHeight;
fovDegrees=fovDegrees/viewportDistort;
aspectRatio=aspectRatio*viewportDistort;
}
//JNIEnv *env;
//jvm->AttachCurrentThread((void **)&env, NULL);
jclass activityClass = env->GetObjectClass(obj);
jmethodID setCameraPerspectiveMethod = env->GetMethodID(activityClass,"setCameraPerspectiveNative", "(FF)V");
env->CallVoidMethod(obj,setCameraPerspectiveMethod,fovDegrees,aspectRatio);
// jclass activityClass = env->GetObjectClass(obj);
jmethodID setCameraViewportMethod = env->GetMethodID(activityClass,"setCameraViewportNative", "(FFFF)V");
env->CallVoidMethod(obj,setCameraViewportMethod,viewportWidth,viewportHeight,cameraCalibration.getSize().data[0],cameraCalibration.getSize().data[1]);
// jclass activityClass = env->GetObjectClass(obj);
jmethodID setCameraPoseMethod = env->GetMethodID(activityClass,"setCameraPoseNative", "(FFF)V");
env->CallVoidMethod(obj,setCameraPoseMethod,cam_x,cam_y,cam_z);
//jclass activityClass = env->GetObjectClass(obj);
jmethodID setCameraOrientationMethod = env->GetMethodID(activityClass,"setCameraOrientationNative", "(FFFFFFFFF)V");
env->CallVoidMethod(obj,setCameraOrientationMethod,cam_right_x,cam_right_y,cam_right_z,
cam_up_x,cam_up_y,cam_up_z,cam_dir_x,cam_dir_y,cam_dir_z);
//......... rest of the code omitted .........
Example 11: if
JNIEXPORT void JNICALL
//Java_com_miosys_finder_ui_ImageTargetsRenderer_renderFrame(JNIEnv *, jobject)
Java_com_miosys_finder_ui_ImageTargetsRenderer_renderFrame(JNIEnv* env, jobject obj)
{
LOG("Java_com_miosys_finder_ui_PfinderTargets_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
if(!isExtendedTrackingActivated)
{
/* Call the Java method that switches to the ListView activity once an object is detected */
jstring jstrTargetName = env->NewStringUTF(trackable.getName());
jclass renderClass = env->GetObjectClass(obj);
jmethodID switchToListViewID = env->GetMethodID(renderClass,"switchToListView", "(Ljava/lang/String;)V");
env->CallVoidMethod(obj, switchToListViewID, jstrTargetName);
/*
// Choose the texture based on the target name:
int textureIndex;
if (strcmp(trackable.getName(), "chips") == 0)
{
textureIndex = 0;
}
else if (strcmp(trackable.getName(), "stones") == 0)
{
textureIndex = 1;
}
else
{
textureIndex = 2;
}
// if(strcmp(trackable.getName(), "P1_01") == 0){
// textureIndex = 0;
// }
// else if(strcmp(trackable.getName(),"P1_02") == 1){
// textureIndex = 1;
// }
// else {
// textureIndex = 2;
// }
const Texture* const thisTexture = textures[textureIndex];
QCAR::Matrix44F modelViewProjection;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0] ,
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotVertices[0]);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotNormals[0]);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotTexCoords[0]);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0);
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
(GLfloat*)&modelViewProjection.data[0] );
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
//......... rest of the code omitted .........
Example 12: glClear
void
MSRenderer::renderFrame() {
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
MSRenderer::scalePoseMatrix(MSController::getFrameRatio(),
1,
1,
&modelViewMatrix.data[0]);
// get the target info
void *userData = trackableResult->getTrackable().getUserData();
MSTargetInfo *info = static_cast<MSTargetInfo *>(userData);
MSTexture *tex = info->getTexture();
MSModel *model = info->getModel();
// Bind texture to OpenGL if not done yet
if (!tex->mHasID) {
glGenTextures(1, &(tex->mTextureID));
glBindTexture(GL_TEXTURE_2D, tex->mTextureID);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tex->mWidth,
tex->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
(GLvoid*) tex->mData);
tex->mHasID = true;
}
MSRenderer::multiplyMatrix(&modelViewMatrix.data[0],
info->getPose(),
&modelViewMatrix.data[0]);
QCAR::Matrix44F modelViewProjection;
MSRenderer::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0] ,
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, model->vertices);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, model->normals);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, model->texCoords);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex->mTextureID);
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (GLfloat*)modelViewProjection.data);
glUniform1i(texSampler2DHandle, 0);
glDrawElements(GL_TRIANGLES, 3*model->nFaces, GL_UNSIGNED_SHORT, model->faces);
}
glDisable(GL_DEPTH_TEST);
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
glDisable(GL_BLEND);
QCAR::Renderer::getInstance().end();
}
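For reference, the user-data object consumed in this example implies an interface roughly like the following (a hypothetical declaration inferred from the calls above, not the actual header):
class MSTargetInfo
{
public:
    MSTexture* getTexture() const; // RGBA pixels, uploaded lazily above
    MSModel*   getModel() const;   // vertices/normals/texCoords/faces arrays
    float*     getPose() const;    // per-target transform multiplied into the model-view
};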
Example 13: glClear
JNIEXPORT void JNICALL
Java_com_siu_android_arapp_vuforia_ImageTargetsRenderer_renderFrame(JNIEnv* env, jobject object)
{
//LOG("Java_com_siu_android_arapp_vuforia_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
#ifdef USE_OPENGL_ES_1_1
// Set GL11 flags:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
#endif
// glEnable(GL_DEPTH_TEST);
//
// // We must detect if background reflection is active and adjust the culling direction.
// // If the reflection is active, this means the post matrix has been reflected as well,
// // therefore standard counter clockwise face culling will result in "inside out" models.
// glEnable(GL_CULL_FACE);
// glCullFace(GL_BACK);
// if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
// glFrontFace(GL_CW); //Front camera
// else
// glFrontFace(GL_CCW); //Back camera
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix34F pose = result->getPose();
QCAR::Vec3F position(pose.data[3], pose.data[7], pose.data[11]);
float distance = sqrt(position.data[0] * position.data[0] +
position.data[1] * position.data[1] +
position.data[2] * position.data[2]);
//LOG("DISTANCE: %f", distance);
jclass clazz = env->FindClass("com/siu/android/arapp/vuforia/ImageTargetsRenderer");
if (clazz == 0) {
LOG("FindClass error");
return;
}
jmethodID jmethod = env->GetMethodID(clazz, "objectDetected", "(Ljava/lang/String;F)V");
if (jmethod == 0) {
LOG("GetMethodID error");
return;
}
jstring s = env->NewStringUTF(trackable.getName());
env->CallVoidMethod(object, jmethod, s, distance);
// QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
//
// // Choose the texture based on the target name:
// int textureIndex;
// if (strcmp(trackable.getName(), "chips") == 0)
// {
// textureIndex = 0;
// }
// else if (strcmp(trackable.getName(), "stones") == 0)
// {
// textureIndex = 1;
// }
// else
// {
// textureIndex = 2;
// }
//
// const Texture* const thisTexture = textures[textureIndex];
//
//#ifdef USE_OPENGL_ES_1_1
// // Load projection matrix:
// glMatrixMode(GL_PROJECTION);
// glLoadMatrixf(projectionMatrix.data);
//
// // Load model view matrix:
// glMatrixMode(GL_MODELVIEW);
// glLoadMatrixf(modelViewMatrix.data);
// glTranslatef(0.f, 0.f, kObjectScale);
// glScalef(kObjectScale, kObjectScale, kObjectScale);
//
// // Draw object:
// glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
//......... rest of the code omitted .........
Example 14: if
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_ImageTargets_ImageTargetsRenderer_renderFrame(JNIEnv *, jobject)
{
//LOG("Java_com_qualcomm_QCARSamples_ImageTargets_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if (QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
// Did we find any trackables this frame?
for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
// Choose the texture based on the target name:
int textureIndex;
modeltype = 11;
if (strcmp(trackable.getName(), "huitailang") == 0||strcmp(trackable.getName(), "stones") == 0||strcmp(trackable.getName(), "chips") == 0)
{
modeltype = 2;
textureIndex = 0;
const Texture* const thisTexture = textures[textureIndex];
QCAR::Matrix44F modelViewProjection;
modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
animateteapot(modelViewMatrix);
//1.35f*120.0f
SampleUtils::translatePoseMatrix(0.0f,-0.50f*120.0f,1.35f*120.0f,
&modelViewMatrix.data[0]);
//-90.0f
SampleUtils::rotatePoseMatrix(objectx, objecty,0.0f, 0,
&modelViewMatrix.data[0]);
SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
&modelViewMatrix.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrix.data[0] ,
&modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotVerts[0]);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotNormals[0]);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &teapotTexCoords[0]);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
glUniform1i(texSampler2DHandle, 0 );
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
(GLfloat*)&modelViewProjection.data[0] );
glDrawArrays(GL_TRIANGLES, 0,teapotNumVerts);
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
SampleUtils::checkGlError("ImageTargets renderFrame");
glDisable(GL_DEPTH_TEST);
QCAR::Renderer::getInstance().end(); // Caution: end() is called inside the trackable loop here, ending the rendering section before any remaining results are processed
}
else if (strcmp(trackable.getName(), "heroin") == 0)
{
textureIndex = 0;
modeltype = 1;
const Texture* const thisTexture = textures[textureIndex];
//......... rest of the code omitted .........