本文整理汇总了C++中VRCamera::init方法的典型用法代码示例。如果您正苦于以下问题:C++ VRCamera::init方法的具体用法?C++ VRCamera::init怎么用?C++ VRCamera::init使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VRCamera
的用法示例。
在下文中一共展示了VRCamera::init方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: initScreen
/// Configure the global projection screen (size + focal distance) and bind
/// the global camera to it with a fixed near/far clipping range.
void initScreen()
{
    // Preserve the monitor's aspect ratio when deriving the screen height.
    const auto screenHeight = SCREEN_WIDE_SIZE * SCREEN_HEIGHT / SCREEN_WIDTH;
    screen.setWidthHeight(SCREEN_WIDE_SIZE, screenHeight);
    screen.setFocalDistance(focalDistance);
    // Attach the camera to the configured screen, then set its clip planes.
    cam.init(screen);
    cam.setNearFarPlanes(0.2, 1000);
}
示例2: initProjectionScreen
/// Rebuild the global projection screen at the requested focal distance,
/// apply the given rigid transformation to it, and re-initialize the camera.
/// @param _focalDist      distance of the projection plane from the observer
/// @param _transformation rigid transform applied to the screen plane
void initProjectionScreen(double _focalDist, const Affine3d &_transformation)
{
    // Preserve the monitor's aspect ratio when deriving the screen height.
    const auto screenHeight = SCREEN_WIDE_SIZE * SCREEN_HEIGHT / SCREEN_WIDTH;
    screen.setWidthHeight(SCREEN_WIDE_SIZE, screenHeight);
    screen.setOffset(alignmentX, alignmentY);
    screen.setFocalDistance(_focalDist);
    screen.transform(_transformation);
    // The camera must be re-initialized after any change to the screen.
    cam.init(screen);
}
示例3: initScreen
/// Configure the global projection screen (size, alignment offset, focal
/// distance) and re-initialize the global camera on it.
void initScreen()
{
    // Preserve the monitor's aspect ratio when deriving the screen height.
    const auto screenHeight = SCREEN_WIDE_SIZE * SCREEN_HEIGHT / SCREEN_WIDTH;
    screen.setWidthHeight(SCREEN_WIDE_SIZE, screenHeight);
    screen.setOffset(alignmentX, alignmentY);
    screen.setFocalDistance(focalDistance);
    // The camera must be (re)bound to the screen after configuring it.
    cam.init(screen);
}
示例4: initializeExperiment
/**
 * @brief Set up the whole experiment: screen, camera, parameter file, trial
 * factors, and the marker/response/timing output streams.
 *
 * Loads the (hard-coded) parameter file, reads focal/interocular distances
 * and display flags from it, initializes the trial balance factors, opens the
 * three output files only if they do not already exist, writes the response
 * header, updates the stimulus and starts the trial/global timers.
 */
void initializeExperiment()
{
// MUST BE CALLED WITHIN A VALID OPENGL CONTEXT
//Screen screen(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH, alignmentX, alignmentY, focalDistance );
screen.init(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH, alignmentX, alignmentY, focalDistance );
screen.setOffset(alignmentX,alignmentY);
screen.setFocalDistance(focalDistance);
cam.init(screen);
// Initialize all the streams for file output eccetera
// NOTE(review): absolute Windows path hard-coded here — confirm this is the
// intended deployment location for the parameters file.
parameters.loadParameterFile("C:/cncsvisiondata/parametersFiles/Fulvio/expMicroHeadMovements/parametersExpMicroHeadMovementsActive.txt");
// Initialize focal distance and interocular distance from parameter file
focalDistance = parameters.get("FocalDistance");
interocularDistance = parameters.get("IOD");
infoDraw = (int)parameters.get("DrawInfo");
useCircularOcclusor = (int)parameters.get("UseCircularOcclusor");
// Initialize trials balance factors and advance to the first trial
trial.init(parameters);
trial.next();
// Base directory, full path
string baseDir = parameters.find("BaseDir");
// Subject name
string subjectName = parameters.find("SubjectName");
// Principal streams file
string markersFileName = baseDir + "markersFile_MicroHeadMovementsActive_" + subjectName + ".txt";
string outputFileName = baseDir + "responseFile_MicroHeadMovementsActive_" + subjectName +".txt";
string timingFileName = baseDir + "timingFile_MicroHeadMovementsActive_" + subjectName + ".txt";
// Check for output file existence; open for writing only if it is new so an
// existing subject's data is never overwritten.
// NOTE(review): the "loaded successfully" message below prints even when the
// file already existed and was therefore NOT opened — confirm this is intended.
if ( !util::fileExists((outputFileName)) )
responseFile.open(outputFileName.c_str());
cerr << "File " << outputFileName << " loaded successfully" << endl;
// Check for output markers file existence (same never-overwrite policy)
if ( !util::fileExists((markersFileName)) )
markersFile.open(markersFileName.c_str());
cerr << "File " << markersFileName << " loaded successfully" << endl;
// Check for output timing file existence (same never-overwrite policy)
if ( !util::fileExists((timingFileName)) )
timingFile.open(( timingFileName ).c_str());
cerr << "File " << timingFileName << " opened successfully" << endl;
// Write the response file header (tab-separated column names)
responseFile << "# SubjectName\tTrialNumber\tFocalDistance\tKeyPressed\tResponseTime\tfZWidth\tfSlant\tfTilt\tfStimAnchored" << endl;
// Finished loading parameter files
// Update the stimulus for the first trial and start the clocks
updateStimulus();
trialTimer.start();
globalTimer.start();
}
示例5: getPassiveMatrix
/// Compute the model-view matrix seen by a "passive" camera that follows the
/// observer's current head pose, composed with the active object transform.
/// @return passive camera model-view matrix times objectActiveTransformation
Affine3d getPassiveMatrix()
{
    // Throwaway screen placed at the head-tracked location of the display:
    // zero focal distance, then moved by the head transform plus a push to
    // the focal plane along z.
    Screen passiveScreen;
    passiveScreen.setWidthHeight(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE * SCREEN_HEIGHT / SCREEN_WIDTH);
    passiveScreen.setOffset(alignmentX, alignmentY);
    passiveScreen.setFocalDistance(0);
    passiveScreen.transform(headEyeCoords.getRigidStart().getFullTransformation()
                            * Translation3d(Vector3d(0, 0, focalDistance)));
    // Dry-simulation camera: produces matrices without driving the display.
    VRCamera passiveCam;
    passiveCam.init(passiveScreen);
    passiveCam.setDrySimulation(true);
    passiveCam.setEye(eyeRight);
    return passiveCam.getModelViewMatrix() * objectActiveTransformation;
}
示例6: initRendering
/// Set up the fixed-function OpenGL state (depth, blending, lighting) and an
/// orthographic VR camera on a 20x20 screen at the focal distance.
void initRendering()
{
    // Depth testing plus standard premultiplied-alpha-style blending.
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    // One-light fixed-function lighting driven by the current glColor.
    glEnable(GL_LIGHTING);
    glEnable(GL_LIGHT0);
    glEnable(GL_NORMALIZE);
    glEnable(GL_COLOR_MATERIAL);
    // Black opaque clear color; reset the model-view stack.
    glClearColor(0.0, 0.0, 0.0, 1.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    // Orthographic camera on an ad-hoc 20x20 centered screen.
    cam.init(Screen(20, 20, 0, 0, focalDistance));
    cam.setNearFarPlanes(1.0, 200.0);
    cam.setOrthoGraphicProjection(true);
}
示例7: main
// Entry point: initialize the Optotrak tracker, the projection screen and
// camera, then the GLUT window/GL state, register callbacks and enter the
// main loop. The initialization order (tracker -> screen/cam -> glutInit ->
// display mode -> window -> rendering) is load-bearing; do not reorder.
int main(int argc, char*argv[])
{
// Align the tracker to the last saved calibration; bail out on failure.
optotrak.setTranslation(calibration);
if ( optotrak.init(LastAlignedFile) != 0)
{ cleanup();
exit(0);
}
// Configure the global projection screen and bind the camera to it.
screen.setWidthHeight(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH);
screen.setOffset(alignmentX,alignmentY);
screen.setFocalDistance(focalDistance);
cam.init(screen);
recordHeadEyeRelativePositions();
// GLUT setup: quad-buffered stereo only when requested at compile/run time.
glutInit(&argc, argv);
if (stereo)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH | GLUT_STEREO);
else
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
// Either exclusive game mode or a fullscreen window.
if ( gameMode )
{
glutGameModeString(ROVERETO_GAME_MODE_STRING);
glutEnterGameMode();
}
else
{
glutInitWindowSize(SCREEN_WIDTH, SCREEN_HEIGHT);
glutCreateWindow("CNCSVISION Example 2 HappyBuddha");
glutFullScreen();
}
// GL state + model loading must happen after the GL context exists.
initRendering();
model.load("../../data/objmodels/happyBuddha.obj");
// Register callbacks and the periodic update timer, hide the cursor.
glutDisplayFunc(drawGLScene);
glutKeyboardFunc(handleKeypress);
glutReshapeFunc(handleResize);
glutTimerFunc(TIMER_MS, update, 0);
glutSetCursor(GLUT_CURSOR_NONE);
/* Application main loop */
glutMainLoop();
// NOTE(review): glutMainLoop() normally never returns, so this cleanup is
// unreachable on most GLUT implementations — confirm freeglut semantics.
cleanup();
return 0;
}
示例8: initProjectionScreen
void initProjectionScreen(double _focalDist)
{
cam.init(Screen(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH,alignmentX,alignmentY,_focalDist));
RoveretoMotorFunctions::moveScreenAbsolute(_focalDist,homeFocalDistance,3500);
}
示例9: initProjectionScreen
// This function defines the distance and orientation of the virtual projection
// screen. Leave it as-is unless you want to:
// 1) simulate a projection screen that moves, or one oriented differently from
//    the standard plane at z = focalDistance;
// 2) move the projection plane by changing alignmentX and alignmentY — in that
//    case contact the original author, or use headCalibration.cpp, which binds
//    keys 2,4,6,8 to screen movement for aligning the virtual world with the
//    Optotrak world.
// NOTE(review): the _transformation and synchronous parameters are unused in
// this overload — confirm whether that is intentional or a leftover.
void initProjectionScreen(double _focalDist, const Affine3d &_transformation, bool synchronous)
{
cam.init(Screen(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH,alignmentX,alignmentY,_focalDist));
}
示例10: update
void update(int value)
{
// Conta i cicli di presentazione dello stimolo
if ( (sumOutside > str2num<int>(parameters.find("StimulusCycles")) ) && (trialMode == STIMULUSMODE) )
{
sumOutside=0;
trialMode++;
trialMode=trialMode%4;
}
if (conditionInside && (sumOutside*2 > str2num<int>(parameters.find("FixationCycles"))) && (trialMode ==FIXATIONMODE ) )
{
sumOutside=0;
trialMode++;
trialMode=trialMode%4;
stimulusDuration.start();
}
if ( trialMode == STIMULUSMODE )
stimulusFrames++;
if ( trialMode == FIXATIONMODE )
stimulusFrames=0;
Screen screenPassive;
screenPassive.setWidthHeight(SCREEN_WIDE_SIZE, SCREEN_WIDE_SIZE*SCREEN_HEIGHT/SCREEN_WIDTH);
screenPassive.setOffset(alignmentX,alignmentY);
screenPassive.setFocalDistance(0);
screenPassive.transform(headEyeCoords.getRigidStart().getFullTransformation()*Translation3d(center));
camPassive.init(screenPassive);
camPassive.setDrySimulation(true);
camPassive.setEye(eyeRight);
objectPassiveTransformation = ( camPassive.getModelViewMatrix()*objectActiveTransformation );
// Coordinates picker
markers = optotrak.getAllPoints();
if ( isVisible(markers[1]) && isVisible(markers[2]) && isVisible(markers[3]) )
headEyeCoords.update(markers[1],markers[2],markers[3]);
Affine3d active = headEyeCoords.getRigidStart().getFullTransformation();
eulerAngles.init( headEyeCoords.getRigidStart().getFullTransformation().rotation() );
eyeLeft = headEyeCoords.getLeftEye();
eyeRight = headEyeCoords.getRightEye();
cyclopeanEye = (eyeLeft+eyeRight)/2.0;
// Projection of view normal on the focal plane
Vector3d directionOfSight = (active.rotation()*Vector3d(0,0,-1)).normalized();
Eigen::ParametrizedLine<double,3> lineOfSightRight = Eigen::ParametrizedLine<double,3>::Through( eyeRight , eyeRight+directionOfSight );
Eigen::ParametrizedLine<double,3> lineOfSightLeft = Eigen::ParametrizedLine<double,3>::Through( eyeLeft, eyeLeft+directionOfSight );
double lineOfSightRightDistanceToFocalPlane = lineOfSightRight.intersection(focalPlane);
double lineOfSightLeftDistanceToFocalPlane = lineOfSightLeft.intersection(focalPlane);
//double lenghtOnZ = (active*(center-eyeCalibration )+eyeRight).z();
projPointEyeRight = lineOfSightRightDistanceToFocalPlane *(directionOfSight)+ (eyeRight);
projPointEyeLeft= lineOfSightLeftDistanceToFocalPlane * (directionOfSight) + (eyeLeft);
// second projection the fixation point computed with z non constant but perfectly parallel to projPointEyeRight
lineOfSightRightDistanceToFocalPlane= (( active.rotation()*(center)) - eyeRight).norm();
Vector3d secondProjection = lineOfSightRightDistanceToFocalPlane *(directionOfSight)+ (eyeRight);
if ( !zOnFocalPlane )
projPointEyeRight=secondProjection ;
// Compute the translation to move the eye in order to avoid shear components
Vector3d posAlongLineOfSight = (headEyeCoords.getRigidStart().getFullTransformation().rotation())*(eyeRight -eyeCalibration);
switch ( (int)factors["Translation"] )
{
case -1:
case -2:
translationFactor.setZero();
if ( trialMode == STIMULUSMODE )
projPointEyeRight=center;
break;
case 0:
translationFactor.setZero();
break;
case 1:
translationFactor = factors["TranslationConstant"]*Vector3d(posAlongLineOfSight.z(),0,0);
break;
case 2:
translationFactor = factors["TranslationConstant"]*Vector3d(0,posAlongLineOfSight.z(),0);
break;
}
if ( passiveMode )
initProjectionScreen(0,headEyeCoords.getRigidStart().getFullTransformation()*Translation3d(Vector3d(0,0,focalDistance)));
else
initProjectionScreen(focalDistance,Affine3d::Identity());
checkBounds();
/**** Save to file part ****/
// Markers file save the used markers and time-depending experimental variable to a file
// (Make sure that in passive experiment the list of variables has the same order)
markersFile << trialNumber << " " << headCalibrationDone << " " << trialMode << " " ;
markersFile <<markers[1].transpose() << " " << markers[2].transpose() << " " << markers[3].transpose() << " " << markers[17].transpose() << " " << markers[18].transpose() << " " ;
markersFile << factors["Tilt"] << " " <<
factors["Slant"] << " " <<
factors["Translation"] << " " <<
//.........这里部分代码省略.........