This article collects typical usage examples of the C++ SbVec3f class. If you are unsure what SbVec3f is for or how to use it, the selected class code examples below may help. 15 code examples of the SbVec3f class are shown, sorted by popularity.
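Before the project examples, here is a minimal, self-contained sketch of the SbVec3f operations that recur throughout this page (construction, indexing, dot and cross products, length, normalization). It is orientation only, not taken from any of the projects below, and assumes a standard Coin3D/Open Inventor development environment:

#include <Inventor/SbVec3f.h>
#include <cstdio>

int main()
{
    SbVec3f a(1.0f, 0.0f, 0.0f);
    SbVec3f b(0.0f, 1.0f, 0.0f);

    SbVec3f sum = a + b;         // component-wise addition
    float   d   = a.dot(b);      // scalar product; 0 for orthogonal vectors
    SbVec3f c   = a.cross(b);    // right-handed cross product -> (0, 0, 1)
    float   len = sum.length();  // Euclidean length, sqrt(2) here

    sum.normalize();             // scales the vector to unit length in place

    printf("dot=%f len=%f cross=(%f %f %f)\n", d, len, c[0], c[1], c[2]);
    return 0;
}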
Example 1:

SbBool
SoXipPolygon::canClose( const SbVec3f& pos ) const
{
    // No need to close the shape if it is already closed
    if( isClosed() )
        return FALSE;

    // There should be at least 4 points; the 4th one is the point
    // that is currently being moved.
    if( point.getNum() > 3 )
    {
        // The minimum contour size is reached: check whether the last
        // point is close enough to the start point to close the contour.
        SbVec3f screenPt[2];
        mViewVolume.projectToScreen( point[0], screenPt[0] );
        mViewVolume.projectToScreen( pos, screenPt[1] );

        SbVec3f d = screenPt[0] - screenPt[1];
        d[0] *= mViewport.getViewportSizePixels()[0];
        d[1] *= mViewport.getViewportSizePixels()[1];
        d[2] = 0;

        return d.length() < CLOSING_MIN_PIXEL_DISTANCE;
    }
    return FALSE;
}
Example 2: isVisibleFace

bool isVisibleFace(int faceIndex, const SbVec2f& pos, Gui::View3DInventorViewer* viewer)
{
    SoSeparator* root = new SoSeparator;
    root->ref();
    root->addChild(viewer->getSoRenderManager()->getCamera());
    root->addChild(vp->getRoot()); // 'vp' is the part's view provider, available from the enclosing scope

    SoSearchAction searchAction;
    searchAction.setType(PartGui::SoBrepFaceSet::getClassTypeId());
    searchAction.setInterest(SoSearchAction::FIRST);
    searchAction.apply(root);
    SoPath* selectionPath = searchAction.getPath();

    SoRayPickAction rp(viewer->getSoRenderManager()->getViewportRegion());
    rp.setNormalizedPoint(pos);
    rp.apply(selectionPath);
    root->unref();

    SoPickedPoint* pick = rp.getPickedPoint();
    if (pick) {
        const SoDetail* detail = pick->getDetail();
        if (detail && detail->isOfType(SoFaceDetail::getClassTypeId())) {
            int index = static_cast<const SoFaceDetail*>(detail)->getPartIndex();
            if (faceIndex != index)
                return false;
            SbVec3f dir = viewer->getViewDirection();
            const SbVec3f& nor = pick->getNormal();
            if (dir.dot(nor) > 0)
                return false; // bottom side points towards the user
            return true;
        }
    }
    return false;
}
Example 3: SbVec3f

/**
 * Sets \a box to the bounding box of the polygon and \a center to its center.
 */
void SoPolygon::computeBBox(SoAction *action, SbBox3f &box, SbVec3f &center)
{
    SoState* state = action->getState();
    const SoCoordinateElement * coords = SoCoordinateElement::getInstance(state);
    if (!coords) return;
    const SbVec3f * points = coords->getArrayPtr3();
    if (!points) return;

    float maxX=-FLT_MAX, minX=FLT_MAX,
          maxY=-FLT_MAX, minY=FLT_MAX,
          maxZ=-FLT_MAX, minZ=FLT_MAX;
    int32_t len = coords->getNum();
    int32_t beg = startIndex.getValue();
    int32_t cnt = numVertices.getValue();
    int32_t end = beg + cnt;

    if (end <= len) {
        for (int32_t i=beg; i<end; i++) {
            maxX = std::max<float>(maxX,points[i][0]);
            minX = std::min<float>(minX,points[i][0]);
            maxY = std::max<float>(maxY,points[i][1]);
            minY = std::min<float>(minY,points[i][1]);
            maxZ = std::max<float>(maxZ,points[i][2]);
            minZ = std::min<float>(minZ,points[i][2]);
        }
        box.setBounds(minX,minY,minZ,maxX,maxY,maxZ);
        center.setValue(0.5f*(minX+maxX),0.5f*(minY+maxY),0.5f*(minZ+maxZ));
    }
    else {
        box.setBounds(SbVec3f(0,0,0), SbVec3f(0,0,0));
        center.setValue(0.0f,0.0f,0.0f);
    }
}
Example 4: selected

/*! \COININTERNAL
  Called when the dragger is selected (picked) by the user.
*/
void
SoRotateSphericalDragger::dragStart(void)
{
    SoSwitch *sw;
    sw = SO_GET_ANY_PART(this, "rotatorSwitch", SoSwitch);
    SoInteractionKit::setSwitchValue(sw, 1);
    sw = SO_GET_ANY_PART(this, "feedbackSwitch", SoSwitch);
    SoInteractionKit::setSwitchValue(sw, 1);

    SbVec3f hitPt = this->getLocalStartingPoint();
    float radius = hitPt.length();
    this->sphereProj->setSphere(SbSphere(SbVec3f(0.0f, 0.0f, 0.0f), radius));

    this->sphereProj->setViewVolume(this->getViewVolume());
    this->sphereProj->setWorkingSpace(this->getLocalToWorldMatrix());

    switch (this->getFrontOnProjector()) {
    case FRONT:
        this->sphereProj->setFront(TRUE);
        break;
    case BACK:
        this->sphereProj->setFront(FALSE);
        break;
    default: // avoid warnings
    case USE_PICK:
        this->sphereProj->setFront(this->sphereProj->isPointInFront(hitPt));
        break;
    }

    SbVec3f projPt = this->sphereProj->project(this->getNormalizedLocaterPosition());
    this->getLocalToWorldMatrix().multVecMatrix(projPt, this->prevWorldHitPt);
    this->prevMotionMatrix = this->getMotionMatrix();
}
Example 5: getLocalStartingPoint

void TDragger::drag()
{
    projector.setViewVolume(this->getViewVolume());
    projector.setWorkingSpace(this->getLocalToWorldMatrix());

    SbVec3f hitPoint = projector.project(getNormalizedLocaterPosition());
    SbVec3f startingPoint = getLocalStartingPoint();
    SbVec3f localMovement = hitPoint - startingPoint;

    // Scale the increment to match local space.
    float scaledIncrement = static_cast<float>(translationIncrement.getValue()) / autoScaleResult.getValue();
    localMovement = roundTranslation(localMovement, scaledIncrement);

    // When the movement vector is null, neither appendTranslation nor
    // setMotionMatrix has any effect; either way the dragger stops
    // translating back to its initial starting point.
    if (localMovement.equals(SbVec3f(0.0f, 0.0f, 0.0f), 0.00001f))
    {
        setMotionMatrix(getStartMotionMatrix());
        // Not obvious why this call is needed, but without it the dragger
        // does not return to its original position.
        this->valueChanged();
    }
    else
        setMotionMatrix(appendTranslation(getStartMotionMatrix(), localMovement));

    Base::Quantity quantity(
        static_cast<double>(translationIncrementCount.getValue()) * translationIncrement.getValue(), Base::Unit::Length);

    QString message(QObject::tr("Translation: "));
    message += quantity.getUserString();
    getMainWindow()->showMessage(message, 3000);
}
Example 6: v

void METKShowClusteredObjects::setCamPosition(const int stackOrFieldNr, bool isStackNr){
    float fX,fY,fZ;
    if (!isStackNr)
        m_calcVis.getFieldMaxPos(stackOrFieldNr,fX,fY,fZ);
    else
        m_calcVis.getStackMaxPos(stackOrFieldNr,fX,fY,fZ);

    std::cout << "_result = " << fX << "," << fY << "," << fZ << std::endl;
    vec3 v(fX,fY,fZ);
    _result->setVec3fValue(v);

    Cam->setNormPlump(SbVec3f(0.0,0.0,1.0));
    Cam->setUpVecAngle(0.0);

    float fMX,fMY,fMZ,fbla; // fbla/ibla receive sphere values that are not used here
    int ibla;
    m_calcVis.getSphereValues(fMX,fMY,fMZ,fbla,ibla);

    Cam->setCamPosition(SbVec3f(fX,fY,fZ),SbVec3f(fMX,fMY,fMZ));
    Cam->setHeight(3.0);

    SbRotation rot=Cam->getOrientation();
    SbVec3f axis;
    float angle,o1,o2,o3;
    rot.getValue(axis,angle);
    axis.getValue(o1,o2,o3);
    _orient->setVec4fValue(vec4(o1,o2,o3,angle));
}
Example 7: fprintf

void InvPlaneMover::setPosition(SbVec3f &point)
{
    // Make sure that the normal points towards the camera.
    SbVec3f camPos = renderer->viewer->getCamera()->position.getValue();
    SbVec3f no;
    if (camPos.dot(planeNormal_) < 0)
    {
        no = planeNormal_;
    }
    else
    {
        no = -planeNormal_;
    }

    fprintf(stderr, "inorm=(%f %f %f), no=(%f %f %f)\n",
            iNorm_[0], iNorm_[1], iNorm_[2],
            no[0], no[1], no[2]);

    SbRotation rota(iNorm_, no);
    distOffset_ = point;
    nnn_ = no;

    // The handle lies in front of the plane by this distance.
    float offset_of_handle = 0.001 * scale_->scaleFactor.getValue()[0];

    transl_->translation = point - offset_of_handle * no;
    fullRot_->rotation = rota;

    SbVec3f t(0, 0, 0);
    jDrag_->translation.setValue(t);
    jDrag_->rotation.setValue(SbRotation::identity());
}
Example 8: distribute_shade

bool RayTracer::depth_of_field(int i, int j, SbVec3f *position, SbVec3f *color){
    SbVec3f tempColor;
    float R = DISK_SIZE;
    float du;
    float dv;
    bool should_color = false;
    int number_of_jitter_positions = NUMBER_OF_CAMERAS;

    if(depth_of_field_on == 0) {
        should_color = distribute_shade(i, j, position, color);
    }
    else{
        // Jitter the camera position over a disk and average the shading results.
        for(int k = 0; k < number_of_jitter_positions; k++){
            SbVec3f camera_position = *position;
            du = get_random_number();
            dv = get_random_number();
            camera_position = camera_position + (du * R * pixel_width * this->u) + (dv * pixel_height * R * this->v);
            tempColor.setValue(0.0, 0.0, 0.0);
            should_color = distribute_shade(i, j, &camera_position, &tempColor);
            *color = *color + tempColor;
        }
        *color = *color / number_of_jitter_positions;
    }
    return should_color;
}
Example 9: Rchain_hand

int QilexDoc::doc_new_kinematic_hand(ct_new_kinematic_chain *data)
{
    int error = 0;
    int tipus = 0;
    void *buffer = NULL;
    char *buftemp = (char*)malloc(1024);
    size_t sizeModel = 0;

    SoSeparator *kinechain = new SoSeparator;
    SoSeparator *kinetest = new SoSeparator;
    Rchain_hand *kineengine = new Rchain_hand();
    SoTransform *pos_rot = new SoTransform;

    SbVec3f joinax;
    joinax.setValue(SbVec3f(data->x, data->y, data->z));
    pos_rot->translation.setValue(joinax);
    pos_rot->rotation.setValue(SbVec3f(data->axeX, data->axeY, data->axeZ), (float) rad((double) data->angle));

    kinechain = readFile(data->QsModelFile.latin1(), tipus);

    if (kinechain == NULL) // no object read
    { return 1; }
    else // OK, there is no object with the same name
    {
        error = kineengine->init_dat(data->QsDatFile.latin1());
        if (error == 0)
        {
            kinechain->ref();
            kinetest = (SoSeparator*)SoNode::getByName(data->QsName.latin1());
            if (kinetest == NULL)
            {
                // Write the scene graph into a buffer so it can be stored
                // in the XML file.
                SoOutput out;
                out.setBuffer(buftemp, 1024, reallocCB);
                SoWriteAction wa1(&out);
                wa1.apply(kinechain);
                out.getBuffer(buffer, sizeModel);
                kinechain->insertChild(pos_rot, 0);
            }
            error = doc_insert_kinematic_hand(kineengine, kinechain);
        }
    }

    if (error == 0)
    {
        // Note: buffer may still be NULL if the write branch above was skipped.
        writeXML_kineelement((char *)buffer, sizeModel, tipus, data, kineengine);
    }
    return error;
}
Example 10: COIN_UNUSED_ARG

// Doc in parent
void
SoVRMLCone::computeBBox(SoAction * COIN_UNUSED_ARG(action),
                        SbBox3f & box,
                        SbVec3f & center)
{
    float r = this->bottomRadius.getValue();
    float h = this->height.getValue();

    // Allow negative values.
    if (h < 0.0f) h = -h;
    if (r < 0.0f) r = -r;

    float half_height = h * 0.5f;

    // The SIDES are present, so just find the middle point and enclose
    // everything.
    if (this->side.getValue()) {
        center.setValue(0.0f, 0.0f, 0.0f);
        box.setBounds(SbVec3f(-r, -half_height, -r), SbVec3f(r, half_height, r));
    }
    // ..no SIDES, but we've still got the bottom (NB: OIV misses this case).
    else if (this->bottom.getValue()) {
        center.setValue(0.0f, -half_height, 0.0f);
        box.setBounds(SbVec3f(-r, -half_height, -r), SbVec3f(r, -half_height, r));
    }
    // ..no parts present. My confidence is shot -- I feel very small.
    else {
        center.setValue(0.0f, 0.0f, 0.0f);
        box.setBounds(SbVec3f(0.0f, 0.0f, 0.0f), SbVec3f(0.0f, 0.0f, 0.0f));
    }
}
Example 11: angleBetweenVectors

double
angleBetweenVectors( const SbVec3f& u, const SbVec3f& v )
{
    double cosAngle = v.dot( u ) / ( u.length() * v.length() );
    // Clamp against rounding errors that could push the ratio outside
    // [-1, 1], which would make acos() return NaN.
    if (cosAngle > 1.0) cosAngle = 1.0;
    else if (cosAngle < -1.0) cosAngle = -1.0;
    return acos( cosAngle );
}
Example 12: planeFromMatrix

SbBool XipGeomUtils::mprIntersect(const SbMatrix & m1, const SbMatrix & m2, SbVec3f line[2], float viewportAspectRatio)
{
    SbLine objLine, worldLine;
    SbVec3f pt1, pt2;
    int pc = 0;

    SbPlane p1 = planeFromMatrix(m1);
    SbPlane p2 = planeFromMatrix(m2);

    float width  = viewportAspectRatio < 1.f ? 1.f : viewportAspectRatio;
    float height = viewportAspectRatio > 1.f ? 1.f : 1.f / viewportAspectRatio;

    const SbLine frameLines[4] =
    {
        SbLine(SbVec3f(-width, -height, 0), SbVec3f(-width,  height, 0)),
        SbLine(SbVec3f(-width, -height, 0), SbVec3f( width, -height, 0)),
        SbLine(SbVec3f( width,  height, 0), SbVec3f(-width,  height, 0)),
        SbLine(SbVec3f( width,  height, 0), SbVec3f( width, -height, 0))
    };

    // First, get the intersection line of the two planes.
    if (!planeIntersect(p1, p2, worldLine)) return FALSE;

    // Convert the intersection line from world into object space before
    // testing against the frame lines, which are also in object space.
    m1.inverse().multLineMatrix(worldLine, objLine);

    SbVec3f normal = objLine.getDirection();
    normal.normalize();
    objLine = SbLine(objLine.getPosition(), objLine.getPosition() + normal);

    // Intersect with the 4 lines of the frame, skipping near-parallel pairs.
    for (int i = 0; i < 4; i++)
    {
        if ((1.0f - fabsf(objLine.getDirection().dot(frameLines[i].getDirection()))) > 0.1f)
        {
            if (objLine.getClosestPoints(frameLines[i], pt1, pt2))
            {
                // Valid intersection point. Convert back to world space.
                m1.multVecMatrix(pt1, pt2);
                line[pc++] = pt2;
                if (pc > 1) break;
            }
        }
    }
    return (pc == 2);
}
Example 13: povViewCamera

/// return the camera definition of the active view
static PyObject *
povViewCamera(PyObject *self, PyObject *args)
{
    // no arguments
    if (!PyArg_ParseTuple(args, ""))
        return NULL;
    PY_TRY {
        std::string out;
        const char* ppReturn = 0;
        Gui::Application::Instance->sendMsgToActiveView("GetCamera", &ppReturn);

        SoNode* rootNode;
        SoInput in;
        in.setBuffer((void*)ppReturn, std::strlen(ppReturn));
        SoDB::read(&in, rootNode);

        if (!rootNode || !rootNode->getTypeId().isDerivedFrom(SoCamera::getClassTypeId()))
            throw Base::Exception("CmdRaytracingWriteCamera::activated(): Could not read "
                                  "camera information from ASCII stream....\n");

        // The root node returned from SoDB::read() has an initial zero
        // ref-count, so reference it before we start using it to avoid
        // premature destruction.
        SoCamera * Cam = static_cast<SoCamera*>(rootNode);
        Cam->ref();

        SbRotation camrot = Cam->orientation.getValue();

        SbVec3f upvec(0, 1, 0); // init to default up vector
        camrot.multVec(upvec, upvec);

        SbVec3f lookat(0, 0, -1); // init to default view direction vector
        camrot.multVec(lookat, lookat);

        SbVec3f pos = Cam->position.getValue();
        float Dist = Cam->focalDistance.getValue();

        // making gp out of the Coin stuff
        gp_Vec gpPos(pos.getValue()[0], pos.getValue()[1], pos.getValue()[2]);
        gp_Vec gpDir(lookat.getValue()[0], lookat.getValue()[1], lookat.getValue()[2]);
        lookat *= Dist;
        lookat += pos;
        gp_Vec gpLookAt(lookat.getValue()[0], lookat.getValue()[1], lookat.getValue()[2]);
        gp_Vec gpUp(upvec.getValue()[0], upvec.getValue()[1], upvec.getValue()[2]);

        // getting image format
        ParameterGrp::handle hGrp = App::GetApplication().GetParameterGroupByPath("User parameter:BaseApp/Preferences/Mod/Raytracing");
        int width = hGrp->GetInt("OutputWidth", 800);
        int height = hGrp->GetInt("OutputHeight", 600);

        // call the write method of PovTools....
        out = PovTools::getCamera(CamDef(gpPos, gpDir, gpLookAt, gpUp), width, height);

        return Py::new_reference_to(Py::String(out));
    } PY_CATCH;
}
Example 14: calcUpVector

/*! Rotates the up vector by the current up-vector angle.
 * \param lookDir the viewing direction
 * \param plump the plumb (world up) vector
 */
SbVec3f kCamera::calcUpVector(const SbVec3f lookDir, const SbVec3f plump)
{
    SbVec3f upVec;
    upVec = calcPerfectUpVector(lookDir, plump);
    upVec.normalize();
    rotateVector(upVec, lookDir, currentUpVecAngle);
    upVec.normalize();
    return upVec;
}
Example 15: storeTriangle

void SoColorShape::storeTriangle(const SbVec3f& point1, const SbVec3f& point2, const SbVec3f& point3,
                                 const SbVec3f& normal1, const SbVec3f& normal2, const SbVec3f& normal3,
                                 const int& colorIndex1)
{
    // If the triangle lies completely outside, do not store it!
    if (colorIndex1 != 0) {
        // Determine the barycenter and its position in the array.
        SbVec3f barycenter;
        float baryX, baryY, baryZ;
        barycenter  = point1 - _offSet;
        barycenter += point2 - _offSet;
        barycenter += point3 - _offSet;
        // Divide by 3 because the sum consists of 3 vectors.
        barycenter = barycenter / 3.0;
        // Divide by HASH_PARTITION to partition the space.
        barycenter = barycenter / float(HASH_PARTITION);
        barycenter.getValue(baryX, baryY, baryZ);

        int arrayPosition;
        arrayPosition = ((int)fabs(baryX)) +
                        ((int)fabs(baryY)) * _extentX +
                        ((int)fabs(baryZ)) * _extentX * _extentY;

        // Store the corner points and obtain their indices.
        const Vertex *vertex1, *vertex2, *vertex3;
        vertex1 = insertVertex(point1, normal1, colorIndex1, arrayPosition);
        vertex2 = insertVertex(point2, normal2, colorIndex1, arrayPosition);
        vertex3 = insertVertex(point3, normal3, colorIndex1, arrayPosition);

        // Find or create the edges.
        Edge *edge1, *edge2, *edge3;
        edge3 = generateEdge(vertex1, vertex2, arrayPosition);
        edge1 = generateEdge(vertex2, vertex3, arrayPosition);
        edge2 = generateEdge(vertex3, vertex1, arrayPosition);

        // Store the triangle.
        Triangle* tri = new Triangle;
        tri->vertex1 = vertex1;
        tri->vertex2 = vertex2;
        tri->vertex3 = vertex3;
        tri->edge1 = edge1;
        tri->edge2 = edge2;
        tri->edge3 = edge3;
        _triangleSet[arrayPosition].insert(tri);

        // Assign the triangle to its edges.
        if (edge1->triangle1 == 0) edge1->triangle1 = tri;
        else edge1->triangle2 = tri;
        if (edge2->triangle1 == 0) edge2->triangle1 = tri;
        else edge2->triangle2 = tri;
        if (edge3->triangle1 == 0) edge3->triangle1 = tri;
        else edge3->triangle2 = tri;
    }
}