This article collects typical usage examples of the Java method processing.core.PMatrix3D.mult. If you are wondering what PMatrix3D.mult does and how it is used, the curated examples below may help; you can also look further into the enclosing class, processing.core.PMatrix3D.
The following 8 code examples of PMatrix3D.mult are shown, ordered by popularity by default.
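Before the examples, here is a minimal standalone sketch of the two mult overloads used below, mult(PVector, PVector) and mult(float[], float[]). Both transform the source point by the matrix and write the result into the target argument. The class name MultDemo and the chosen transform are illustrative only.

import processing.core.PMatrix3D;
import processing.core.PVector;

public class MultDemo {
  public static void main(String[] args) {
    // Build a simple transform: translate by (10, 0, 0).
    PMatrix3D mat = new PMatrix3D();   // starts as the identity matrix
    mat.translate(10, 0, 0);

    // Overload 1: transform a PVector, writing into a target PVector.
    PVector out = new PVector();
    mat.mult(new PVector(1, 2, 3), out);        // out = (11, 2, 3)

    // Overload 2: transform a float[3], writing into a target float[3].
    float[] result = new float[3];
    mat.mult(new float[]{1, 2, 3}, result);     // result = {11, 2, 3}

    System.out.println(out + " / " + result[0] + ", " + result[1] + ", " + result[2]);
  }
}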
Example 1: DwFoldingTile
import processing.core.PMatrix3D; // import the package/class the method depends on
public DwFoldingTile(TileData DEF, DwIndexedFaceSet ifs, int verts_idx, int faces_idx, PMatrix3D mat){
  this.DEF = DEF;

  // transform the tile's template vertices by the given matrix
  float[][] verts = new float[DEF.VERTS_COUNT][3];
  for(int i = 0; i < DEF.VERTS_COUNT; i++){
    mat.mult(DEF.VERTS[i], verts[i]);
  }

  // offset the template face indices into the shared vertex buffer
  int[][] faces = new int[DEF.FACES_COUNT][3];
  for(int i = 0; i < DEF.FACES_COUNT; i++){
    faces[i][0] = DEF.FACES[i][0] + verts_idx;
    faces[i][1] = DEF.FACES[i][1] + verts_idx;
    faces[i][2] = DEF.FACES[i][2] + verts_idx;
  }
  this.faces = faces;

  // copy the transformed vertices and re-indexed faces into the indexed face set
  System.arraycopy(verts, 0, ifs.verts, verts_idx, DEF.VERTS_COUNT);
  System.arraycopy(faces, 0, ifs.faces, faces_idx, DEF.FACES_COUNT);
}
Example 2: getCoordFrom
import processing.core.PMatrix3D; // import the package/class the method depends on
public PVector getCoordFrom(PaperScreen paperScreen, PVector point) {

  // get a copy
  PMatrix3D thisLocationInv = this.getLocation().get();
  thisLocationInv.invert();

  PMatrix3D otherLocation = paperScreen.getLocation();
  PVector cameraViewOfPoint = new PVector();
  otherLocation.mult(point, cameraViewOfPoint);

  PVector thisViewOfPoint = new PVector();
  thisLocationInv.mult(cameraViewOfPoint, thisViewOfPoint);

  if (Float.isNaN(thisViewOfPoint.x)) {
    return INVALID_VECTOR;
  }
  return thisViewOfPoint;
}
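A variation on Example 2: when many points have to be converted between the two frames, the inverted location and the other location can be composed into a single matrix before calling mult. This is only a sketch under that assumption; frameALocation, frameBLocation and pointInA are placeholder names, not part of the PapARt API.

// Map pointInA from frame A into frame B by building B^-1 * A once.
PMatrix3D aToB = frameBLocation.get();   // copy, so the original matrix is not modified
aToB.invert();                           // B^-1
aToB.apply(frameALocation);              // B^-1 * A  (apply post-multiplies)
PVector pointInB = new PVector();
aToB.mult(pointInA, pointInB);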
Example 3: computeScreenPaperIntersection
import processing.core.PMatrix3D; // import the package/class the method depends on
/**
 * Computes the intersections of the projector's corner (and grid) points with the
 * paper plane, as seen by the camera, and builds the corresponding homography.
 *
 * @param projector
 * @param planeCalibCam
 * @param kinectCameraExtrinsics
 * @return the screen/paper homography, or HomographyCalibration.INVALID if a point does not hit the plane
 */
public static HomographyCalibration computeScreenPaperIntersection(ProjectorDisplay projector, PlaneCalibration planeCalibCam, PMatrix3D kinectCameraExtrinsics) {
  // generate coordinates...
  float step = 0.5f;
  int nbPoints = (int) ((1 + 1.0F / step) * (1 + 1.0F / step));
  HomographyCreator homographyCreator = new HomographyCreator(3, 2, nbPoints);

  // Sample a 3x3 grid of normalized screen points (corners, edge midpoints, center)
  // and project each one onto the paper plane.
  for (float i = 0; i <= 1.0; i += step) {
    for (float j = 0; j <= 1.0; j += step) {
      PVector screenPoint = new PVector(i, j);
      PVector kinectPoint = new PVector();

      PVector inter = projector.getProjectedPointOnPlane(planeCalibCam, i, j);
      if (inter == null) {
        return HomographyCalibration.INVALID;
      }

      // Move the intersection point into the Kinect camera's coordinate system.
      kinectCameraExtrinsics.mult(inter, kinectPoint);
      homographyCreator.addPoint(kinectPoint, screenPoint);
    }
  }
  return homographyCreator.getHomography();
}
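A hedged usage sketch for Example 3: assuming the static method lives in a calibration helper class (called CalibrationHelper here purely for illustration), building and checking the homography could look like this.

HomographyCalibration homography =
    CalibrationHelper.computeScreenPaperIntersection(projector, planeCalibCam, kinectCameraExtrinsics);
if (homography == HomographyCalibration.INVALID) {
  // At least one grid point missed the paper plane, so the calibration is unusable.
  System.out.println("Screen/paper intersection failed.");
}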
Example 4: getProjectedPointOnPlane
import processing.core.PMatrix3D; // import the package/class the method depends on
/**
 * Computes the 3D coordinates of a projected pixel in the tracking camera's
 * coordinate system.
 *
 * @param planeCalibCam projection plane
 * @param px x coordinate of the projected pixel (normalized, 0..1)
 * @param py y coordinate of the projected pixel (normalized, 0..1)
 * @return the intersection with the plane in camera coordinates, or null if the ray does not hit it
 */
public PVector getProjectedPointOnPlane(PlaneCalibration planeCalibCam, float px, float py) {
  // Create ray from the projector (origin / viewed pixel)
  // Intersect this ray with the piece of paper.

  // Compute the two points for the ray
  PVector originP = new PVector(0, 0, 0);
  PVector viewedPtP = getProjectiveDeviceP().pixelToWorldNormalized(px, py);

  // Pass it to the camera point of view (origin)
  PMatrix3D proCamExtrinsics = getExtrinsicsInv();
  PVector originC = new PVector();
  PVector viewedPtC = new PVector();
  proCamExtrinsics.mult(originP, originC);
  proCamExtrinsics.mult(viewedPtP, viewedPtC);

  // Second argument is a direction
  viewedPtC.sub(originC);

  Ray3D ray = new Ray3D(new Vec3D(originC.x, originC.y, originC.z),
                        new Vec3D(viewedPtC.x, viewedPtC.y, viewedPtC.z));

  // Intersect ray with plane
  ReadonlyVec3D inter = planeCalibCam.getPlane().getIntersectionWithRay(ray);
  if (inter == null) {
    return null;
  }
  return new PVector(inter.x(), inter.y(), inter.z());
}
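Example 3 above already shows the typical call pattern for this method; as a minimal isolated sketch, with projector and planeCalibCam assumed to be an already-calibrated ProjectorDisplay and PlaneCalibration:

// Project the projector's center pixel (normalized coordinates 0.5, 0.5) onto the tracked plane.
PVector onPlane = projector.getProjectedPointOnPlane(planeCalibCam, 0.5f, 0.5f);
if (onPlane == null) {
  System.out.println("The ray through (0.5, 0.5) does not hit the paper plane.");
} else {
  System.out.println("Intersection in camera coordinates: " + onPlane);
}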
Example 5: projectPointer
import processing.core.PMatrix3D; // import the package/class the method depends on
public PVector projectPointer(Screen screen, float x, float y) {
  PMatrix3D screenMat = screen.getLocation(new PMatrix3D());
  screenMat.invert();

  PVector transformed = new PVector();
  screenMat.mult(new PVector(x * drawingSizeX, y * drawingSizeY), transformed);
  return transformed;
}
Example 6: getBoardLocation
import processing.core.PMatrix3D; // import the package/class the method depends on
public PVector getBoardLocation(Camera camera, ARDisplay display) {
  int id = getId(camera);
  PVector v = getPositionVector(id);

  // Apply extrinsics if required.
  if (display.hasExtrinsics()) {
    PMatrix3D extr = display.getExtrinsics();
    PVector v2 = new PVector();
    extr.mult(v, v2);
    v = v2;
  }

  PVector px = display.getProjectiveDeviceP().worldToPixel(v, true);
  return px;
}
Example 7: projectPointer3DVec3D
import processing.core.PMatrix3D; // import the package/class the method depends on
protected ReadonlyVec3D projectPointer3DVec3D(Screen screen, float px, float py) {
  // Create ray from the projector (origin / viewed pixel)
  // Intersect this ray with the piece of paper.

  // Compute the two points for the ray
  PVector originP = new PVector(0, 0, 0);
  PVector viewedPtP = projectiveDeviceP.pixelToWorldNormP((int) (px * frameWidth), (int) (py * frameHeight));

  // Pass it to the camera point of view (origin)
  PMatrix3D extr = extrinsicsInv;
  PVector originC = new PVector();
  PVector viewedPtC = new PVector();
  extr.mult(originP, originC);
  extr.mult(viewedPtP, viewedPtC);

  // Second argument is a direction
  viewedPtC.sub(originC);

  Ray3D ray = new Ray3D(new Vec3D(originC.x, originC.y, originC.z),
                        new Vec3D(viewedPtC.x, viewedPtC.y, viewedPtC.z));

  // Intersect ray with Plane
  // TODO: Do not use screen.getPlane !!!
  ReadonlyVec3D inter = screen.getPlane().getIntersectionWithRay(ray);

  // It may not intersect.
  if (inter == null) {
    return null;
  } else {
    return inter;
  }

  // Check the error of the ray casting -- Debug only
  // PVector inter1P = new PVector();
  // projExtrinsicsP3D.mult(interP, inter1P);
  // PVector px2 = projectiveDeviceP.worldToPixel(inter1P, false);
  // px2.sub(px * frameWidth, py * frameHeight, 0);
  // System.out.println("Error " + px2.mag());
}
Example 8: initDraw
import processing.core.PMatrix3D; // import the package/class the method depends on
public void initDraw(Camera cam, PVector userPos, float nearPlane, float farPlane, boolean isAnaglyph, boolean isLeft, boolean isOnly) {
  PGraphicsOpenGL graphics = getGraphics();

  if (initPosM == null) {
    this.isOpenGL = true;

    // Transformation Camera -> Marker
    initPosM = this.getLocation(cam);
    initPosM.translate(this.getDrawSizeX() / 2, this.getDrawSizeY() / 2);
    // Everything is relative to the paper's center, not its corner.
    initPosM.scale(-1, 1, -1);
  }

  // get the current transformation...
  PMatrix3D newPos = this.getLocation(cam);
  newPos.translate(this.getDrawSizeX() / 2, this.getDrawSizeY() / 2);
  newPos.scale(-1, 1, -1);

  newPos.invert();
  newPos.apply(initPosM);

  PVector user = new PVector();
  if (isAnaglyph && isLeft) {
    userPos.add(-halfEyeDist * 2, 0, 0);
  }
  newPos.mult(userPos, user);
  PVector paperCameraPos = user;

  // Camera must look perpendicular to the screen.
  graphics.camera(paperCameraPos.x, paperCameraPos.y, paperCameraPos.z,
                  paperCameraPos.x, paperCameraPos.y, 0,
                  0, 1, 0);

  // Off-axis (asymmetric) frustum computed from the tracked head position, see:
  // http://www.gamedev.net/topic/597564-view-and-projection-matrices-for-vr-window-using-head-tracking/
  float nearFactor = nearPlane / paperCameraPos.z;
  float left = nearFactor * (-size.x / 2f - paperCameraPos.x);
  float right = nearFactor * (size.x / 2f - paperCameraPos.x);
  float top = nearFactor * (size.y / 2f - paperCameraPos.y);
  float bottom = nearFactor * (-size.y / 2f - paperCameraPos.y);
  graphics.frustum(left, right, bottom, top, nearPlane, farPlane);
  graphics.projection.m11 = -graphics.projection.m11;

  // No detection?
  PMatrix3D transformation = this.getLocation(cam);
  if (transformation.m03 == 0 && transformation.m13 == 0 && transformation.m23 == 0) {
    resetPos();
  }
}