本文整理汇总了Python中mathutils.Vector.to_4d方法的典型用法代码示例。如果您正苦于以下问题:Python Vector.to_4d方法的具体用法?Python Vector.to_4d怎么用?Python Vector.to_4d使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mathutils.Vector
的用法示例。
在下文中一共展示了Vector.to_4d方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: extrusion_to_matrix
# 需要导入模块: from mathutils import Vector [as 别名]
# 或者: from mathutils.Vector import to_4d [as 别名]
def extrusion_to_matrix(entity):
    """
    Build the 4x4 rotation/translation matrix mapping world coordinates into
    the entity's own coordinate system, as described by its DXF extrusion
    vector (the entity's local Z axis).
    """
    def arbitrary_x_axis(normal):
        # DXF "Arbitrary Axis Algorithm": pick world Y as the crossing
        # partner when the normal is (nearly) parallel to world Z,
        # otherwise use world Z.  The 1/64 threshold comes from the spec.
        if abs(normal[0]) < 1 / 64 and abs(normal[1]) < 1 / 64:
            x_axis = Vector((0, 1, 0)).cross(normal)
        else:
            x_axis = Vector((0, 0, 1)).cross(normal)
        x_axis.normalize()
        return x_axis, normal.cross(x_axis)

    z_axis = Vector(entity.extrusion)
    x_axis, y_axis = arbitrary_x_axis(z_axis)
    # Promote the three basis vectors to homogeneous form with w = 0
    # (directions, not positions).
    rows = []
    for axis in (x_axis, y_axis, z_axis):
        row = axis.to_4d()
        row[3] = 0
        rows.append(row)
    offset = Vector((0, 0, 0, 1))
    if hasattr(entity, "elevation"):
        # Elevation may be a full 3D point or a scalar offset along the
        # extrusion direction.
        if type(entity.elevation) is tuple:
            offset = Vector(entity.elevation).to_4d()
        else:
            offset = (z_axis * entity.elevation).to_4d()
    # Rows were assembled as basis vectors; transpose to get the
    # column-vector convention Blender expects.
    return Matrix((rows[0], rows[1], rows[2], offset)).transposed()
示例2: scan_advanced
# 需要导入模块: from mathutils import Vector [as 别名]
# 或者: from mathutils.Vector import to_4d [as 别名]
#.........这里部分代码省略.........
abs(camera_returns[i][3]) <= max_distance and
abs(camera_returns[i][3]) >= min_distance):
"""The ray hit the projected ray, so this is a valid measurement"""
projector_point = get_uv_from_idx(projector_idx, res_x,res_y)
camera_x = get_pixel_from_world(camera_rays[idx*3],camera_rays[idx*3+2],
flength/pixel_width) + random.gauss(noise_mu, noise_sigma)
camera_y = get_pixel_from_world(camera_rays[idx*3+1],camera_rays[idx*3+2],
flength/pixel_width)
""" Kinect calculates the disparity with an accuracy of 1/8 pixel"""
camera_x_quantized = round(camera_x*8.0)/8.0
#I don't know if this accurately represents the kinect
camera_y_quantized = round(camera_y*8.0)/8.0
disparity_quantized = camera_x_quantized + projector_point[0]
if projector_idx >= 0:
all_quantized_disparities[projector_idx] = disparity_quantized
processed_disparities = numpy.empty(res_x*res_y)
fast_9x9_window(all_quantized_disparities, res_x, res_y, processed_disparities, noise_smooth, noise_scale)
"""We reuse the vector objects to spare us the object creation every
time
"""
v = Vector([0.0,0.0,0.0])
vn = Vector([0.0,0.0,0.0])
"""Check if the rays of the camera meet with the rays of the projector and
add them as valid returns if they do"""
image_idx = 0
for i in range(len(camera_returns)):
idx = camera_returns[i][-1]
projector_idx = projector_ray_index[idx] # Get the index of the original ray
camera_x,camera_y = get_uv_from_idx(projector_idx, res_x,res_y)
if projector_idx >= 0:
disparity_quantized = processed_disparities[projector_idx]
else:
disparity_quantized = INVALID_DISPARITY
if disparity_quantized < INVALID_DISPARITY and disparity_quantized != 0.0:
disparity_quantized = -disparity_quantized
Z_quantized = (flength*(baseline.x))/(disparity_quantized*pixel_width)
X_quantized = baseline.x+Z_quantized*camera_x*pixel_width/flength
Y_quantized = baseline.y+Z_quantized*camera_y*pixel_width/flength
Z_quantized = -(Z_quantized+baseline.z)
v.xyz=[x_multiplier*(returns[idx][1]+baseline.x),\
y_multiplier*(returns[idx][2]+baseline.y),\
z_multiplier*(returns[idx][3]+baseline.z)]
vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)
vt = (world_transformation * v.to_4d()).xyz
verts.append ( vt )
vn.xyz = [x_multiplier*X_quantized,y_multiplier*Y_quantized,z_multiplier*Z_quantized]
vector_length_noise = vn.magnitude
#[email protected]: prevent object creation here too
v_noise = (world_transformation * vn.to_4d()).xyz
verts_noise.append( v_noise )
kinect_image[projector_idx] = [ray_info[projector_idx][2],
0.0, 0.0, -returns[idx][3], -Z_quantized, vt[0],
vt[1], vt[2], v_noise[0], v_noise[1], v_noise[2],
returns[idx][4], returns[idx][5][0], returns[idx][5][1],
returns[idx][5][2],projector_idx]
image_idx += 1
else:
"""Occlusion"""
pass
for e in kinect_image:
evd_storage.addEntry(timestamp = e[0], yaw = e[1], pitch=e[2],
distance=e[3], distance_noise=e[4], x=e[5], y=e[6], z=e[7],
x_noise=e[8], y_noise=e[9], z_noise=e[10], object_id=e[11],
color=[e[12],e[13],e[14]], idx=e[15])
if evd_file:
evd_storage.appendEvdFile()
if add_blender_mesh:
mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)
if add_noisy_blender_mesh:
mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation)
bpy.context.scene.update()
end_time = time.time()
scan_time = end_time-start_time
print ("Elapsed time: %.3f"%(scan_time))
return True, 0.0, scan_time
示例3: distance
# 需要导入模块: from mathutils import Vector [as 别名]
# 或者: from mathutils.Vector import to_4d [as 别名]
def distance(self, other:Vector):
    """Return the signed distance from the plane to the given point.

    ``self`` is treated as a 4D plane vector (a, b, c, d); ``other`` is
    promoted to homogeneous coordinates (x, y, z, 1), so the dot product
    evaluates a*x + b*y + c*z + d.
    """
    homogeneous = other.to_4d()
    return self.dot(homogeneous)
示例4: __new__
# 需要导入模块: from mathutils import Vector [as 别名]
# 或者: from mathutils.Vector import to_4d [as 别名]
def __new__(cls, location=Vector(), normal=ZAXIS, rotation=Quaternion()):
    """Create a plane encoded as a 4D vector (nx, ny, nz, -n·p).

    The first three components hold the unit plane normal and the fourth
    the offset, so dotting the result with a homogeneous point yields its
    signed distance to the plane.  ``rotation`` is accepted for interface
    compatibility but is not used in this constructor.
    """
    origin = Vector(location)
    unit_normal = Vector(normal).normalized()
    plane = unit_normal.to_4d()
    plane[3] = -unit_normal.dot(origin)  # w component stores -n·p
    return Vector.__new__(cls, plane)