This article collects typical usage examples of the C++ method BL::Object::name. If you have been wondering what BL::Object::name does, how it is used, or what real code that calls it looks like, the curated examples below may help. You can also explore further how the enclosing class BL::Object
is used.
The following presents 10 code examples of the Object::name method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
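Before the full examples, here is a minimal, self-contained sketch of the two patterns that recur below: logging an object's name and looking up a scene object by comparing names. It assumes the RNA_blender_cpp.h wrapper that defines BL::Object is available on the include path; the helper demo_lookup_by_name, the DemoObject struct and the scene_objects parameter are hypothetical and exist purely for illustration, they are not part of the Blender or Cycles API.
#include <cstring>
#include <iostream>
#include <string>
#include <vector>
#include "RNA_blender_cpp.h" /* provides BL::Object (assumption: header available) */
/* Hypothetical stand-in for a Cycles scene object that stores its name. */
struct DemoObject {
  std::string name;
};
/* BL::Object::name() returns the object's datablock name, which the examples
 * below either stream into VLOG() for logging or compare with strcmp() to
 * find the matching Cycles object. */
int demo_lookup_by_name(BL::Object& b_ob, const std::vector<DemoObject*>& scene_objects)
{
  std::cout << "Syncing object " << b_ob.name() << std::endl;
  for(size_t i = 0; i < scene_objects.size(); i++) {
    if(strcmp(scene_objects[i]->name.c_str(), b_ob.name().c_str()) == 0)
      return (int)i; /* index of the object matching b_ob */
  }
  return -1; /* not found */
}
Examples 2 and 10 below use exactly this strcmp() comparison to recover the index of the object being baked, while the sync examples use the name only for logging.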
Example 1: sync_camera_motion
void BlenderSync::sync_camera_motion(BL::RenderSettings& b_render,
BL::Object& b_ob,
int width, int height,
float motion_time)
{
if(!b_ob)
return;
Camera *cam = scene->camera;
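/* fetch the camera object's model matrix from Blender and convert it to Cycles' camera convention */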
BL::Array<float, 16> b_ob_matrix;
b_engine.camera_model_matrix(b_ob, b_ob_matrix);
Transform tfm = get_transform(b_ob_matrix);
tfm = blender_camera_matrix(tfm, cam->type, cam->panorama_type);
if(tfm != cam->matrix) {
VLOG(1) << "Camera " << b_ob.name() << " motion detected.";
if(motion_time == -1.0f) {
cam->motion.pre = tfm;
cam->use_motion = true;
}
else if(motion_time == 1.0f) {
cam->motion.post = tfm;
cam->use_motion = true;
}
}
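/* for perspective cameras, also detect field-of-view changes between motion steps */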
if(cam->type == CAMERA_PERSPECTIVE) {
BlenderCamera bcam;
float aspectratio, sensor_size;
blender_camera_init(&bcam, b_render);
blender_camera_from_object(&bcam, b_engine, b_ob);
blender_camera_viewplane(&bcam,
width, height,
NULL,
&aspectratio,
&sensor_size);
/* TODO(sergey): De-duplicate calculation with camera sync. */
float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
if(fov != cam->fov) {
VLOG(1) << "Camera " << b_ob.name() << " FOV change detected.";
if(motion_time == -1.0f) {
cam->fov_pre = fov;
cam->use_perspective_motion = true;
}
else if(motion_time == 1.0f) {
cam->fov_post = fov;
cam->use_perspective_motion = true;
}
}
}
}
Example 2: bake
void BlenderSession::bake(BL::Object b_object, const string& pass_type, BL::BakePixel pixel_array, int num_pixels, int depth, float result[])
{
ShaderEvalType shader_type = get_shader_type(pass_type);
size_t object_index = ~0;
int tri_offset = 0;
if(shader_type == SHADER_EVAL_UV) {
/* force UV to be available */
Pass::add(PASS_UV, scene->film->passes);
}
if(is_light_pass(shader_type)) {
/* force use_light_pass to be true */
Pass::add(PASS_LIGHT, scene->film->passes);
}
/* create device and update scene */
scene->film->tag_update(scene);
scene->integrator->tag_update(scene);
/* update scene */
sync->sync_camera(b_render, b_engine.camera_override(), width, height);
sync->sync_data(b_v3d, b_engine.camera_override(), &python_thread_state);
/* get buffer parameters */
SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_scene, b_v3d, b_rv3d, scene->camera, width, height);
/* set number of samples */
session->tile_manager.set_samples(session_params.samples);
session->reset(buffer_params, session_params.samples);
session->update_scene();
/* find object index. todo: is arbitrary - copied from mesh_displace.cpp */
for(size_t i = 0; i < scene->objects.size(); i++) {
if(strcmp(scene->objects[i]->name.c_str(), b_object.name().c_str()) == 0) {
object_index = i;
tri_offset = scene->objects[i]->mesh->tri_offset;
break;
}
}
/* when used, non-instanced convention: object = ~object */
int object = ~object_index;
BakeData *bake_data = scene->bake_init(object, tri_offset, num_pixels);
populate_bake_data(bake_data, pixel_array, num_pixels);
scene->bake(shader_type, bake_data, result);
/* free all memory used (host and device), so we wouldn't leave render
* engine with extra memory allocated
*/
session->device_free();
delete sync;
sync = NULL;
}
Example 3: sync_object
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint visibility)
{
/* light is handled separately */
if(object_is_light(b_ob)) {
sync_light(b_parent, b_index, b_ob, tfm);
return;
}
/* only interested in object that we can create meshes from */
if(!object_is_mesh(b_ob))
return;
/* test if we need to sync */
ObjectKey key(b_parent, b_index, b_ob);
Object *object;
bool object_updated = false;
if(object_map.sync(&object, b_ob, b_parent, key))
object_updated = true;
/* mesh sync */
object->mesh = sync_mesh(b_ob, object_updated);
/* object sync */
if(object_updated || (object->mesh && object->mesh->need_update)) {
object->name = b_ob.name().c_str();
object->tfm = tfm;
object->visibility = object_ray_visibility(b_ob) & visibility;
if(b_parent.ptr.data != b_ob.ptr.data)
object->visibility &= object_ray_visibility(b_parent);
object->tag_update(scene);
}
}
Example 4: sync_object
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag)
{
/* light is handled separately */
if(object_is_light(b_ob)) {
sync_light(b_parent, b_index, b_ob, tfm);
return;
}
/* only interested in object that we can create meshes from */
if(!object_is_mesh(b_ob))
return;
/* test if we need to sync */
ObjectKey key(b_parent, b_index, b_ob);
Object *object;
bool object_updated = false;
if(object_map.sync(&object, b_ob, b_parent, key))
object_updated = true;
/* holdout? */
bool holdout = (layer_flag & render_layer.holdout_layer) != 0;
/* mesh sync */
object->mesh = sync_mesh(b_ob, holdout, object_updated);
/* object sync */
if(object_updated || (object->mesh && object->mesh->need_update)) {
object->name = b_ob.name().c_str();
object->pass_id = b_ob.pass_index();
object->tfm = tfm;
/* visibility flags for both parent and child */
object->visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL;
if(b_parent.ptr.data != b_ob.ptr.data)
object->visibility &= object_ray_visibility(b_parent);
/* camera flag is not actually used, instead is tested
against render layer flags */
if(object->visibility & PATH_RAY_CAMERA) {
object->visibility |= layer_flag << PATH_RAY_LAYER_SHIFT;
object->visibility &= ~PATH_RAY_CAMERA;
}
object->tag_update(scene);
}
}
Example 5: sync_camera_motion
void BlenderSync::sync_camera_motion(BL::Object b_ob, float motion_time)
{
Camera *cam = scene->camera;
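/* get the camera object's world matrix from Blender and convert it to Cycles' camera convention */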
BL::Array<float, 16> b_ob_matrix;
b_engine.camera_model_matrix(b_ob, b_ob_matrix);
Transform tfm = get_transform(b_ob_matrix);
tfm = blender_camera_matrix(tfm, cam->type);
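/* if the transform changed, record it as the pre- or post-frame motion matrix */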
if(tfm != cam->matrix) {
VLOG(1) << "Camera " << b_ob.name() << " motion detected.";
if(motion_time == -1.0f) {
cam->motion.pre = tfm;
cam->use_motion = true;
}
else if(motion_time == 1.0f) {
cam->motion.post = tfm;
cam->use_motion = true;
}
}
}
Example 6: sync_mesh_motion
//......... part of the code is omitted here .........
if (attr_mP) {
Attribute *attr_mN = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_NORMAL);
Attribute *attr_N = mesh->attributes.find(ATTR_STD_VERTEX_NORMAL);
float3 *P = &mesh->verts[0];
float3 *N = (attr_N) ? attr_N->data_float3() : NULL;
memcpy(attr_mP->data_float3() + motion_step * numverts, P, sizeof(float3) * numverts);
if (attr_mN)
memcpy(attr_mN->data_float3() + motion_step * numverts, N, sizeof(float3) * numverts);
}
}
if (numkeys) {
/* curves */
Attribute *attr_mP = mesh->curve_attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mP) {
float3 *keys = &mesh->curve_keys[0];
memcpy(attr_mP->data_float3() + motion_step * numkeys, keys, sizeof(float3) * numkeys);
}
}
return;
}
/* TODO(sergey): Perform preliminary check for number of vertices. */
if (numverts) {
/* Find attributes. */
Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
Attribute *attr_mN = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_NORMAL);
Attribute *attr_N = mesh->attributes.find(ATTR_STD_VERTEX_NORMAL);
bool new_attribute = false;
/* Add new attributes if they don't exist already. */
if (!attr_mP) {
attr_mP = mesh->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_N)
attr_mN = mesh->attributes.add(ATTR_STD_MOTION_VERTEX_NORMAL);
new_attribute = true;
}
/* Load vertex data from mesh. */
float3 *mP = attr_mP->data_float3() + motion_step * numverts;
float3 *mN = (attr_mN) ? attr_mN->data_float3() + motion_step * numverts : NULL;
/* NOTE: We don't copy more than the existing amount of vertices to prevent
* possible memory corruption.
*/
BL::Mesh::vertices_iterator v;
int i = 0;
for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end() && i < numverts; ++v, ++i) {
mP[i] = get_float3(v->co());
if (mN)
mN[i] = get_float3(v->normal());
}
if (new_attribute) {
/* In case of new attribute, we verify if there really was any motion. */
if (b_mesh.vertices.length() != numverts ||
memcmp(mP, &mesh->verts[0], sizeof(float3) * numverts) == 0) {
/* no motion, remove attributes again */
if (b_mesh.vertices.length() != numverts) {
VLOG(1) << "Topology differs, disabling motion blur for object " << b_ob.name();
}
else {
VLOG(1) << "No actual deformation motion for object " << b_ob.name();
}
mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mN)
mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_NORMAL);
}
else if (motion_step > 0) {
VLOG(1) << "Filling deformation motion for object " << b_ob.name();
/* motion, fill up previous steps that we might have skipped because
* they had no motion, but we need them anyway now */
float3 *P = &mesh->verts[0];
float3 *N = (attr_N) ? attr_N->data_float3() : NULL;
for (int step = 0; step < motion_step; step++) {
memcpy(attr_mP->data_float3() + step * numverts, P, sizeof(float3) * numverts);
if (attr_mN)
memcpy(attr_mN->data_float3() + step * numverts, N, sizeof(float3) * numverts);
}
}
}
else {
if (b_mesh.vertices.length() != numverts) {
VLOG(1) << "Topology differs, discarding motion blur for object " << b_ob.name()
<< " at time " << motion_step;
memcpy(mP, &mesh->verts[0], sizeof(float3) * numverts);
if (mN != NULL) {
memcpy(mN, attr_N->data_float3(), sizeof(float3) * numverts);
}
}
}
}
/* hair motion */
if (numkeys)
sync_curves(mesh, b_mesh, b_ob, true, motion_step);
/* free derived mesh */
free_object_to_mesh(b_data, b_ob, b_mesh);
}
Example 7: sync_light
void BlenderSync::sync_light(BL::Object& b_parent,
int persistent_id[OBJECT_PERSISTENT_ID_SIZE],
BL::Object& b_ob,
BL::DupliObject& b_dupli_ob,
Transform& tfm,
bool *use_portal)
{
/* test if we need to sync */
Light *light;
ObjectKey key(b_parent, persistent_id, b_ob);
if(!light_map.sync(&light, b_ob, b_parent, key)) {
if(light->is_portal)
*use_portal = true;
return;
}
BL::Lamp b_lamp(b_ob.data());
/* type */
switch(b_lamp.type()) {
case BL::Lamp::type_POINT: {
BL::PointLamp b_point_lamp(b_lamp);
light->size = b_point_lamp.shadow_soft_size();
light->type = LIGHT_POINT;
break;
}
case BL::Lamp::type_SPOT: {
BL::SpotLamp b_spot_lamp(b_lamp);
light->size = b_spot_lamp.shadow_soft_size();
light->type = LIGHT_SPOT;
light->spot_angle = b_spot_lamp.spot_size();
light->spot_smooth = b_spot_lamp.spot_blend();
break;
}
case BL::Lamp::type_HEMI: {
light->type = LIGHT_DISTANT;
light->size = 0.0f;
break;
}
case BL::Lamp::type_SUN: {
BL::SunLamp b_sun_lamp(b_lamp);
light->size = b_sun_lamp.shadow_soft_size();
light->type = LIGHT_DISTANT;
break;
}
case BL::Lamp::type_AREA: {
BL::AreaLamp b_area_lamp(b_lamp);
light->size = 1.0f;
light->axisu = transform_get_column(&tfm, 0);
light->axisv = transform_get_column(&tfm, 1);
light->sizeu = b_area_lamp.size();
if(b_area_lamp.shape() == BL::AreaLamp::shape_RECTANGLE)
light->sizev = b_area_lamp.size_y();
else
light->sizev = light->sizeu;
light->type = LIGHT_AREA;
break;
}
}
/* location and (inverted!) direction */
light->co = transform_get_column(&tfm, 3);
light->dir = -transform_get_column(&tfm, 2);
light->tfm = tfm;
/* shader */
vector<Shader*> used_shaders;
find_shader(b_lamp, used_shaders, scene->default_light);
light->shader = used_shaders[0];
/* shadow */
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
PointerRNA clamp = RNA_pointer_get(&b_lamp.ptr, "cycles");
light->cast_shadow = get_boolean(clamp, "cast_shadow");
light->use_mis = get_boolean(clamp, "use_multiple_importance_sampling");
int samples = get_int(clamp, "samples");
if(get_boolean(cscene, "use_square_samples"))
light->samples = samples * samples;
else
light->samples = samples;
light->max_bounces = get_int(clamp, "max_bounces");
if(b_dupli_ob) {
light->random_id = b_dupli_ob.random_id();
}
else {
light->random_id = hash_int_2d(hash_string(b_ob.name().c_str()), 0);
}
if(light->type == LIGHT_AREA)
light->is_portal = get_boolean(clamp, "is_portal");
else
light->is_portal = false;
if(light->is_portal)
*use_portal = true;
//......... part of the code is omitted here .........
Example 8: sync_object
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag, int motion)
{
/* light is handled separately */
if(object_is_light(b_ob)) {
if(!motion)
sync_light(b_parent, b_index, b_ob, tfm);
return;
}
/* only interested in object that we can create meshes from */
if(!object_is_mesh(b_ob))
return;
/* key to lookup object */
ObjectKey key(b_parent, b_index, b_ob);
Object *object;
/* motion vector case */
if(motion) {
object = object_map.find(key);
if(object) {
if(tfm != object->tfm) {
if(motion == -1)
object->motion.pre = tfm;
else
object->motion.post = tfm;
object->use_motion = true;
}
sync_mesh_motion(b_ob, object->mesh, motion);
}
return;
}
/* test if we need to sync */
bool object_updated = false;
if(object_map.sync(&object, b_ob, b_parent, key))
object_updated = true;
bool use_holdout = (layer_flag & render_layer.holdout_layer) != 0;
/* mesh sync */
object->mesh = sync_mesh(b_ob, object_updated);
if(use_holdout != object->use_holdout) {
object->use_holdout = use_holdout;
scene->object_manager->tag_update(scene);
}
/* object sync */
if(object_updated || (object->mesh && object->mesh->need_update)) {
object->name = b_ob.name().c_str();
object->pass_id = b_ob.pass_index();
object->tfm = tfm;
object->motion.pre = tfm;
object->motion.post = tfm;
object->use_motion = false;
/* visibility flags for both parent and child */
object->visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL;
if(b_parent.ptr.data != b_ob.ptr.data)
object->visibility &= object_ray_visibility(b_parent);
/* camera flag is not actually used, instead is tested
against render layer flags */
if(object->visibility & PATH_RAY_CAMERA) {
object->visibility |= layer_flag << PATH_RAY_LAYER_SHIFT;
object->visibility &= ~PATH_RAY_CAMERA;
}
object->tag_update(scene);
}
}
Example 9: key
//......... part of the code is omitted here .........
/* test if we need to sync */
bool object_updated = false;
if (object_map.sync(&object, b_ob, b_parent, key))
object_updated = true;
/* mesh sync */
object->mesh = sync_mesh(
b_depsgraph, b_ob, b_ob_instance, object_updated, show_self, show_particles);
/* special case not tracked by object update flags */
/* holdout */
if (use_holdout != object->use_holdout) {
object->use_holdout = use_holdout;
scene->object_manager->tag_update(scene);
object_updated = true;
}
if (visibility != object->visibility) {
object->visibility = visibility;
object_updated = true;
}
bool is_shadow_catcher = get_boolean(cobject, "is_shadow_catcher");
if (is_shadow_catcher != object->is_shadow_catcher) {
object->is_shadow_catcher = is_shadow_catcher;
object_updated = true;
}
/* sync the asset name for Cryptomatte */
BL::Object parent = b_ob.parent();
ustring parent_name;
if (parent) {
while (parent.parent()) {
parent = parent.parent();
}
parent_name = parent.name();
}
else {
parent_name = b_ob.name();
}
if (object->asset_name != parent_name) {
object->asset_name = parent_name;
object_updated = true;
}
/* object sync
* transform comparison should not be needed, but duplis don't work perfectly
* in the depsgraph and may not signal changes, so this is a workaround */
if (object_updated || (object->mesh && object->mesh->need_update) || tfm != object->tfm) {
object->name = b_ob.name().c_str();
object->pass_id = b_ob.pass_index();
object->tfm = tfm;
object->motion.clear();
/* motion blur */
Scene::MotionType need_motion = scene->need_motion();
if (need_motion != Scene::MOTION_NONE && object->mesh) {
Mesh *mesh = object->mesh;
mesh->use_motion_blur = false;
mesh->motion_steps = 0;
uint motion_steps;
Example 10: bake
void BlenderSession::bake(BL::Object b_object, const string& pass_type, const int object_id, BL::BakePixel pixel_array, const size_t num_pixels, const int /*depth*/, float result[])
{
ShaderEvalType shader_type = get_shader_type(pass_type);
size_t object_index = OBJECT_NONE;
int tri_offset = 0;
/* Set baking flag in advance, so kernel loading can check if we need
* any baking capabilities.
*/
scene->bake_manager->set_baking(true);
/* ensure kernels are loaded before we do any scene updates */
session->load_kernels();
if(session->progress.get_cancel())
return;
if(shader_type == SHADER_EVAL_UV) {
/* force UV to be available */
Pass::add(PASS_UV, scene->film->passes);
}
if(BakeManager::is_light_pass(shader_type)) {
/* force use_light_pass to be true */
Pass::add(PASS_LIGHT, scene->film->passes);
}
/* create device and update scene */
scene->film->tag_update(scene);
scene->integrator->tag_update(scene);
/* update scene */
sync->sync_camera(b_render, b_engine.camera_override(), width, height);
sync->sync_data(b_render,
b_v3d,
b_engine.camera_override(),
width, height,
&python_thread_state,
b_rlay_name.c_str());
/* get buffer parameters */
SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);
scene->bake_manager->set_shader_limit((size_t)b_engine.tile_x(), (size_t)b_engine.tile_y());
/* set number of samples */
session->tile_manager.set_samples(session_params.samples);
session->reset(buffer_params, session_params.samples);
session->update_scene();
/* find object index. todo: is arbitrary - copied from mesh_displace.cpp */
for(size_t i = 0; i < scene->objects.size(); i++) {
if(strcmp(scene->objects[i]->name.c_str(), b_object.name().c_str()) == 0) {
object_index = i;
tri_offset = scene->objects[i]->mesh->tri_offset;
break;
}
}
/* when used, non-instanced convention: object = ~object */
int object = ~object_index;
BakeData *bake_data = scene->bake_manager->init(object, tri_offset, num_pixels);
populate_bake_data(bake_data, object_id, pixel_array, num_pixels);
/* set number of samples */
session->tile_manager.set_samples(session_params.samples);
session->reset(buffer_params, session_params.samples);
session->update_scene();
session->progress.set_update_callback(function_bind(&BlenderSession::update_bake_progress, this));
scene->bake_manager->bake(scene->device, &scene->dscene, scene, session->progress, shader_type, bake_data, result);
/* free all memory used (host and device), so we wouldn't leave render
* engine with extra memory allocated
*/
session->device_free();
delete sync;
sync = NULL;
}