This article collects typical usage examples of the additional_buffer_smart_ptr::at method in C++. If you are wondering how additional_buffer_smart_ptr::at is used in practice, the curated examples below may help; you can also explore further usage examples of the enclosing class, additional_buffer_smart_ptr.
One code example of the additional_buffer_smart_ptr::at method is shown below. Examples are sorted by popularity by default; upvoting the ones you find useful helps the system recommend better C++ examples.
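Before the example, a minimal sketch of the call pattern may help. It assumes (as appears to be the case in nnForge's plain backend) that additional_buffer_smart_ptr is a shared pointer to a std::vector<float>; at(i) is then the vector's bounds-checked element access, and taking the address of at(0) is a common pre-C++11 way to obtain a raw pointer to the buffer's contiguous storage:

	#include <memory>
	#include <vector>

	// Assumed typedef, mirroring nnForge's plain backend:
	typedef std::shared_ptr<std::vector<float> > additional_buffer_smart_ptr;

	int main()
	{
		additional_buffer_smart_ptr buffer(new std::vector<float>(16, 0.0F));
		buffer->at(3) = 1.5F;            // bounds-checked write (throws std::out_of_range if invalid)
		float * raw = &(buffer->at(0));  // raw pointer to contiguous storage, as in the example below
		return raw[3] == 1.5F ? 0 : 1;
	}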
Example 1: test
void dropout_layer_updater_plain::test(
	const_additional_buffer_smart_ptr input_buffer,
	additional_buffer_smart_ptr output_buffer,
	std::vector<additional_buffer_smart_ptr>& additional_buffers,
	plain_running_configuration_const_smart_ptr plain_config,
	const_layer_smart_ptr layer_schema,
	const_layer_data_smart_ptr data,
	const_layer_data_custom_smart_ptr data_custom,
	const layer_configuration_specific& input_configuration_specific,
	const layer_configuration_specific& output_configuration_specific,
	unsigned int updater_count,
	unsigned int offset_input_entry_id,
	bool force_deterministic) const
{
	// This updater cannot operate on a sub-range of input entries.
	if (offset_input_entry_id > 0)
		throw neural_network_exception("dropout_layer_updater_plain is not able to run using offset");

	if (force_deterministic)
	{
		// Deterministic mode: inverted dropout degenerates to the identity,
		// so copy the input buffer straight to the output buffer.
		memcpy(&(output_buffer->at(0)), &(input_buffer->at(0)), input_configuration_specific.get_neuron_count() * updater_count * sizeof(float));
	}
	else
	{
		const std::vector<float>::const_iterator in_it_global = input_buffer->begin();
		const std::vector<float>::iterator out_it_global = output_buffer->begin();
		// The first additional buffer stores the per-element keep/drop mask;
		// &at(0) yields the address of its contiguous storage, reinterpreted as bytes.
		unsigned char * keep_elem_ptr = reinterpret_cast<unsigned char *>(&(additional_buffers[0]->at(0)));

		nnforge_shared_ptr<const dropout_layer> layer_derived = nnforge_dynamic_pointer_cast<const dropout_layer>(layer_schema);
		const float dropout_rate = layer_derived->dropout_rate;
		const float keep_rate = 1.0F - dropout_rate;
		const float mult = 1.0F / keep_rate; // inverted dropout: rescale kept activations
		const int total_workload = input_configuration_specific.get_neuron_count() * updater_count;

		// Draw the mask sequentially (gen is the updater's random engine):
		// each element is kept with probability keep_rate.
		nnforge_uniform_real_distribution<float> dist(0.0F, 1.0F);
		for(int i = 0; i < total_workload; ++i)
			keep_elem_ptr[i] = (dist(gen) <= keep_rate ? (unsigned char)1 : (unsigned char)0);

		// Apply the mask in parallel; the const locals above are predetermined
		// shared in C++ OpenMP, so only keep_elem_ptr needs an explicit clause.
		#pragma omp parallel default(none) num_threads(plain_config->openmp_thread_count) shared(keep_elem_ptr)
		{
			#pragma omp for schedule(guided)
			for(int workload_id = 0; workload_id < total_workload; ++workload_id)
			{
				int elem_id = workload_id;
				// Kept elements are scaled by 1/keep_rate; dropped elements become 0.
				*(out_it_global + elem_id) = *(in_it_global + elem_id) * (keep_elem_ptr[elem_id] ? mult : 0.0F);
			}
		}
	}
}
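The example implements "inverted" dropout: each activation is kept with probability keep_rate and, when kept, scaled by 1/keep_rate, so the expected output equals the input and the force_deterministic branch can simply memcpy the input through unchanged. A self-contained sketch of that technique, stripped of the nnForge plumbing and using standard C++11 facilities in place of the nnforge_* wrappers (the helper name is hypothetical, not part of nnForge), might look like this:

	#include <cstddef>
	#include <iostream>
	#include <random>
	#include <vector>

	// Hypothetical standalone helper illustrating inverted dropout (not part of nnForge).
	void apply_inverted_dropout(
		const std::vector<float>& input,
		std::vector<float>& output,
		float dropout_rate,
		std::mt19937& gen)
	{
		const float keep_rate = 1.0F - dropout_rate;
		const float mult = 1.0F / keep_rate; // rescale kept activations so E[output] == input
		std::uniform_real_distribution<float> dist(0.0F, 1.0F);
		output.resize(input.size());
		for(std::size_t i = 0; i < input.size(); ++i)
			output[i] = (dist(gen) <= keep_rate) ? input[i] * mult : 0.0F;
	}

	int main()
	{
		std::mt19937 gen(42);
		std::vector<float> in(8, 1.0F), out;
		apply_inverted_dropout(in, out, 0.5F, gen);
		for(std::size_t i = 0; i < out.size(); ++i)
			std::cout << out[i] << ' '; // each element is either 0 or 2 (i.e. 1 / keep_rate)
		std::cout << '\n';
		return 0;
	}

With keep probability keep_rate and scale 1/keep_rate, each element's expected value is keep_rate * input * (1/keep_rate) = input, which is exactly why no rescaling is needed at test time.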