

C++ layer_configuration_specific::get_neuron_count Method Code Examples

This article collects typical usage examples of the C++ method layer_configuration_specific::get_neuron_count. If you have been wondering what layer_configuration_specific::get_neuron_count does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of the layer_configuration_specific class it belongs to.


Below are 15 code examples of the layer_configuration_specific::get_neuron_count method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
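Before diving in, a quick orientation: a layer_configuration_specific describes a layer's data shape through its feature_map_count and dimension_sizes members, and get_neuron_count() returns the total number of neurons per entry. The examples below consistently use it together with get_neuron_count_per_feature_map(), which implies the relationship shown in this minimal sketch (a hypothetical stand-in type, not the actual nnForge implementation):

#include <cassert>
#include <vector>

// Hypothetical stand-in for nnForge's layer_configuration_specific,
// shown only to illustrate what get_neuron_count() computes.
struct layer_configuration_specific_sketch
{
	unsigned int feature_map_count;
	std::vector<unsigned int> dimension_sizes;

	// Product of all spatial dimension sizes.
	unsigned int get_neuron_count_per_feature_map() const
	{
		unsigned int res = 1;
		for(std::vector<unsigned int>::const_iterator it = dimension_sizes.begin(); it != dimension_sizes.end(); ++it)
			res *= *it;
		return res;
	}

	// Total neurons per entry: spatial size times the number of feature maps.
	unsigned int get_neuron_count() const
	{
		return get_neuron_count_per_feature_map() * feature_map_count;
	}
};

int main()
{
	// A 32x32 image with 3 feature maps: 32 * 32 * 3 = 3072 neurons per entry.
	layer_configuration_specific_sketch config;
	config.feature_map_count = 3;
	config.dimension_sizes.push_back(32);
	config.dimension_sizes.push_back(32);

	assert(config.get_neuron_count_per_feature_map() == 1024);
	assert(config.get_neuron_count() == 3072);
	return 0;
}

Example 4 below relies on exactly this relationship: when get_neuron_count() equals feature_map_count, every spatial dimension must be 1, so the layer degenerates to a fully connected one.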

Example 1: run_forward_propagation

		void rgb_to_yuv_convert_layer_tester_plain::run_forward_propagation(
			plain_buffer::ptr output_buffer,
			const std::vector<plain_buffer::const_ptr>& input_buffers,
			plain_buffer::ptr temporary_working_fixed_buffer,
			plain_buffer::ptr temporary_working_per_entry_buffer,
			plain_running_configuration::const_ptr plain_config,
			layer::const_ptr layer_schema,
			layer_data::const_ptr data,
			layer_data_custom::const_ptr data_custom,
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int entry_count) const
		{
			const float * const in_it = *input_buffers[0];
			float * const out_it = *output_buffer;

			nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema);

			const unsigned int color_feature_map_config_count = static_cast<unsigned int>(layer_derived->color_feature_map_config_list.size());

			// If the color configs do not cover every feature map, copy the whole input first
			// so that unconverted feature maps pass through unchanged.
			if ((out_it != in_it) && ((color_feature_map_config_count * 3) != output_configuration_specific.feature_map_count))
				memcpy(out_it, in_it, output_configuration_specific.get_neuron_count() * entry_count * sizeof(float));

			const int total_workload = static_cast<int>(entry_count * color_feature_map_config_count);

			// The input and output configurations have the same shape for this layer,
			// so the output configuration is used for both counts.
			const unsigned int input_neuron_count = output_configuration_specific.get_neuron_count();
			const unsigned int input_neuron_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();
			const std::vector<color_feature_map_config>::const_iterator cfm_it = layer_derived->color_feature_map_config_list.begin();

			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int workload_id = 0; workload_id < total_workload; ++workload_id)
			{
				int entry_id = workload_id / color_feature_map_config_count;
				int color_feature_map_config_id = workload_id - entry_id * color_feature_map_config_count;
				const color_feature_map_config& cfm = *(cfm_it + color_feature_map_config_id);

				const float * in_it_red_and_y = in_it + (entry_id * input_neuron_count) + (cfm.red_and_y_feature_map_id * input_neuron_count_per_feature_map);
				const float * in_it_green_and_u = in_it + (entry_id * input_neuron_count) + (cfm.green_and_u_feature_map_id * input_neuron_count_per_feature_map);
				const float * in_it_blue_and_v = in_it + (entry_id * input_neuron_count) + (cfm.blue_and_v_feature_map_id * input_neuron_count_per_feature_map);

				float * out_it_red_and_y = out_it + (entry_id * input_neuron_count) + (cfm.red_and_y_feature_map_id * input_neuron_count_per_feature_map);
				float * out_it_green_and_u = out_it + (entry_id * input_neuron_count) + (cfm.green_and_u_feature_map_id * input_neuron_count_per_feature_map);
				float * out_it_blue_and_v = out_it + (entry_id * input_neuron_count) + (cfm.blue_and_v_feature_map_id * input_neuron_count_per_feature_map);

				for(unsigned int i = 0; i < input_neuron_count_per_feature_map; ++i)
				{
					float red = in_it_red_and_y[i];
					float green = in_it_green_and_u[i];
					float blue = in_it_blue_and_v[i];

					// w_r, w_g, w_b, u_mult and v_mult are conversion constants defined elsewhere in this file (not shown in the excerpt).
					float y = w_r * red + w_g * green + w_b * blue;
					float u = u_mult * (blue - y);
					float v = v_mult * (red - y);

					out_it_red_and_y[i] = y;
					out_it_green_and_u[i] = u;
					out_it_blue_and_v[i] = v;
				}
			}
		}
Developer ID: anshumang, Project: nnForge, Lines: 60, Source: rgb_to_yuv_convert_layer_tester_plain.cpp
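
For a self-contained illustration of the same per-pixel conversion with concrete coefficient values, here is a minimal sketch; the BT.601 values below are an assumption, not necessarily the constants nnForge defines:

#include <cstdio>

int main()
{
	// Assumed BT.601 coefficients; nnForge's actual constants are defined elsewhere.
	const float w_r = 0.299F, w_g = 0.587F, w_b = 0.114F;
	const float u_mult = 0.492F, v_mult = 0.877F;

	const float red = 1.0F, green = 0.5F, blue = 0.25F;

	// Same formulas as the inner loop above.
	const float y = w_r * red + w_g * green + w_b * blue;
	const float u = u_mult * (blue - y);
	const float v = v_mult * (red - y);

	std::printf("y=%f u=%f v=%f\n", y, u, v);
	return 0;
}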

Example 2: transform

	void reshape_data_transformer::transform(
		const float * data,
		float * data_transformed,
		const layer_configuration_specific& original_config,
		unsigned int sample_id)
	{
		if (original_config.get_neuron_count() != config.get_neuron_count())
			throw neural_network_exception((boost::format("Neuron counts for reshape_data_transformer don't match: %1% and %2%") % original_config.get_neuron_count() % config.get_neuron_count()).str());

		memcpy(data_transformed, data, original_config.get_neuron_count() * sizeof(float));
	}
Developer ID: milakov, Project: nnForge, Lines: 11, Source: reshape_data_transformer.cpp

Example 3: test

		void dropout_layer_updater_plain::test(
			const_additional_buffer_smart_ptr input_buffer,
			additional_buffer_smart_ptr output_buffer,
			std::vector<additional_buffer_smart_ptr>& additional_buffers,
			plain_running_configuration_const_smart_ptr plain_config,
			const_layer_smart_ptr layer_schema,
			const_layer_data_smart_ptr data,
			const_layer_data_custom_smart_ptr data_custom,
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int updater_count,
			unsigned int offset_input_entry_id,
			bool force_deterministic) const
		{
			if (offset_input_entry_id > 0)
				throw neural_network_exception("dropout_layer_updater_plain is not able to run using offset");

			if (force_deterministic)
			{
				memcpy(&(output_buffer->at(0)), &(input_buffer->at(0)), input_configuration_specific.get_neuron_count() * updater_count * sizeof(float));
			}
			else
			{
				const std::vector<float>::const_iterator in_it_global = input_buffer->begin();
				const std::vector<float>::iterator out_it_global = output_buffer->begin();
				unsigned char * keep_elem_ptr = reinterpret_cast<unsigned char *>(&(additional_buffers[0]->at(0)));

				nnforge_shared_ptr<const dropout_layer> layer_derived = nnforge_dynamic_pointer_cast<const dropout_layer>(layer_schema);
				const float dropout_rate = layer_derived->dropout_rate;
				const float keep_rate = 1.0F - dropout_rate;
				const float mult = 1.0F / keep_rate;

				const int total_workload = input_configuration_specific.get_neuron_count() * updater_count;

				nnforge_uniform_real_distribution<float> dist(0.0F, 1.0F);

				// gen is the updater's member random engine (declared elsewhere, not shown in the excerpt);
				// the keep/drop mask is drawn sequentially before the parallel region, since a single random engine is not safe to share across threads.
				for(int i = 0; i < total_workload; ++i)
					keep_elem_ptr[i] = (dist(gen) <= keep_rate ? (unsigned char)1 : (unsigned char)0);

				#pragma omp parallel default(none) num_threads(plain_config->openmp_thread_count) shared(keep_elem_ptr)
				{
					#pragma omp for schedule(guided)
					for(int workload_id = 0; workload_id < total_workload; ++workload_id)
					{
						int elem_id = workload_id;
						*(out_it_global + elem_id) = *(in_it_global + elem_id) * (keep_elem_ptr[elem_id] ? mult : 0.0F);
					}
				}
			}
		}
Developer ID: anshumang, Project: nnForgeINST, Lines: 50, Source: dropout_layer_updater_plain.cpp
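
A quick sanity check on the scaling used above: multiplying each kept activation by mult = 1 / keep_rate makes the expected output equal the input, so no rescaling is needed at inference time (inverted dropout). The standalone sketch below verifies this numerically; it is an illustration only, not nnForge code:

#include <cstdio>
#include <random>

int main()
{
	std::mt19937 gen(42);
	std::uniform_real_distribution<float> dist(0.0F, 1.0F);

	const float keep_rate = 0.75F;
	const float x = 1.0F;
	const int trials = 1000000;

	// The average of mask * x / keep_rate over many trials approaches x.
	double sum = 0.0;
	for(int i = 0; i < trials; ++i)
		sum += (dist(gen) <= keep_rate) ? (x / keep_rate) : 0.0F;

	std::printf("mean output = %f (input was %f)\n", sum / trials, x);
	return 0;
}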

Example 4: create_tester_specific

		layer_tester_cuda_smart_ptr convolution_layer_testing_schema::create_tester_specific(
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific) const
		{
			layer_tester_cuda_smart_ptr res;

			// Neuron count equal to feature map count means every spatial dimension is 1,
			// so the convolution degenerates to a fully connected layer.
			if (output_configuration_specific.get_neuron_count() == output_configuration_specific.feature_map_count)
			{
				res = layer_tester_cuda_smart_ptr(new fully_connected_layer_tester_cuda());
			}
			else
			{
				switch (output_configuration_specific.dimension_sizes.size())
				{
				case 1:
					if (cuda_config->get_compute_capability() >= 300)
						res = layer_tester_cuda_smart_ptr(new convolution_1d_layer_tester_cuda_kepler());
					else
						res = layer_tester_cuda_smart_ptr(new convolution_1d_layer_tester_cuda_fermi());
					break;
				case 2:
					if (cuda_config->get_compute_capability() >= 300)
						res = layer_tester_cuda_smart_ptr(new convolution_2d_layer_tester_cuda_kepler());
					else
						res = layer_tester_cuda_smart_ptr(new convolution_2d_layer_tester_cuda_fermi());
					break;
				default:
					throw neural_network_exception((boost::format("No CUDA tester for the convolution layer of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str());
					break;
				}
			}

			return res;
		}
Developer ID: mdqyy, Project: nnForge, Lines: 34, Source: convolution_layer_testing_schema.cpp

Example 5: test

		void softmax_layer_hessian_plain::test(
			const_additional_buffer_smart_ptr input_buffer,
			additional_buffer_smart_ptr output_buffer,
			std::vector<additional_buffer_smart_ptr>& additional_buffers,
			plain_running_configuration_const_smart_ptr plain_config,
			const_layer_smart_ptr layer_schema,
			const_layer_data_smart_ptr data,
			const_layer_data_custom_smart_ptr data_custom,
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int entry_count) const
		{
			const unsigned int input_neuron_count = input_configuration_specific.get_neuron_count();
			const unsigned int input_neuron_count_per_feature_map = input_configuration_specific.get_neuron_count_per_feature_map();
			const unsigned int feature_map_count = static_cast<unsigned int>(input_configuration_specific.feature_map_count);

			const std::vector<float>::const_iterator input_buffer_it = input_buffer->begin();
			const std::vector<float>::iterator output_buffer_it = output_buffer->begin();

			const int total_workload = entry_count * input_neuron_count_per_feature_map;
			const int openmp_thread_count = plain_config->openmp_thread_count;
			
			#pragma omp parallel default(none) shared(additional_buffers) num_threads(openmp_thread_count)
			{
				int thread_id = 0;
				#ifdef _OPENMP
				thread_id = omp_get_thread_num();
				#endif

				std::vector<float>& local_additional_buffer = *(additional_buffers[thread_id]);

				#pragma omp for schedule(guided)
				for(int workload_id = 0; workload_id < total_workload; ++workload_id)
				{
					int entry_id = workload_id / input_neuron_count_per_feature_map;
					int neuron_id = workload_id - (entry_id * input_neuron_count_per_feature_map);

					const std::vector<float>::const_iterator in_it = input_buffer_it + (entry_id * input_neuron_count) + neuron_id;
					const std::vector<float>::iterator out_it = output_buffer_it + (entry_id * input_neuron_count) + neuron_id;

					// Find the per-position maximum across feature maps first;
					// subtracting it before expf keeps the exponentials from overflowing.
					float max_val = -1.0e+37F;
					for(unsigned int feature_map_id = 0; feature_map_id < feature_map_count; ++feature_map_id)
					{
						float val = *(in_it + (feature_map_id * input_neuron_count_per_feature_map));
						max_val = std::max(max_val, val);
					}

					float sum = 0.0F;
					for(unsigned int feature_map_id = 0; feature_map_id < feature_map_count; ++feature_map_id)
					{
						float val = expf((*(in_it + (feature_map_id * input_neuron_count_per_feature_map))) - max_val);
						sum += val;
						local_additional_buffer[feature_map_id] = val;
					}
					float mult = 1.0F / sum;
					for(unsigned int feature_map_id = 0; feature_map_id < feature_map_count; ++feature_map_id)
						*(out_it + (feature_map_id * input_neuron_count_per_feature_map)) = local_additional_buffer[feature_map_id] * mult;
				} // for(int workload_id
			} // #pragma parallel
		}
Developer ID: bluelzx, Project: nnForge, Lines: 60, Source: softmax_layer_hessian_plain.cpp

Example 6: run_forward_propagation

		void rectified_linear_layer_updater_plain::run_forward_propagation(
			plain_buffer::ptr output_buffer,
			const std::vector<plain_buffer::const_ptr>& input_buffers,
			plain_buffer::ptr temporary_working_fixed_buffer,
			plain_buffer::ptr temporary_working_per_entry_buffer,
			plain_buffer::ptr temporary_per_entry_buffer,
			plain_running_configuration::const_ptr plain_config,
			layer::const_ptr layer_schema,
			layer_data::const_ptr data,
			layer_data_custom::const_ptr data_custom,
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific,
			const std::set<layer_action>& actions,
			unsigned int entry_count) const
		{
			std::shared_ptr<const rectified_linear_layer> layer_derived = std::dynamic_pointer_cast<const rectified_linear_layer>(layer_schema);

			const int elem_count = static_cast<int>(entry_count * output_configuration_specific.get_neuron_count());
			float * const out_it = *output_buffer;
			const float * const in_it = *input_buffers[0];
			const float negative_slope = layer_derived->negative_slope;

			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int i = 0; i < elem_count; ++i)
			{
				float input_val = *(in_it + i);
				*(out_it + i) = input_val >= 0.0F ? input_val : input_val * negative_slope;
			}
		}
Developer ID: milakov, Project: nnForge, Lines: 29, Source: rectified_linear_layer_updater_plain.cpp

Example 7: backprop

		void absolute_layer_updater_plain::backprop(
			additional_buffer_smart_ptr input_errors,
			const_additional_buffer_smart_ptr input_neurons,
			const_additional_buffer_smart_ptr output_errors,
			const_additional_buffer_smart_ptr output_neurons,
			std::vector<additional_buffer_smart_ptr>& additional_buffers,
			plain_running_configuration_const_smart_ptr plain_config,
			const_layer_smart_ptr layer_schema,
			const_layer_data_smart_ptr data,
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int updater_count) const
		{
			const int elem_count = static_cast<int>(updater_count * input_configuration_specific.get_neuron_count());
			const std::vector<float>::const_iterator in_it = input_neurons->begin();
			const std::vector<float>::iterator in_err_it = input_errors->begin();

			// d|x|/dx = sign(x): for negative inputs the propagated error has its sign flipped in place.
			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int i = 0; i < elem_count; ++i)
			{
				float val = *(in_it + i);
				if (val < 0.0F)
				{
					*(in_err_it + i) = - *(in_err_it + i);
				}
			}
		}
Developer ID: dreadlord1984, Project: nnForge, Lines: 27, Source: absolute_layer_updater_plain.cpp

Example 8: test

		void absolute_layer_updater_plain::test(
			const_additional_buffer_smart_ptr input_buffer,
			additional_buffer_smart_ptr output_buffer,
			std::vector<additional_buffer_smart_ptr>& additional_buffers,
			plain_running_configuration_const_smart_ptr plain_config,
			const_layer_smart_ptr layer_schema,
			const_layer_data_smart_ptr data,
			const_layer_data_custom_smart_ptr data_custom,
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int updater_count,
			unsigned int offset_input_entry_id,
			bool force_deterministic) const
		{
			if (offset_input_entry_id > 0)
				throw neural_network_exception("absolute_layer_updater_plain is not able to run using offset");

			const int elem_count = static_cast<int>(updater_count * input_configuration_specific.get_neuron_count());
			const std::vector<float>::const_iterator in_it = input_buffer->begin();
			const std::vector<float>::iterator out_it = output_buffer->begin();

			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int i = 0; i < elem_count; ++i)
				*(out_it + i) = fabs(*(in_it + i));
		}
Developer ID: anshumang, Project: nnForgeINST, Lines: 25, Source: absolute_layer_updater_plain.cpp

Example 9: supervised_data_mem_reader

	supervised_data_mem_reader::supervised_data_mem_reader(
		const layer_configuration_specific& input_configuration,
		const layer_configuration_specific& output_configuration,
		const std::vector<std::tr1::shared_ptr<const std::vector<float> > >& input_data_list,
		const std::vector<std::tr1::shared_ptr<const std::vector<float> > >& output_data_list)
		: input_configuration(input_configuration)
		, output_configuration(output_configuration)
		, input_data_list_float(input_data_list)
		, output_data_list(output_data_list)
		, entry_read_count(0)
		, type_code(neuron_data_type::type_float) // this constructor takes float data
		, entry_count(static_cast<unsigned int>(input_data_list.size()))
		, input_neuron_count(input_configuration.get_neuron_count())
		, output_neuron_count(output_configuration.get_neuron_count())
	{
	}
Developer ID: ChenglongChen, Project: nnForge, Lines: 16, Source: supervised_data_mem_reader.cpp

Example 10: run_forward_propagation

		void concat_layer_updater_plain::run_forward_propagation(
			plain_buffer::ptr output_buffer,
			const std::vector<plain_buffer::const_ptr>& input_buffers,
			plain_buffer::ptr temporary_working_fixed_buffer,
			plain_buffer::ptr temporary_working_per_entry_buffer,
			plain_buffer::ptr temporary_per_entry_buffer,
			plain_running_configuration::const_ptr plain_config,
			layer::const_ptr layer_schema,
			layer_data::const_ptr data,
			layer_data_custom::const_ptr data_custom,
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific,
			const std::set<layer_action>& actions,
			unsigned int entry_count) const
		{
			// Layout is [entry][feature map][spatial dims]: concatenation along the feature map axis
			// is one contiguous block copy per input per entry.
			for(unsigned int entry_id = 0; entry_id < entry_count; ++entry_id)
			{
				float *dst = (float *)*output_buffer + entry_id * output_configuration_specific.get_neuron_count();
				for(unsigned int i = 0; i < static_cast<unsigned int>(input_configuration_specific_list.size()); ++i)
				{
					unsigned int input_neuron_count = input_configuration_specific_list[i].get_neuron_count();
					memcpy(
						dst,
						(const float *)(*input_buffers[i]) + entry_id * input_neuron_count,
						input_neuron_count * sizeof(float));
					dst += input_neuron_count;
				}
			}
		}
Developer ID: milakov, Project: nnForge, Lines: 29, Source: concat_layer_updater_plain.cpp

Example 11: create_updater_specific

		layer_updater_cuda::ptr sparse_convolution_layer_updater_schema::create_updater_specific(
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific) const
		{
			layer_updater_cuda::ptr res;

			nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);

			// zero_padding is true when no padding is applied, i.e. both padding vectors are all zeros.
			bool zero_padding = (layer_derived->left_zero_padding == std::vector<unsigned int>(layer_derived->left_zero_padding.size(), 0))
				&& (layer_derived->right_zero_padding == std::vector<unsigned int>(layer_derived->right_zero_padding.size(), 0));

			// As in Example 4, neuron count equal to feature map count indicates a fully connected case.
			if (zero_padding && (output_configuration_specific.get_neuron_count() == output_configuration_specific.feature_map_count))
			{
				if (input_configuration_specific_list[0].dimension_sizes == output_configuration_specific.dimension_sizes)
				{
					res = layer_updater_cuda::ptr(new sparse_fully_connected_1x1_layer_updater_cuda());
				}
				else
				{
					res = layer_updater_cuda::ptr(new sparse_fully_connected_layer_updater_cuda());
				}
			}
			else
			{
				res = sparse_convolution_layer_updater_schema_helper_cuda_kepler::create_updater_specific(input_configuration_specific_list, output_configuration_specific);
			}

			return res;
		}
Developer ID: anshumang, Project: nnForge, Lines: 29, Source: sparse_convolution_layer_updater_schema.cpp

Example 12: run_forward_propagation

		void add_layer_tester_plain::run_forward_propagation(
			plain_buffer::ptr output_buffer,
			const std::vector<plain_buffer::const_ptr>& input_buffers,
			plain_buffer::ptr temporary_working_fixed_buffer,
			plain_buffer::ptr temporary_working_per_entry_buffer,
			plain_running_configuration::const_ptr plain_config,
			layer::const_ptr layer_schema,
			layer_data::const_ptr data,
			layer_data_custom::const_ptr data_custom,
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int entry_count) const
		{
			float * const out = *output_buffer;
			std::vector<const float *> in_list;
			for(std::vector<plain_buffer::const_ptr>::const_iterator it = input_buffers.begin(); it != input_buffers.end(); ++it)
				in_list.push_back(**it);
			const float ** const in_ptr_list = &in_list[0];
			std::shared_ptr<const add_layer> layer_derived = std::dynamic_pointer_cast<const add_layer>(layer_schema);
			const float alpha = layer_derived->alpha;
			const int src_ptr_count = static_cast<int>(in_list.size());
			const int elem_count = static_cast<int>(entry_count * output_configuration_specific.get_neuron_count());
			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int i = 0; i < elem_count; ++i)
			{
				float sum = 0.0F;
				for(int j = 0; j < src_ptr_count; ++j)
					sum += in_ptr_list[j][i];
				out[i] = sum * alpha;
			}
		}
Developer ID: milakov, Project: nnForge, Lines: 31, Source: add_layer_tester_plain.cpp

Example 13: get_transformed_configuration

	layer_configuration_specific reshape_data_transformer::get_transformed_configuration(const layer_configuration_specific& original_config) const
	{
		if (original_config.get_neuron_count() != config.get_neuron_count())
			throw neural_network_exception((boost::format("Neuron counts for reshape_data_transformer don't match: %1% and %2%") % original_config.get_neuron_count() % config.get_neuron_count()).str());

		return config;
	}
Developer ID: milakov, Project: nnForge, Lines: 7, Source: reshape_data_transformer.cpp

Example 14: test

		void hyperbolic_tangent_layer_updater_plain::test(
			const_additional_buffer_smart_ptr input_buffer,
			additional_buffer_smart_ptr output_buffer,
			std::vector<additional_buffer_smart_ptr>& additional_buffers,
			plain_running_configuration_const_smart_ptr plain_config,
			const_layer_smart_ptr layer_schema,
			const_layer_data_smart_ptr data,
			const_layer_data_custom_smart_ptr data_custom,
			const layer_configuration_specific& input_configuration_specific,
			const layer_configuration_specific& output_configuration_specific,
			unsigned int updater_count,
			unsigned int offset_input_entry_id) const
		{
			if (offset_input_entry_id > 0)
				throw neural_network_exception("hyperbolic_tangent_layer_updater_plain is not able to run using offset");

			const int elem_count = static_cast<int>(updater_count * input_configuration_specific.get_neuron_count());
			const std::vector<float>::const_iterator in_it = input_buffer->begin();
			const std::vector<float>::iterator out_it = output_buffer->begin();

			nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema);
			const float hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F;
			const float hyperbolic_tangent_major_multiplier = layer_derived->major_multiplier;

			#pragma omp parallel for default(none) schedule(guided) num_threads(plain_config->openmp_thread_count)
			for(int i = 0; i < elem_count; ++i)
			{
				float inp = *(in_it + i);
				float inp2 = expf(inp * hyperbolic_tangent_steepness2);
				// (e^(2sx) - 1) / (e^(2sx) + 1) == tanh(s * x), so this computes
				// major_multiplier * tanh(steepness * inp) with a single expf call.
				float res = (inp2 - 1.0F) / (inp2 + 1.0F) * hyperbolic_tangent_major_multiplier;
				*(out_it + i) = res;
			}
		}
Developer ID: Alienfeel, Project: nnForge, Lines: 33, Source: hyperbolic_tangent_layer_updater_plain.cpp

Example 15: configure

		void layer_updater_cuda::configure(
			const std::vector<layer_configuration_specific>& input_configuration_specific_list,
			const layer_configuration_specific& output_configuration_specific,
			layer::const_ptr layer_schema,
			cuda_running_configuration::const_ptr cuda_config,
			const std::set<layer_action>& actions)
		{
			this->layer_schema = layer_schema;
			this->input_configuration_specific_list = input_configuration_specific_list;
			this->output_configuration_specific = output_configuration_specific;
			this->cuda_config = cuda_config;
			this->actions = actions;

			input_elem_count_per_entry_list.resize(input_configuration_specific_list.size());
			input_elem_count_per_feature_map_list.resize(input_configuration_specific_list.size());
			for(int i = 0; i < static_cast<int>(input_configuration_specific_list.size()); ++i)
			{
				input_elem_count_per_entry_list[i] = input_configuration_specific_list[i].get_neuron_count();
				input_elem_count_per_feature_map_list[i] = input_configuration_specific_list[i].get_neuron_count_per_feature_map();
			}

			output_elem_count_per_entry = output_configuration_specific.get_neuron_count();
			output_elem_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();

			updater_configured();
		}
Developer ID: anshumang, Project: nnForge, Lines: 26, Source: layer_updater_cuda.cpp


Note: The layer_configuration_specific::get_neuron_count method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.