本文整理汇总了C++中hwloc_topology::get_number_of_cores方法的典型用法代码示例。如果您正苦于以下问题：C++ hwloc_topology::get_number_of_cores方法的具体用法？C++ hwloc_topology::get_number_of_cores怎么用？C++ hwloc_topology::get_number_of_cores使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类hwloc_topology的用法示例。
在下文中一共展示了hwloc_topology::get_number_of_cores方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: decode_scatter_distribution
// Distribute worker threads across cores round-robin ("scatter"): thread 0
// goes to core 0, thread 1 to core 1, ..., wrapping around and taking the
// next PU of each core on every pass, until all threads are placed.
//
// t           topology used to query PU numbers and build affinity masks
// affinities  [out] one mask per worker thread; each entry must be empty on
//             entry, otherwise bad_parameter is reported via ec
// used_cores  index of the first core available to this distribution
// max_cores   upper bound on the number of cores to occupy
// num_pus     [out] resized to the thread count; receives the PU number
//             assigned to each thread
// ec          error reporting channel (HPX_THROWS_IF honors its mode)
void decode_scatter_distribution(hwloc_topology& t,
    std::vector<mask_type>& affinities,
    std::size_t used_cores, std::size_t max_cores,
    std::vector<std::size_t>& num_pus, error_code& ec)
{
    std::size_t num_threads = affinities.size();
    std::size_t num_cores = (std::min)(max_cores, t.get_number_of_cores());

    // Guard against an empty core set (e.g. max_cores == 0): without this
    // the nested loops below would never advance num_thread and would spin
    // forever whenever there is at least one thread to place.
    if (num_cores == 0 && num_threads != 0)
    {
        HPX_THROWS_IF(ec, bad_parameter, "decode_scatter_distribution",
            "no cores available for thread placement");
        return;
    }

    // Number of PUs already handed out on each core.
    std::vector<std::size_t> num_pus_cores(num_cores, 0);
    num_pus.resize(num_threads);

    for (std::size_t num_thread = 0; num_thread != num_threads; /**/)
    {
        for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
        {
            // A pre-set mask indicates conflicting placement requests.
            if (any(affinities[num_thread]))
            {
                HPX_THROWS_IF(ec, bad_parameter, "decode_scatter_distribution",
                    boost::str(boost::format("affinity mask for thread %1% has "
                        "already been set") % num_thread));
                return;
            }

            num_pus[num_thread] = t.get_pu_number(num_core + used_cores,
                num_pus_cores[num_core]);
            affinities[num_thread] = t.init_thread_affinity_mask(
                num_core + used_cores, num_pus_cores[num_core]++);

            if (++num_thread == num_threads)
                return;
        }
    }
}
示例2: decode_scatter_distribution
// Distribute worker threads across all cores round-robin ("scatter"),
// skipping cores whose PUs have all been used already.
//
// t           topology used to query per-core PU counts and build masks
// affinities  [out] one mask per worker thread; each entry must be empty on
//             entry, otherwise bad_parameter is reported via ec
// ec          error reporting channel (HPX_THROWS_IF honors its mode)
void decode_scatter_distribution(hwloc_topology& t,
    std::vector<mask_type>& affinities, error_code& ec)
{
    std::size_t num_threads = affinities.size();
    std::size_t num_cores = t.get_number_of_cores();

    // Number of PUs already handed out on each core.
    std::vector<std::size_t> num_pus_cores(num_cores, 0);

    for (std::size_t num_thread = 0; num_thread != num_threads; /**/)
    {
        // Track whether this pass over the cores placed at least one
        // thread; if not, every core is saturated (or num_cores == 0) and
        // looping again would spin forever.
        bool placed_any = false;

        for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
        {
            // A pre-set mask indicates conflicting placement requests.
            if (any(affinities[num_thread]))
            {
                HPX_THROWS_IF(ec, bad_parameter, "decode_scatter_distribution",
                    boost::str(boost::format("affinity mask for thread %1% has "
                        "already been set") % num_thread));
                return;
            }

            // Check if we exceed the number of PUs on the current core.
            // If yes, we need to proceed with the next one.
            std::size_t num_pus_core = t.get_number_of_core_pus(num_core);
            if (num_pus_cores[num_core] == num_pus_core)
                continue;

            affinities[num_thread] = t.init_thread_affinity_mask(
                num_core, num_pus_cores[num_core]++);
            placed_any = true;

            if (++num_thread == num_threads)
                return;
        }

        if (!placed_any)
        {
            HPX_THROWS_IF(ec, bad_parameter, "decode_scatter_distribution",
                "requested more threads than available processing units");
            return;
        }
    }
}
示例3: switch
// Build the list of (core index, affinity mask) pairs selected by the given
// specification, restricted to the supplied socket mask.
std::vector<mask_info>
extract_core_masks(hwloc_topology const& t, spec_type const& s,
    std::size_t socket, mask_cref_type socket_mask, error_code& ec)
{
    std::vector<mask_info> result;

    if (s.type_ == spec_type::core)
    {
        // Determine the first core belonging to the requested socket (if
        // any) and how many cores the bounds may refer to.
        std::size_t first_core = 0;
        std::size_t core_count = 0;
        if (socket == std::size_t(-1))
        {
            // No socket restriction: all cores of the machine.
            core_count = t.get_number_of_cores();
        }
        else
        {
            for (std::size_t i = 0; i != socket; ++i)
                first_core += t.get_number_of_socket_cores(i);
            core_count = t.get_number_of_socket_cores(socket);
        }

        bounds_type bounds = extract_bounds(s, core_count, ec);
        if (!ec)
        {
            for (std::int64_t idx : bounds)
            {
                mask_type core_mask =
                    t.init_core_affinity_mask_from_core(idx + first_core);
                result.push_back(
                    util::make_tuple(idx, core_mask & socket_mask));
            }
        }
    }
    else if (s.type_ == spec_type::unknown)
    {
        // Unspecified core: fall back to the whole-machine mask.
        mask_type machine_mask = extract_machine_mask(t, ec);
        result.push_back(util::make_tuple(
            std::size_t(-1), machine_mask & socket_mask));
    }
    else
    {
        HPX_THROWS_IF(ec, bad_parameter, "extract_core_mask",
            boost::str(boost::format(
                "unexpected specification type %s"
            ) % spec_type::type_name(s.type_)));
    }

    return result;
}
示例4: decode_balanced_distribution
// Distribute worker threads so that all used cores receive (nearly) the
// same number of threads, while keeping worker thread numbers consecutive
// per core.
//
// t           topology used to query PU numbers and build affinity masks
// affinities  [out] one mask per worker thread; each entry must be empty on
//             entry, otherwise bad_parameter is reported via ec
// used_cores  index of the first core available to this distribution
// max_cores   upper bound on the number of cores to occupy
// num_pus     [out] resized to the thread count; receives the PU number
//             assigned to each thread
// ec          error reporting channel (HPX_THROWS_IF honors its mode)
void decode_balanced_distribution(hwloc_topology& t,
    std::vector<mask_type>& affinities,
    std::size_t used_cores, std::size_t max_cores,
    std::vector<std::size_t>& num_pus, error_code& ec)
{
    std::size_t num_threads = affinities.size();
    std::size_t num_cores = (std::min)(max_cores, t.get_number_of_cores());

    // Guard against an empty core set (e.g. max_cores == 0): without this
    // the counting loop below would never advance num_thread and would
    // spin forever whenever there is at least one thread to place.
    if (num_cores == 0 && num_threads != 0)
    {
        HPX_THROWS_IF(ec, bad_parameter, "decode_balanced_distribution",
            "no cores available for thread placement");
        return;
    }

    std::vector<std::size_t> num_pus_cores(num_cores, 0);
    num_pus.resize(num_threads);

    // At first, calculate the number of used pus per core.
    // This needs to be done to make sure that we occupy all the available
    // cores
    for (std::size_t num_thread = 0; num_thread != num_threads; /**/)
    {
        for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
        {
            num_pus_cores[num_core]++;
            if (++num_thread == num_threads)
                break;
        }
    }

    // Iterate over the cores and assigned pus per core. this additional
    // loop is needed so that we have consecutive worker thread numbers
    std::size_t num_thread = 0;
    for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
    {
        for (std::size_t num_pu = 0; num_pu != num_pus_cores[num_core]; ++num_pu)
        {
            // A pre-set mask indicates conflicting placement requests.
            if (any(affinities[num_thread]))
            {
                HPX_THROWS_IF(ec, bad_parameter,
                    "decode_balanced_distribution",
                    boost::str(boost::format(
                        "affinity mask for thread %1% has "
                        "already been set"
                    ) % num_thread));
                return;
            }

            num_pus[num_thread] = t.get_pu_number(num_core + used_cores, num_pu);
            affinities[num_thread] = t.init_thread_affinity_mask(
                num_core + used_cores, num_pu);
            ++num_thread;
        }
    }
}
示例5: decode_balanced_distribution
// Distribute worker threads so that all cores receive (nearly) the same
// number of threads (bounded by each core's PU count), keeping worker
// thread numbers consecutive per core.
//
// t           topology used to query per-core PU counts and build masks
// affinities  [out] one mask per worker thread; each entry must be empty on
//             entry, otherwise bad_parameter is reported via ec
// ec          error reporting channel (HPX_THROWS_IF honors its mode)
void decode_balanced_distribution(hwloc_topology& t,
    std::vector<mask_type>& affinities, error_code& ec)
{
    std::size_t num_threads = affinities.size();
    std::size_t num_cores = t.get_number_of_cores();
    std::vector<std::size_t> num_pus_cores(num_cores, 0);

    // At first, calculate the number of used pus per core.
    // This needs to be done to make sure that we occupy all the available cores
    for (std::size_t num_thread = 0; num_thread != num_threads; /**/)
    {
        // Track whether this pass over the cores reserved at least one PU;
        // if not, every core is saturated (or num_cores == 0) and looping
        // again would spin forever.
        bool reserved_any = false;

        for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
        {
            // Check if we exceed the number of PUs on the current core.
            // If yes, we need to proceed with the next one.
            std::size_t num_pus_core = t.get_number_of_core_pus(num_core);
            if (num_pus_cores[num_core] == num_pus_core)
                continue;

            num_pus_cores[num_core]++;
            reserved_any = true;
            if (++num_thread == num_threads)
                break;
        }

        if (num_thread != num_threads && !reserved_any)
        {
            HPX_THROWS_IF(ec, bad_parameter, "decode_balanced_distribution",
                "requested more threads than available processing units");
            return;
        }
    }

    // Iterate over the cores and assigned pus per core. this additional loop
    // is needed so that we have consecutive worker thread numbers
    std::size_t num_thread = 0;
    for (std::size_t num_core = 0; num_core != num_cores; ++num_core)
    {
        for (std::size_t num_pu = 0; num_pu != num_pus_cores[num_core]; ++num_pu)
        {
            // A pre-set mask indicates conflicting placement requests.
            if (any(affinities[num_thread]))
            {
                HPX_THROWS_IF(ec, bad_parameter, "decode_balanced_distribution",
                    boost::str(boost::format("affinity mask for thread %1% has "
                        "already been set") % num_thread));
                return;
            }

            affinities[num_thread] = t.init_thread_affinity_mask(
                num_core, num_pu);
            ++num_thread;
        }
    }
}