This page collects typical usage examples of the C++ method Nodes::at. If you are wondering what Nodes::at does, how to call it, or what it looks like in real code, the selected examples below may help; they also illustrate how the surrounding Nodes class is used.
A total of 8 code examples of the Nodes::at method are shown below, sorted by popularity by default.
Example 1: select
int UltimateTicTacToeMontecarloAI::select(Nodes const& nodes, int const current) const
{
Node const& node = nodes.at(current);
if(node.children.empty())
return current;
return select(nodes, pickBestChild(node, nodes));
}
Example 2: nodeUCBValue
qreal UltimateTicTacToeMontecarloAI::nodeUCBValue(Node const& node, Nodes const& nodes) const {
if(node.parent != -1)
{
return node.v + c * qSqrt(qLn(nodes.at(node.parent).n) / node.n);
} else {
return 0;
}
}
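The value computed above is the standard UCB1 selection score, with c acting as the exploration constant; a node without a parent (parent == -1, i.e. the root) simply falls back to 0. As a sketch of the formula the code evaluates (whether node.v is an accumulated or an already averaged score depends on parts of the class not shown in this excerpt):

\mathrm{UCB}(i) = v_i + c \sqrt{\frac{\ln n_{\mathrm{parent}(i)}}{n_i}}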
Example 3: PFNode
TEST(floodfill, one) {
typedef DAG::Node<long long> PFNode;
typedef std::map<long long, PFNode> Nodes;
Nodes myNodes;
long long id1, id2;
id1 = 1;
id2 = 2;
DAG::FloodFill<long long> FFill;
myNodes.emplace(id1, PFNode(id1));
myNodes.emplace(id2, PFNode(id2));
myNodes[id1].addChild(myNodes[id2]);
ASSERT_EQ(myNodes.at(id1).parents().size(), 0UL); // unsigned literal (0UL) avoids a signed/unsigned comparison warning
ASSERT_EQ(myNodes.at(id1).children().size(), 1UL);
// ASSERT_EQ(myNodes.at(id1).children()[0],id2);
for (const auto& nodevector : FFill.traverse(myNodes)) {
ASSERT_EQ(nodevector.size(), 2UL);
}
}
Example 4: pickBestChild
int UltimateTicTacToeMontecarloAI::pickBestChild(Node const& node, Nodes const& nodes, bool const ucb) const
{
bool first = true;
qreal bestChildValue = 0;
int bestChildIndex = -1;
for(int const childIndex : node.children)
{
Node const& child = nodes.at(childIndex);
qreal childValue = ucb ? nodeUCBValue(child, nodes) : child.v / static_cast<qreal>(child.n);
if(first || childValue >= bestChildValue) {
bestChildIndex = childIndex;
bestChildValue = childValue;
first = false;
}
}
return bestChildIndex;
}
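pickBestChild is used in two modes: Example 1 calls it without the third argument during tree descent (so the declaration presumably supplies a default, most likely ucb = true), while Example 8 passes ucb = false when the final move is chosen. As a sketch, the quantity maximised over the children i of a node is:

v_i + c \sqrt{\ln(n_{\mathrm{parent}}) / n_i} when ucb is true (exploration-aware selection, see Example 2), and
v_i / n_i when ucb is false (plain average value, used to pick the final move).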
Example 5: distribution
/**
* Takes a subgraph of the given graph (all nodes in the graph with the given label),
* partitions this subgraph into even smaller subgraphs (using something similar to k-means),
* and gives all small subgraphs a unique label (using the given min_label).
*
* @param graph
* @param label_of_connected_component
* @param partition_size target (maximum) number of nodes per partition
* @param min_label_for_partition_labeling
* @return the number of generated partitions
*/
std::size_t
partition_connected_component(UniGraph * graph, std::size_t label_of_connected_component, std::size_t partition_size, std::size_t min_label_for_partition_labeling)
{
typedef std::size_t Node;
typedef std::size_t Label;
typedef std::vector<Node> Nodes;
Nodes nodes;
for (Node node = 0; node < graph->num_nodes(); ++node)
if (graph->get_label(node) == label_of_connected_component)
nodes.push_back(node);
const std::size_t num_partitions = (nodes.size() + partition_size - 1) / partition_size; // division and rounding up
/********* k-means clustering *******/
const std::size_t num_kmeans_iterations = 100;
Nodes centroids;
/* Draw centroids randomly. */
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(0, nodes.size() - 1);
for(std::size_t partition = 0; partition < num_partitions; ++partition) {
/* Draw until we hit a node that has not been chosen as a centroid yet. */
Node centroid;
do {
centroid = nodes.at(distribution(generator));
} while (std::find(centroids.begin(), centroids.end(), centroid) != centroids.end());
centroids.push_back(centroid);
}
for (std::size_t kmeans_iteration = 0; kmeans_iteration < num_kmeans_iterations; ++kmeans_iteration) {
const Label unvisited = std::numeric_limits<Label>::max();
for (Node const & node : nodes)
graph->set_label(node, unvisited);
/* Put centroids into queues. */
std::vector<Nodes> queues(num_partitions);
for (std::size_t i = 0; i < num_partitions; ++i)
queues.at(i).push_back(centroids.at(i));
/* Grow regions starting from centroids */
while (std::any_of(queues.begin(), queues.end(), [](Nodes const & queue){return !queue.empty();})) {
#pragma omp parallel for
for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
Nodes & old_queue = queues.at(queue_id);
std::unordered_set<Node> new_queue;
for (Node node : old_queue)
graph->set_label(node, min_label_for_partition_labeling + queue_id); // there is a race condition for partition boundary nodes but we don't care
for (Node node : old_queue) {
/* Copy all unvisited (and not yet inserted) neighbors into new queue. */
for (Node neighbor : graph->get_adj_nodes(node))
if (graph->get_label(neighbor) == unvisited)
new_queue.insert(neighbor);
}
old_queue.clear();
old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
}
}
/* If we are in the final iteration we stop here to keep the graph labels
* (they would be removed in the following region shrinking step). */
if (kmeans_iteration == num_kmeans_iterations - 1)
break;
/* Put partition boundary nodes into queues. */
for (Node const node : nodes) {
Label const cur_label = graph->get_label(node);
std::size_t const cur_queue = cur_label - min_label_for_partition_labeling;
Nodes const & neighbors = graph->get_adj_nodes(node);
/* Each node, where any of its neighbors has a different label, is a boundary node. */
if (std::any_of(neighbors.begin(), neighbors.end(), [graph, cur_label]
(Node const neighbor) { return graph->get_label(neighbor) != cur_label; } ))
queues.at(cur_queue).push_back(node);
}
/* Shrink regions starting from boundaries to obtain new centroids. */
#pragma omp parallel for
for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
Nodes & old_queue = queues.at(queue_id);
while (!old_queue.empty()){
std::unordered_set<Node> new_queue;
for (Node node : old_queue)
graph->set_label(node, unvisited);
for (Node node : old_queue) {
/* Copy all neighbors that have not yet been marked (and have not yet been inserted) into new queue. */
for (Node neighbor : graph->get_adj_nodes(node))
if (graph->get_label(neighbor) == min_label_for_partition_labeling + queue_id)
new_queue.insert(neighbor);
//......... (the rest of this example is omitted) .........
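A minimal usage sketch, based only on the signature and the comment above. It assumes graph is a UniGraph* whose nodes already carry connected-component labels; component_label and next_free_label are placeholder values, not taken from the excerpt:

// Hypothetical call: split the component labelled `component_label` into
// partitions of roughly 100 nodes, labelling them starting at `next_free_label`.
std::size_t component_label = 0; // placeholder: label of the (large) component to split
std::size_t next_free_label = 1000; // placeholder: first label not yet used in the graph
std::size_t num_partitions =
partition_connected_component(graph, component_label, 100, next_free_label);
// Afterwards every node of that component carries a label in
// [next_free_label, next_free_label + num_partitions).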
Example 6: bless
static SV *node_to_sv(pTHX_ Node *node)
{
SV *ret = NULL;
if (!node) return ret;
if (TYPE_match(node, BranchNode)) {
BranchNode *branch = dynamic_cast<BranchNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, branch->tk);
add_key(hash, "left", branch->left);
add_key(hash, "right", branch->right);
add_key(hash, "next", branch->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Branch");
} else if (TYPE_match(node, FunctionCallNode)) {
FunctionCallNode *call = dynamic_cast<FunctionCallNode *>(node);
Nodes *args = call->args;
size_t argsize = args->size();
AV *array = new_Array();
for (size_t i = 0; i < argsize; i++) {
SV *arg = node_to_sv(aTHX_ args->at(i));
if (!arg) continue;
av_push(array, set(arg));
}
HV *hash = (HV*)new_Hash();
add_key(hash, "next", call->next);
add_token(hash, call->tk);
(void)hv_stores(hash, "args", set(new_Ref(array)));
ret = bless(aTHX_ hash, "Compiler::Parser::Node::FunctionCall");
} else if (TYPE_match(node, ArrayNode)) {
ArrayNode *array = dynamic_cast<ArrayNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, array->tk);
add_key(hash, "next", array->next);
add_key(hash, "idx", array->idx);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Array");
} else if (TYPE_match(node, HashNode)) {
HashNode *h = dynamic_cast<HashNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, h->tk);
add_key(hash, "next", h->next);
add_key(hash, "key", h->key);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Hash");
} else if (TYPE_match(node, DereferenceNode)) {
DereferenceNode *dref = dynamic_cast<DereferenceNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, dref->tk);
add_key(hash, "next", dref->next);
add_key(hash, "expr", dref->expr);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Dereference");
} else if (TYPE_match(node, FunctionNode)) {
FunctionNode *f = dynamic_cast<FunctionNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, f->tk);
add_key(hash, "next", f->next);
add_key(hash, "body", f->body);
add_key(hash, "prototype", f->prototype);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Function");
} else if (TYPE_match(node, BlockNode)) {
BlockNode *b = dynamic_cast<BlockNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, b->tk);
add_key(hash, "next", b->next);
add_key(hash, "body", b->body);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Block");
} else if (TYPE_match(node, ReturnNode)) {
ReturnNode *r = dynamic_cast<ReturnNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, r->tk);
add_key(hash, "next", r->next);
add_key(hash, "body", r->body);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Return");
} else if (TYPE_match(node, SingleTermOperatorNode)) {
SingleTermOperatorNode *s = dynamic_cast<SingleTermOperatorNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, s->tk);
add_key(hash, "next", s->next);
add_key(hash, "expr", s->expr);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::SingleTermOperator");
} else if (TYPE_match(node, DoubleTermOperatorNode)) {
} else if (TYPE_match(node, LeafNode)) {
LeafNode *leaf = dynamic_cast<LeafNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, leaf->tk);
add_key(hash, "next", leaf->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Leaf");
} else if (TYPE_match(node, ListNode)) {
ListNode *list = dynamic_cast<ListNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, list->tk);
add_key(hash, "data", list->data);
add_key(hash, "next", list->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::List");
} else if (TYPE_match(node, ArrayRefNode)) {
ArrayRefNode *ref = dynamic_cast<ArrayRefNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, ref->tk);
add_key(hash, "data", ref->data);
add_key(hash, "next", ref->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::ArrayRef");
} else if (TYPE_match(node, HashRefNode)) {
HashRefNode *ref = dynamic_cast<HashRefNode *>(node);
//......... (the rest of this example is omitted) .........
Example 7: d
void ribi::pvdb::QtPvdbClusterDialog::Test() noexcept
{
{
static bool is_tested = false;
if (is_tested) return;
is_tested = true;
}
typedef std::vector<boost::shared_ptr<ribi::cmap::Edge> > Edges;
typedef std::vector<boost::shared_ptr<ribi::cmap::Node> > Nodes;
//Regular tests
{
const std::vector<boost::shared_ptr<pvdb::File> > v = pvdb::File::GetTests();
std::for_each(v.begin(),v.end(),
[](const boost::shared_ptr<pvdb::File> & file)
{
const bool had_cluster = file->GetCluster().get();
const bool had_concept_map = file->GetConceptMap().get();
boost::shared_ptr<QtPvdbClusterDialog> d(new QtPvdbClusterDialog(file));
if (!had_cluster && !had_concept_map)
{
assert(file->GetCluster());
assert(!file->GetConceptMap());
assert(d->ui->button_add->isEnabled());
}
if ( had_cluster && !had_concept_map)
{
assert(file->GetCluster());
assert(!file->GetConceptMap());
assert(d->ui->button_add->isEnabled());
}
if (!had_cluster && had_concept_map)
{
assert(!file->GetCluster());
assert( file->GetConceptMap());
assert(!d->ui->button_add->isEnabled());
}
if ( had_cluster && had_concept_map)
{
assert( file->GetCluster());
assert( file->GetConceptMap());
assert(!d->ui->button_add->isEnabled());
}
//Test cluster copying, if there
if (file->GetCluster())
{
assert(file->GetCluster() && "the cluster dialog used an existing or created a cluster");
assert(file->GetCluster() == d->GetWidget()->GetCluster());
const boost::shared_ptr<pvdb::Cluster> before = pvdb::ClusterFactory::DeepCopy(file->GetCluster());
assert(before);
assert(before != file->GetCluster());
assert(operator==(*file->GetCluster(),*before));
d->GetWidget()->Add("Modification!");
//assert(!IsEqual(*file->GetCluster(),*before)); //Does not work, must obtain the cluster from the widget
assert(!operator==(*d->GetWidget()->GetCluster(),*before)); //Widget updates the cluster
}
}
);
}
{
const std::vector<boost::shared_ptr<pvdb::File> > v = pvdb::File::GetTests();
std::for_each(v.begin(),v.end(),
[](const boost::shared_ptr<pvdb::File> & file)
{
const bool had_cluster = file->GetCluster().get(); //.get() needed for cross-compiler
const bool had_concept_map = file->GetConceptMap().get(); //.get() needed for cross-compiler
boost::shared_ptr<QtPvdbClusterDialog> d(new QtPvdbClusterDialog(file));
if (!had_cluster && !had_concept_map)
{
assert(file->GetCluster());
assert(!file->GetConceptMap());
assert(d->ui->button_add->isEnabled());
}
if ( had_cluster && !had_concept_map)
{
assert(file->GetCluster());
assert(!file->GetConceptMap());
assert(d->ui->button_add->isEnabled());
}
if (!had_cluster && had_concept_map)
{
assert(!file->GetCluster());
assert( file->GetConceptMap());
assert(!d->ui->button_add->isEnabled());
}
if ( had_cluster && had_concept_map)
{
assert( file->GetCluster());
assert( file->GetConceptMap());
assert(!d->ui->button_add->isEnabled());
}
if (file->GetCluster())
{
const boost::shared_ptr<pvdb::Cluster> before = pvdb::ClusterFactory::DeepCopy(file->GetCluster());
assert(before);
assert(before != file->GetCluster());
//......... (the rest of this example is omitted) .........
Example 8: realThink
int UltimateTicTacToeMontecarloAI::realThink(const UltimateTicTacToeMontecarloAI::Board &board, const int previousMove, const int player) const
{
//printBoard(board);
if(maxIterations == 0) {
Moves options = movementOptions(board, previousMove);
return options.at(qrand() % options.size());
}
//qint64 now = QDateTime::currentMSecsSinceEpoch();
//qDebug() << "c: " << c << ", maxIterations: " << maxIterations << ", maxChildren: " <<maxChildren;
Nodes nodes;
nodes.reserve(maxIterations * maxChildren);
nodes.append(Node { 0, 1, board, previousMove, -1, Node::Children() });
int i;
for(i = 0; i < maxIterations; ++i)
{
int leafIndex = select(nodes);
Node const& leaf = nodes.at(leafIndex);
GameState leafState = gameState(leaf.board, player);
if(leafState == GameState::WIN)
{
/* qDebug() << "---";
printBoard(leaf.board);
*/
break;
}
else if(leafState == GameState::LOSE)
{
backpropagate(leafIndex, nodes, -10);
}
else if(leafState == GameState::TIE)
{
backpropagate(leafIndex, nodes, -5);
}
else if(leafState == GameState::UNRESOLVED)
{
int nodeIndex = expand(leafIndex, nodes, player);
Node const& node = nodes.at(nodeIndex);
int score = simulate(node.board, node.previousMove, player);
backpropagate(nodeIndex, nodes, score);
}
}
//qDebug() << "Found solution in " << i + 1 << " iterations";
Node const& root = nodes.at(0);
int bestChildIndex = pickBestChild(root, nodes, false);
Node const& bestChild = nodes.at(bestChildIndex);
//qDebug() << "AI took " << (QDateTime::currentMSecsSinceEpoch() - now) << " ms";
/*for(int childIndex : root.children)
{
Node const& child = nodes.at(childIndex);
qDebug() << child.previousMove << ":" << child.v << child.n;
}*/
//qDebug() << bestChild.previousMove / 9 << bestChild.previousMove %9;
return bestChild.previousMove;
}