本文整理汇总了C++中Nodes::size方法的典型用法代码示例。如果您正苦于以下问题:C++ Nodes::size方法的具体用法?C++ Nodes::size怎么用?C++ Nodes::size使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Nodes
的用法示例。
在下文中一共展示了Nodes::size方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: notifyDetach
void Object::notifyDetach()
{
if( !isMaster( ))
return;
// unmap slaves
const Nodes slaves = impl_->cm->getSlaveNodes();
if( slaves.empty( ))
return;
LBWARN << slaves.size() << " slaves subscribed during deregisterObject of "
<< lunchbox::className( this ) << " id " << impl_->id << std::endl;
for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i )
{
NodePtr node = *i;
node->send( CMD_NODE_UNMAP_OBJECT ) << impl_->id;
}
}
示例2: cmdFile
// Verifies that a "script" node source honors its "parent_level" preference:
// the configured script is run with a parent node's name as $1, and each line
// it prints becomes a child node of that parent on the next level.
// NOTE(review): assumes _fetchNodesImpl also materializes an implicit root
// plus the two manual nodes, for 5 nodes total with the two script children
// -- confirm against NodesLoader.
TEST(TestScriptFetcher, HandleParentArgument) {
TemporaryDir tmp_dir;
// Executable shell script that echoes "<first argument>:foo".
TemporaryFile cmdFile(tmp_dir.createFile());
cmdFile.writeString(
"#!/bin/sh\n"
"echo $1:foo\n"
);
PCHECK(chmod(cmdFile.getFilename().c_str(), 0700) == 0);
// Two-level config: the "manual" source provides node1/node2 on level1,
// while the "script" source generates level2 children under each of them.
Config config(dynamic::object
("resources", dynamic::object)
("nodes", dynamic::object
("levels", {"level1", "level2"})
("node_sources", {
dynamic::object
("source", "manual")
("prefs", dynamic::object
("node1", {})
("node2", {})
)
,
dynamic::object
("source", "script")
("prefs", dynamic::object
("parent_level", "level1")
("script", cmdFile.getFilename().native())
)
}
)
)
);
Nodes nodes;
NodesLoader::_fetchNodesImpl(config, &nodes);
ASSERT_EQ(5, nodes.size());
// Each script-generated child must sit on level 2 (0-based from the root)
// and be parented to the manual node whose name it was derived from.
auto n1 = nodes.getNodeVerySlow("node1:foo");
ASSERT_EQ(2, n1->level());
ASSERT_EQ("node1", n1->parent()->name());
auto n2 = nodes.getNodeVerySlow("node2:foo");
ASSERT_EQ(2, n2->level());
ASSERT_EQ("node2", n2->parent()->name());
}
示例3: notifyDetach
void Object::notifyDetach()
{
if( !isMaster( ))
return;
// unmap slaves
const Nodes slaves = _cm->getSlaveNodes();
if( slaves.empty( ))
return;
EQWARN << slaves.size() << " slaves subscribed during deregisterObject of "
<< base::className( this ) << " id " << _id << std::endl;
NodeUnmapObjectPacket packet;
packet.objectID = _id;
for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i )
{
NodePtr node = *i;
node->send( packet );
}
}
示例4: setZValue
GuiDomain::GuiDomain(Domain *pDomain, OverviewNetworkSimulationWidget* pNetSimWidget):QGraphicsPolygonItem()
{
    mNetSimWidget = pNetSimWidget;
    mDomain = pDomain;

    // Register this domain with the GUI item of every node it contains.
    // (An empty node list simply skips the loop.)
    Nodes tDomainNodes = pDomain->GetNodes();
    for (Nodes::iterator tNodeIt = tDomainNodes.begin(); tNodeIt != tDomainNodes.end(); tNodeIt++)
    {
        GuiNode* tGuiNode = pNetSimWidget->GetGuiNode(*tNodeIt);
        tGuiNode->AddDomain(this);
    }

    // Paint domains light gray and far behind all other scene items.
    setZValue(-2000.0);
    setPen(QPen(QColor(0xdd, 0xdd, 0xdd), 1, Qt::SolidLine, Qt::FlatCap, Qt::RoundJoin));
    setBrush(QColor(0xdd, 0xdd, 0xdd));
    UpdatePosition();
#ifdef DEBUG_GUI_SIMULATION_TOPOLOGY_CREATION
    LOG(LOG_WARN, "Created GUI domain %s", mDomain->GetDomainAddress().c_str());
#endif
}
示例5: setMarked
void
LineMerger::buildEdgeStringsForNonDegree2Nodes()
{
#if GEOS_DEBUG
cerr<<__FUNCTION__<<endl;
#endif
typedef std::vector<Node*> Nodes;
Nodes nodes;
graph.getNodes(nodes);
for (Nodes::size_type i=0, in=nodes.size(); i<in; ++i) {
Node *node=nodes[i];
#if GEOS_DEBUG
cerr<<"Node "<<i<<": "<<*node<<endl;
#endif
if (node->getDegree()!=2) {
buildEdgeStringsStartingAt(node);
node->setMarked(true);
#if GEOS_DEBUG
cerr<<" setMarked(true) : "<<*node<<endl;
#endif
}
}
}
示例6: distribution
/**
* Takes a subgraph of the given graph (all nodes in the graph with the given label),
* partitions this subgraph into even smaller subgraphs (using something similar to k-means),
* and gives all small subgraphs a unique label (using the given min_label).
*
* @param graph
* @param label_of_connected_component
* @param size_of_largest_partition
* @param min_label_for_partition_labeling
* @return the number of generated partitions
*/
std::size_t
partition_connected_component(UniGraph * graph, std::size_t label_of_connected_component, std::size_t partition_size, std::size_t min_label_for_partition_labeling)
{
typedef std::size_t Node;
typedef std::size_t Label;
typedef std::vector<Node> Nodes;
Nodes nodes;
for (Node node = 0; node < graph->num_nodes(); ++node)
if (graph->get_label(node) == label_of_connected_component)
nodes.push_back(node);
const std::size_t num_partitions = (nodes.size() + partition_size - 1) / partition_size; // division and rounding up
/********* k-means clustering *******/
const std::size_t num_kmeans_iterations = 100;
Nodes centroids;
/* Draw centroids randomly. */
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(0, nodes.size() - 1);
for(std::size_t partition = 0; partition < num_partitions; ++partition) {
Node centroid = std::numeric_limits<Node>::max();
while (std::find(centroids.begin(), centroids.end(), centroid) != centroids.end())
centroid = nodes.at(distribution(generator));
centroids.push_back(centroid);
}
for (std::size_t kmeans_iteration = 0; kmeans_iteration < num_kmeans_iterations; ++kmeans_iteration) {
const Label unvisited = std::numeric_limits<Label>::max();
for (Node const & node : nodes)
graph->set_label(node, unvisited);
/* Put centroids into queues. */
std::vector<Nodes> queues(num_partitions);
for (std::size_t i = 0; i < num_partitions; ++i)
queues.at(i).push_back(centroids.at(i));
/* Grow regions starting from centroids */
while (std::any_of(queues.begin(), queues.end(), [](Nodes const & queue){return !queue.empty();})) {
#pragma omp parallel for
for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
Nodes & old_queue = queues.at(queue_id);
std::unordered_set<Node> new_queue;
for (Node node : old_queue)
graph->set_label(node, min_label_for_partition_labeling + queue_id); // there is a race condition for partition boundary nodes but we don't care
for (Node node : old_queue) {
/* Copy all unvisited (and not yet inserted) neighbors into new queue. */
for (Node neighbor : graph->get_adj_nodes(node))
if (graph->get_label(neighbor) == unvisited)
new_queue.insert(neighbor);
}
old_queue.clear();
old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
}
}
/* If we are in the final iteration we stop here to keep the graph labels
* (they would be removed in the following region shrinking step). */
if (kmeans_iteration == num_kmeans_iterations - 1)
break;
/* Put partition boundary nodes into queues. */
for (Node const node : nodes) {
Label const cur_label = graph->get_label(node);
std::size_t const cur_queue = cur_label - min_label_for_partition_labeling;
Nodes const & neighbors = graph->get_adj_nodes(node);
/* Each node, where any of its neighbors has a different label, is a boundary node. */
if (std::any_of(neighbors.begin(), neighbors.end(), [graph, cur_label]
(Node const neighbor) { return graph->get_label(neighbor) != cur_label; } ))
queues.at(cur_queue).push_back(node);
}
/* Shrink regions starting from boundaries to obtain new centroids. */
#pragma omp parallel for
for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
Nodes & old_queue = queues.at(queue_id);
while (!old_queue.empty()){
std::unordered_set<Node> new_queue;
for (Node node : old_queue)
graph->set_label(node, unvisited);
for (Node node : old_queue) {
/* Copy all neighbors that have not yet been marked (and have not yet been inserted) into new queue. */
for (Node neighbor : graph->get_adj_nodes(node))
if (graph->get_label(neighbor) == min_label_for_partition_labeling + queue_id)
new_queue.insert(neighbor);
//.........这里部分代码省略.........
示例7: route
void LeaflessOrthoRouter::route(Logger *logger) {
// Set up for logging.
unsigned ln = logger != nullptr ? logger->nextLoggingIndex : 0;
std::function<void(unsigned)> log = [ln, this, logger](unsigned n)->void{
if (logger!=nullptr) {
std::string fn = string_format("%02d_%02d_routing_attempt", ln, n);
std::string path = logger->writeFullPathForFilename(fn);
this->m_ra.router.outputInstanceToSVG(path);
}
};
/*
* We may need to route multiple times to ensure that at least two sides of each node are being used,
* but in theory we should never have to route more than 4n+1 times.
*
* Proof: We always begin with an initial routing. We want to show it could be necessary to re-route
* at most 4n times.
*
* In order to see this, we first argue that the worst-case-scenario for any single node is that it
* require four routings. Consider then some node u all of whose edges have been routed to one side, s0. We
* then pick some edge e0 incident to u, say that it may not connect to side s0, and we re-route for the first time.
*
* While unlikely, it could be that, for whatever reason, now all edges incident to node u are routed to some other side,
* s1. We then pick some edge e1 (could be the same or different from e0), forbid it from connecting to
* side s1, and re-route for a second time.
*
* Again, for whatever reason, all edges could now connect to one
* of the two remaining sides, s2. Continuing in this way, we could be led to re-route a third and a fourth time. But
* prior to the fourth re-routing it would be the case that for each side si of node u, there was
* some edge ei incident to u that had been forbidden from connecting on side si. Therefore on the fourth
* re-routing it would be impossible for all edges to connect on any single side of u.
*
* So much for the case of a single node. However, in again a highly unlikely worst-case-scenario, it could be
* that during the first five routings no other node besides u was a pseudoleaf (had all edges routed to one side),
* but after the fifth some other node became a pseudoleaf. In this way we could be led to do four re-routings
* for each node in the graph. QED
*
* In practice, it would probably be very rare for more that two routings to ever be necessary. For this
* requires the odd circumstance, considered in the proof, that forbidding one edge from connecting on a
* given side somehow results in /all/ edges incident at that node migrating to some other, single side.
*
* In order that our theory be tested, we use an infinite loop with counter and assertion, instead
* of a mere for-loop which would fail silently.
*/
size_t numRoutings = 0;
size_t maxRoutings = 4*m_n + 1;
while (true) {
m_ra.router.processTransaction();
log(++numRoutings);
// As explained in the comments above, at most five routings should ever be needed.
COLA_ASSERT(numRoutings <= maxRoutings);
// For testing purposes, we may want to record the results of
// each routing attempt.
if (recordEachAttempt) {
m_ra.recordRoutes(true);
routingAttemptTglf.push_back(m_graph->writeTglf());
}
// Are there any nodes having all of their edges routed
// out of just one side? This is what we want to prevent.
// Such nodes would become leaves in a planarisation, so we
// call them "pseudoleaves".
Nodes pseudoLeaves;
// For each such Node (if any), there is a sole direction in which
// all connectors depart. We keep track of those directions as we work.
vector<CardinalDir> soleDepartureDirecs;
// Check each Node in the Graph:
for (auto p : m_graph->getNodeLookup()) {
Node_SP &u = p.second;
const EdgesById edgeLookup = u->getEdgeLookup();
// Sanity check, that Node u is not an actual leaf:
COLA_ASSERT(edgeLookup.size() > 1);
// Determine the departure direction from Node u for its first Edge.
auto edge_it = edgeLookup.cbegin();
CardinalDir d0 = departureDir((*edge_it).second, u);
// If two or more directions have been used, some edge must depart
// in a different direction than this one. (For if all the rest equal
// this first one, then all are the same.)
bool isPseudoLeaf = true;
for (auto jt = ++edge_it; jt != edgeLookup.cend(); ++jt) {
CardinalDir d1 = departureDir((*jt).second, u);
if (d1 != d0) {
isPseudoLeaf = false;
break;
}
}
if (isPseudoLeaf) {
pseudoLeaves.push_back(u);
soleDepartureDirecs.push_back(d0);
}
}
// Are there any pseudoleaves?
if (pseudoLeaves.empty()) {
// If there are none, then we're done routing, and can break out of the outer while loop.
break;
} else {
// But if there are still pseudoleaves, then we need to work on them.
for (size_t i = 0; i < pseudoLeaves.size(); ++i) {
// Get the Node and the direction in which all connectors currently depart from it.
Node_SP u = pseudoLeaves[i];
//.........这里部分代码省略.........
示例8: bless
static SV *node_to_sv(pTHX_ Node *node)
{
SV *ret = NULL;
if (!node) return ret;
if (TYPE_match(node, BranchNode)) {
BranchNode *branch = dynamic_cast<BranchNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, branch->tk);
add_key(hash, "left", branch->left);
add_key(hash, "right", branch->right);
add_key(hash, "next", branch->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Branch");
} else if (TYPE_match(node, FunctionCallNode)) {
FunctionCallNode *call = dynamic_cast<FunctionCallNode *>(node);
Nodes *args = call->args;
size_t argsize = args->size();
AV *array = new_Array();
for (size_t i = 0; i < argsize; i++) {
SV *arg = node_to_sv(aTHX_ args->at(i));
if (!arg) continue;
av_push(array, set(arg));
}
HV *hash = (HV*)new_Hash();
add_key(hash, "next", call->next);
add_token(hash, call->tk);
(void)hv_stores(hash, "args", set(new_Ref(array)));
ret = bless(aTHX_ hash, "Compiler::Parser::Node::FunctionCall");
} else if (TYPE_match(node, ArrayNode)) {
ArrayNode *array = dynamic_cast<ArrayNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, array->tk);
add_key(hash, "next", array->next);
add_key(hash, "idx", array->idx);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Array");
} else if (TYPE_match(node, HashNode)) {
HashNode *h = dynamic_cast<HashNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, h->tk);
add_key(hash, "next", h->next);
add_key(hash, "key", h->key);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Hash");
} else if (TYPE_match(node, DereferenceNode)) {
DereferenceNode *dref = dynamic_cast<DereferenceNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, dref->tk);
add_key(hash, "next", dref->next);
add_key(hash, "expr", dref->expr);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Dereference");
} else if (TYPE_match(node, FunctionNode)) {
FunctionNode *f = dynamic_cast<FunctionNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, f->tk);
add_key(hash, "next", f->next);
add_key(hash, "body", f->body);
add_key(hash, "prototype", f->prototype);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Function");
} else if (TYPE_match(node, BlockNode)) {
BlockNode *b = dynamic_cast<BlockNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, b->tk);
add_key(hash, "next", b->next);
add_key(hash, "body", b->body);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Block");
} else if (TYPE_match(node, ReturnNode)) {
ReturnNode *r = dynamic_cast<ReturnNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, r->tk);
add_key(hash, "next", r->next);
add_key(hash, "body", r->body);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Return");
} else if (TYPE_match(node, SingleTermOperatorNode)) {
SingleTermOperatorNode *s = dynamic_cast<SingleTermOperatorNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, s->tk);
add_key(hash, "next", s->next);
add_key(hash, "expr", s->expr);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::SingleTermOperator");
} else if (TYPE_match(node, DoubleTermOperatorNode)) {
} else if (TYPE_match(node, LeafNode)) {
LeafNode *leaf = dynamic_cast<LeafNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, leaf->tk);
add_key(hash, "next", leaf->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::Leaf");
} else if (TYPE_match(node, ListNode)) {
ListNode *list = dynamic_cast<ListNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, list->tk);
add_key(hash, "data", list->data);
add_key(hash, "next", list->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::List");
} else if (TYPE_match(node, ArrayRefNode)) {
ArrayRefNode *ref = dynamic_cast<ArrayRefNode *>(node);
HV *hash = (HV*)new_Hash();
add_token(hash, ref->tk);
add_key(hash, "data", ref->data);
add_key(hash, "next", ref->next);
ret = bless(aTHX_ hash, "Compiler::Parser::Node::ArrayRef");
} else if (TYPE_match(node, HashRefNode)) {
HashRefNode *ref = dynamic_cast<HashRefNode *>(node);
//.........这里部分代码省略.........
示例9: main
/**
 * Builds a Merkle-style summation tree from a tab-separated input file of
 * "uid<TAB>balance" records and prints every tree node level by level,
 * followed by a summary line: user count, total sum, padding sum, and height.
 *
 * Usage: ./build_tree -i inputs.txt
 *
 * Returns 0 on success; exits with EXIT_FAILURE when the input file cannot
 * be opened or contains no valid records.
 */
int main(int argc, char **argv)
{
    char *file_input = NULL;
    int c;
    Node node;
    Nodes nodes;
    struct Summary summary;
    memset(&summary, 0, sizeof(struct Summary));

    // Parse command line: -i <input file>.
    while ((c = getopt(argc, argv, "i:")) != -1) {
        switch (c) {
        case 'i':
            file_input = optarg;
            break;
        default:
            break;
        }
    }
    if (!file_input) {
        printf("Usage: ./build_tree -i inputs.txt\n");
        exit(EXIT_SUCCESS);
    }

    // Read "uid\tbalance" lines and build the level-0 (leaf) nodes.
    std::ifstream fin(file_input);
    if (!fin.is_open()) {
        std::cerr << "open file failure: " << file_input << std::endl;
        exit(EXIT_FAILURE);
    }
    while (!fin.eof()) {
        std::string uid;
        std::string balance;
        if (!std::getline(fin, uid, '\t') || !std::getline(fin, balance, '\n')) {
            break;
        }
        make_user_node(uid.c_str(), atoll(balance.c_str()), &node);
        nodes.push_back(node);
        summary.sum += node.sum;
    }
    fin.close();
    summary.user_count = nodes.size();

    // Guard against an empty input: the root access (nodes[0]) below would
    // otherwise be undefined behavior.
    if (nodes.empty()) {
        std::cerr << "no valid records in input: " << file_input << std::endl;
        exit(EXIT_FAILURE);
    }

    // nodes at level 0 should be sorted
    std::sort(nodes.begin(), nodes.end());

    int idx = 0;
    Nodes parents;
    // Each level holds at most half of the previous level's nodes (rounded
    // up), so reserve size/2 + 1. (Was size%2 + 1, which reserved only one
    // or two slots and forced reallocations while building the first level.)
    parents.reserve(nodes.size()/2 + 1);
    while (nodes.size() > 1) {
        // Odd level width: duplicate the last node so pairs line up, and
        // account for the duplicated balance in padding_sum.
        if (nodes.size() % 2 == 1) {
            summary.padding_sum += nodes[nodes.size()-1].sum;
            nodes.push_back(nodes[nodes.size()-1]);
        }
        // Emit every node of the current level: index, level, sum, hash.
        for (Nodes::iterator it = nodes.begin(); it != nodes.end(); ++it) {
            std::cout << idx++ << "\t" << summary.level << "\t" << it->sum << "\t";
            dump_hex(it->hash, 8);
            std::cout << std::endl;
        }
        // Combine adjacent pairs into the next level up.
        parents.resize(0);
        build_parent_nodes(&nodes, &parents);
        nodes = parents;
        summary.level++;
    }

    // Emit the root node and the summary line.
    std::cout << idx++ << "\t" << summary.level << "\t" << nodes[0].sum << "\t";
    dump_hex(nodes[0].hash, 8);
    std::cout << std::endl;
    std::cout << "summary:\t" << summary.user_count << "\t" << summary.sum << "\t"
              << summary.padding_sum << "\t" << summary.level << std::endl;
    return 0;
}
示例10: getUnusedNodeId
/// Returns a node id that is not currently in use: one past the largest
/// existing id, or 2 when there are no nodes yet.
/// NOTE(review): assumes `nodes` is an ordered, id-keyed map so that
/// rbegin()->first is the maximum id -- confirm against its declaration.
Net_NodeID getUnusedNodeId() {
	// empty() instead of size() == 0: clearer intent, and size() is not
	// guaranteed O(1) for every container.
	if(nodes.empty()) return 2;
	return nodes.rbegin()->first + 1;
}