This article collects typical usage examples of the C++ DVLOG macro. If you have been wondering what exactly DVLOG does and how to use it, the hand-picked code samples below may help. 15 DVLOG code examples are shown, sorted by popularity by default.
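Before the examples, a brief orientation: DVLOG(n) is a verbose, debug-only logging macro. The sketch below assumes the google/glog implementation (other codebases, such as Chromium's base/logging, provide a compatible macro); it is a minimal illustration, not taken from any of the projects quoted below.

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_v = 2;              // same effect as passing --v=2 on the command line
  FLAGS_logtostderr = true; // log to stderr instead of files

  // DVLOG(n) is the debug-only variant of VLOG(n): it compiles to a no-op in
  // NDEBUG builds, and otherwise prints only when the verbosity level is >= n.
  DVLOG(1) << "shown in a debug build, since 1 <= v";
  DVLOG(2) << "shown in a debug build, since 2 <= v";
  DVLOG(3) << "suppressed, since 3 > v";
  return 0;
}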
Example 1: function_footprint
void MembershipTableMgr::handleNormalJoinEvent(const MemberWrapper& member) {
  function_footprint();
  queue.push(member);
  if (joining) {
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "current joining member queue size: " << queue.unsafe_size();
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    return;
  }
  while (!queue.empty()) {
    joining.store(true);
    MemberWrapper joined_member;
    queue.try_pop(joined_member);

    // the leading member generates the whole membership table and sends it to the joining member.
    auto wholeMemberTable = std::make_shared<WholeMembershipTableEvent>();
    genMembershipTable(*wholeMemberTable->mutable_table());
    ResultCode rs;
    rs = multicastMemberMessage(WHOLE_MEMBERSHIP_TABLE, wholeMemberTable);
    if (rs != RC_SUCCESS) {
      LOG(ERROR) << getErrorDescription(rs);
      return;
    }

    // the leading member sends the delta member to all members.
    auto deltaMember = std::make_shared<DeltaMemberEvent>();
    deltaMember->set_position(findAddPos());
    deltaMember->mutable_member()->CopyFrom(joined_member.getMember());
    rs = multicastMemberMessage(DELTA_MEMBER_AND_JOIN_POSITION, deltaMember);
    if (rs != RC_SUCCESS) {
      LOG(ERROR) << getErrorDescription(rs);
      return;
    }
  }
}
Example 2: serialize
void serialize( Archive & ar, const unsigned int version )
{
    DVLOG(2) << "[BDF::serialize] serialize BDFBase\n";
#if 0
    ar & M_order;
    ar & M_name;
    ar & M_time;
    ar & M_n_restart;
    ar & M_Tf;
#endif
    //ar & M_time_orders;
    ar & boost::serialization::make_nvp( "time_values", M_time_values_map );

    //DVLOG(2) << "[BDF::serialize] time orders size: " << M_time_orders.size() << "\n";
    DVLOG(2) << "[BDF::serialize] time values size: " << M_time_values_map.size() << "\n";
    for ( auto it = M_time_values_map.begin(), en = M_time_values_map.end(); it != en; ++it )
    {
        //LOG(INFO) << "[Bdf] order " << i << "=" << M_time_orders[i] << "\n";
        DVLOG(2) << "[Bdf::serialize] value " << *it << "\n";
    }
    DVLOG(2) << "[BDF::serialize] serialize BDFBase done\n";
}
Example 3: CHECK
template< typename T >
A_Entry GlobalQueue<T>::push_reserve ( bool ignore ) {
  CHECK( isMaster() );
  Grappa::Metrics::global_queue_stats.record_push_reserve_reply( Grappa_sizeof_delegate_func_reply< bool, A_Entry >() );

  DVLOG(5) << "push_reserve";

  CHECK( capacity > 0 );
  if ( (tail % capacity == head % capacity) && (tail != head) ) {
    return make_global( static_cast< QueueEntry<T> * >( NULL ) ); // no room
  } else {
    A_Entry assigned = queueBase + (tail % capacity);
    tail++;
    // if there are any consumers, wake the oldest and give it the address just produced
    if ( pullReserveWaiters.size() > 0 ) {
      CHECK( head == tail-1 ) << "Size should be exactly one, since there are waiters and one value was just produced";
      DVLOG(5) << "push_reserve: found waiters";
      A_Entry granted = assigned;
      head++;
      A_D_A_Entry w = pullReserveWaiters.front();
      pullReserveWaiters.pop();
      pull_reserve_sendreply( w, &granted, true );
    }
    return assigned;
  }
}
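A note on the full-queue test above: head and tail are counters that only ever increase, and each is reduced modulo capacity to pick a slot. The ring is therefore full exactly when both counters map to the same slot while being unequal. A tiny standalone check of that arithmetic, with made-up values:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t capacity = 4;
  // 4 items outstanding: same slot, different counters -> full
  uint64_t head = 8, tail = 12;
  assert( tail % capacity == head % capacity && tail != head );
  // equal counters -> empty, even though the slots also match
  head = tail = 12;
  assert( tail % capacity == head % capacity && tail == head );
  return 0;
}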
Example 4: UpdateComponent
static void UpdateComponent( spaceT const& Xh, Epetra_MultiVector& sol, Epetra_MultiVector& comp )
{
    Epetra_Map componentMap ( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap ( epetraMap( Xh->map() ) );

    int shift = Xh->nDofStart( index );
    int Length = comp.MyLength();

    for ( int i = 0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );
            int localID = globalMap.LID( compGlobalID+shift );
            // int globalID = globalMap.GID(localID);

            DVLOG(2) << "Copy entry component[" << compLocalID << "] to sol[" << localID << "]="
                     << sol[0][localID] << "\n";

            sol[0][localID] = comp[0][compLocalID];

            DVLOG(2) << comp[0][compLocalID] << "\n";
        }
    }
}
Example 5: DVLOG
void StoreDelegateRddActor::handleRddCreate(const ActorMessagePtr& msg) {
  DVLOG(2) << "StoreDelegateRddActor : handle create store delegate.";
  rawMsg = msg;
  CreateDelegateRddRequest* request = dynamic_cast<CreateDelegateRddRequest*>(msg->getPayload().get());
  idgs::store::MetadataHelper::loadStoreMetadata(request->store_name(), metadata.get());

  ClusterFramework& cluster = ::idgs::util::singleton<ClusterFramework>::getInstance();
  for (int32_t partition = 0; partition < partitionSize; ++partition) {
    int32_t memberId = cluster.getPartitionManager()->getPartition(partition)->getPrimaryMemberId();

    shared_ptr<CreateDelegatePartitionRequest> payload(new CreateDelegatePartitionRequest);
    payload->set_store_name(request->store_name());
    payload->set_partition(partition);
    payload->set_rdd_name(getRddName());

    ActorMessagePtr reqMsg = createActorMessage();
    reqMsg->setOperationName(CREATE_DELEGATE_PARTITION);
    reqMsg->setDestActorId(RDD_SERVICE_ACTOR);
    reqMsg->setDestMemberId(memberId);
    reqMsg->setPayload(payload);

    DVLOG(3) << "RDD \"" << getRddName() << "\" sending create RDD partition to member " << memberId;
    ::idgs::actor::postMessage(reqMsg);
  }
}
Example 6: DVLOG
template< typename T >
typename BackendPetsc<T>::solve_return_type
BackendPetsc<T>::solve( sparse_matrix_type const& A,
                        vector_type& x,
                        vector_type const& b )
{
    M_solver_petsc.setPrefix( this->prefix() );
    M_solver_petsc.setPreconditionerType( this->pcEnumType() );
    M_solver_petsc.setSolverType( this->kspEnumType() );
    if ( !M_solver_petsc.initialized() )
        M_solver_petsc.attachPreconditioner( this->M_preconditioner );
    M_solver_petsc.setConstantNullSpace( this->hasConstantNullSpace() );
    M_solver_petsc.setFieldSplitType( this->fieldSplitEnumType() );
    M_solver_petsc.setTolerances( _rtolerance = this->rTolerance(),
                                  _atolerance = this->aTolerance(),
                                  _dtolerance = this->dTolerance(),
                                  _maxit = this->maxIterations() );
    M_solver_petsc.setPrecMatrixStructure( this->precMatrixStructure() );
    M_solver_petsc.setMatSolverPackageType( this->matSolverPackageEnumType() );
    M_solver_petsc.setShowKSPMonitor( this->showKSPMonitor() );
    M_solver_petsc.setShowKSPConvergedReason( this->showKSPConvergedReason() );

    auto res = M_solver_petsc.solve( A, x, b, this->rTolerance(), this->maxIterations() );

    DVLOG(2) << "[BackendPetsc::solve] number of iterations : " << res.template get<1>() << "\n";
    DVLOG(2) << "[BackendPetsc::solve] residual : " << res.template get<2>() << "\n";

    if ( !res.template get<0>() )
        LOG(ERROR) << "Backend " << this->prefix() << " : linear solver failed to converge" << std::endl;

    return res;
} // BackendPetsc::solve
Example 7: complete
/// Mark a certain number of things completed. When the global count on all cores goes to 0, all
/// tasks waiting on the GCE will be woken.
///
/// Note: this can be called in a message handler (e.g. remote completes from stolen tasks).
void complete(int64_t dec = 1) {
  count -= dec;
  DVLOG(4) << "complete (" << count << ") -- gce(" << this << ")";

  // out of work here
  if (count == 0) { // count[dec -> 0]
    // enter cancellable barrier
    send_heap_message(master_core, [this] {
      cores_out--;
      DVLOG(4) << "core entered barrier (cores_out:" << cores_out << ")";

      // if all are in
      if (cores_out == 0) { // cores_out[1 -> 0]
        CHECK_EQ(count, 0);
        // notify everyone to wake
        for (Core c = 0; c < cores(); c++) {
          send_heap_message(c, [this] {
            CHECK_EQ(count, 0);
            DVLOG(3) << "broadcast";
            broadcast(&cv); // wake anyone who was waiting here
            reset(); // reset, now anyone else calling `wait` should fall through
          });
        }
      }
    });
  }
}
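Example 7 above and Example 15 below are two halves of one mechanism: a completion counter that is enrolled on, decremented, and waited upon across cores. For reference, here is a simplified single-process analogue of that enroll/complete/wait pattern built only on the C++ standard library; it is a sketch of the counting idea, not the distributed implementation quoted above.

#include <condition_variable>
#include <cstdint>
#include <mutex>

// Simplified completion event: enroll() raises the outstanding-work count,
// complete() lowers it, wait() blocks until the count reaches zero.
class CompletionEvent {
 public:
  void enroll(int64_t inc = 1) {
    std::lock_guard<std::mutex> lk(m_);
    count_ += inc;
  }
  void complete(int64_t dec = 1) {
    std::lock_guard<std::mutex> lk(m_);
    count_ -= dec;
    if (count_ == 0) cv_.notify_all();  // wake every waiter once we hit zero
  }
  void wait() {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait(lk, [this] { return count_ == 0; });
  }
 private:
  std::mutex m_;
  std::condition_variable cv_;
  int64_t count_ = 0;
};

A producer enrolls once per task it spawns, each task calls complete() when it finishes, and wait() falls through once everything enrolled has completed.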
Example 8: try_merge_buddy_recursive
void try_merge_buddy_recursive( ChunkMap::iterator cmit ) {
  // compute address of buddy
  intptr_t address = cmit->second.address;
  intptr_t buddy_address = (address ^ cmit->second.size);
  DVLOG(5) << cmit->second << " buddy address " << (void *) buddy_address;

  // does it exist?
  ChunkMap::iterator buddy_iterator = chunks_.find( buddy_address );
  if( buddy_iterator != chunks_.end() &&
      buddy_iterator->second.size == cmit->second.size &&
      buddy_iterator->second.in_use == false ) {
    DVLOG(5) << "buddy found! address " << (void *) address << " buddy address " << (void *) buddy_address;

    // remove the higher-addressed chunk
    ChunkMap::iterator higher_iterator = address < buddy_address ? buddy_iterator : cmit;
    remove_from_free_list( higher_iterator );
    chunks_.erase( higher_iterator );

    // keep the lower-addressed chunk in the map:
    // update its size and move it to the right free list
    ChunkMap::iterator lower_iterator = address < buddy_address ? cmit : buddy_iterator;
    remove_from_free_list( lower_iterator ); // should these be swapped? I think so.
    lower_iterator->second.size *= 2;
    add_to_free_list( lower_iterator );

    // see if we have more to merge
    try_merge_buddy_recursive( lower_iterator );
  }
}
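The merge above relies on the buddy-allocator invariant: a power-of-two-sized block whose address is aligned to its size finds its buddy by XOR-ing the address with the size, which is exactly the buddy_address computation at the top of the function. A tiny standalone check of that arithmetic, with made-up offsets:

#include <cassert>
#include <cstdint>

int main() {
  const std::intptr_t size = 64;  // block size, a power of two
  // buddies come in pairs that map to each other under XOR
  assert( (static_cast<std::intptr_t>(0)   ^ size) == 64 );
  assert( (static_cast<std::intptr_t>(64)  ^ size) == 0 );
  assert( (static_cast<std::intptr_t>(128) ^ size) == 192 );
  assert( (static_cast<std::intptr_t>(192) ^ size) == 128 );
  return 0;
}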
Example 9: block_until_acquired
void block_until_acquired() {
  if( !acquired_ ) {
    start_acquire();
#ifdef VTRACE_FULL
    VT_TRACER("incoherent block_until_acquired");
#endif
    DVLOG(5) << "Worker " << Grappa::current_worker()
             << " ready to block on " << *request_address_
             << " * " << *count_;
    if( !acquired_ ) {
      start_time_ = Grappa::timestamp();
    } else {
      start_time_ = 0;
    }
    while( !acquired_ ) {
      DVLOG(5) << "Worker " << Grappa::current_worker()
               << " blocking on " << *request_address_
               << " * " << *count_;
      if( !acquired_ ) {
        thread_ = Grappa::current_worker();
        Grappa::suspend();
        thread_ = NULL;
      }
      DVLOG(5) << "Worker " << Grappa::current_worker()
               << " woke up for " << *request_address_
               << " * " << *count_;
    }
    IAMetrics::record_wakeup_latency( start_time_, network_time_ );
  }
}
Example 10: getComponent
static Epetra_MultiVector getComponent( spaceT const& Xh, Epetra_MultiVector const& sol )
{
    Epetra_Map componentMap ( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap ( epetraMap( Xh->map() ) );
    //DVLOG(2) << "Component map: " << componentMap << "\n";

    Epetra_MultiVector component( componentMap, 1 );
    int Length = component.MyLength();
    int shift = Xh->nDofStart( index );

    for ( int i = 0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );
            int localID = globalMap.LID( compGlobalID+shift );
            // int globalID = globalMap.GID(localID);

            DVLOG(2) << "[MyBackend] Copy entry sol[" << localID << "]=" << sol[0][localID]
                     << " to component[" << compLocalID << "]\n";

            component[0][compLocalID] = sol[0][localID];

            DVLOG(2) << component[0][compLocalID] << "\n";
        }
    }

    return component;
}
Example 11: block_until_pop
T * block_until_pop() {
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this << ": blocking until pop with " << s_.get_value() << " now";
  s_.decrement();
  T * result = ptrs_[s_.get_value()];
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this << ": finished blocking until pop with " << s_.get_value() << "/" << result;
  return result;
}
Example 12: DVLOG
uint32_t InnerTcpConnection::connect(uint32_t memberId, int retry) {
  InnerTcpConnectionState expectedState = INITIAL;
  if (!state.compare_exchange_strong(expectedState, CONNECTING)) {
    DVLOG(2) << "Already connecting to remote peer: " << memberId;
    return 0;
  }
  setPeerMemberId(memberId);

  // connect to remote peer
  auto ep = ::idgs::util::singleton<idgs::actor::RpcFramework>::getInstance().getNetwork()->getEndPoint(memberId);
  if (ep == NULL) {
    LOG(ERROR) << "Network endpoint of member " << memberId << " is not available.";
    terminate();
    return RC_CLIENT_SERVER_IS_NOT_AVAILABLE;
  }
  auto& end_point = ep->tcpEndPoint;
  DVLOG(0) << "Connecting to remote peer " << memberId << '(' << end_point << ")";

  auto conn = shared_from_this();
  try {
    socket.async_connect(end_point, [conn, retry](const asio::error_code& error) {
      conn->handleConnect(error, retry);
    });
  } catch (std::exception& e) {
    LOG(ERROR) << "Failed to connect to remote peer " << memberId << ", exception: " << e.what();
    terminate();
  }
  return 0;
}
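The compare_exchange_strong at the top of connect() is a one-shot transition guard: only the caller that wins the INITIAL-to-CONNECTING swap proceeds, and every concurrent caller bails out early. A minimal sketch of the idiom, with hypothetical names:

#include <atomic>

enum ConnState { INITIAL, CONNECTING, CONNECTED };
std::atomic<ConnState> conn_state{INITIAL};

// Returns true for exactly one winning caller, false for everyone else.
bool try_begin_connect() {
  ConnState expected = INITIAL;
  // Atomically: if conn_state == expected, store CONNECTING and return true;
  // otherwise copy the current value into expected and return false.
  return conn_state.compare_exchange_strong(expected, CONNECTING);
}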
Example 13: DVLOG
bool PartionManagableNode::processGetPartitionTableReq(OperationContext& context) {
  if (!checkOperationName(context, ADMIN_GET_REQUEST)) {
    return false;
  }

  idgs::Application& app = ::idgs::util::singleton<idgs::Application>::getInstance();
  PartitionTableMgr* pm = app.getPartitionManager();

  idgs::pb::PartitionTable table;
  pm->genPartitionTable(table);
  DVLOG(4) << "get partition table:\n" << table.DebugString();

  string jsonBody = protobuf::JsonMessage::toJsonString(&table);
  DVLOG(3) << "get json for partition table:\n" << jsonBody;

  std::shared_ptr<idgs::actor::ActorMessage> response =
      idgs::admin::util::createAdminResponse(context,
                                             ::idgs::admin::pb::Success,
                                             jsonBody);
  idgs::actor::sendMessage(response);
  return true;
}
Example 14: SplitStringIntoKeyValues
bool SplitStringIntoKeyValues(
    const std::string& line,
    char key_value_delimiter,
    std::string* key,
    std::vector<std::string>* values)
{
    key->clear();
    values->clear();

    // Find the key.
    size_t end_key_pos = line.find_first_of(key_value_delimiter);
    if(end_key_pos == std::string::npos)
    {
        DVLOG(1) << "cannot parse key from line: " << line;
        return false; // no key.
    }
    key->assign(line, 0, end_key_pos);

    // Find the values.
    std::string remains(line, end_key_pos, line.size() - end_key_pos);
    size_t begin_values_pos = remains.find_first_not_of(key_value_delimiter);
    if(begin_values_pos == std::string::npos)
    {
        DVLOG(1) << "cannot parse value from line: " << line;
        return false; // no value.
    }
    std::string values_string(remains, begin_values_pos,
                              remains.size() - begin_values_pos);

    // Add to the vector.
    values->push_back(values_string);
    return true;
}
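A hypothetical usage sketch for the function above (assumed to be in scope). Note that only the leading run of delimiter characters is skipped, so any surrounding whitespace stays in the value:

std::string key;
std::vector<std::string> values;
if (SplitStringIntoKeyValues("color: red", ':', &key, &values)) {
  // key == "color"
  // values == {" red"}  -- the ':' run is consumed, the leading space is kept
}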
Example 15: enroll
/// Enroll more things that need to be completed before the global completion is, well, complete.
/// This will send a cancel to the master if this core previously entered the cancellable barrier.
///
/// Blocks until the cancel completes (if it must cancel) to ensure correct ordering; therefore it
/// cannot be called from a message handler.
void enroll(int64_t inc = 1) {
  if (inc == 0) return;

  CHECK_GE(inc, 1);
  count += inc;
  DVLOG(5) << "enroll " << inc << " -> " << count << " gce(" << this << ")";

  // first one to have work here
  if (count == inc) { // count[0 -> inc]
    event_in_progress = true; // optimization to save checking in wait()

    // cancel barrier
    Core co = impl::call(master_core, [this] {
      cores_out++;
      return cores_out;
    });
    // the first core to cancel the barrier should make sure other cores are ready to wait
    if (co == 1) { // cores_out[0 -> 1]
      event_in_progress = true;
      call_on_all_cores([this] {
        event_in_progress = true;
      });
      CHECK(event_in_progress);
    }
    // block until cancelled
    CHECK_GT(count, 0);
    DVLOG(2) << "gce(" << this << " cores_out: " << co << ", count: " << count << ")";
  }
}