This article collects typical usage examples of the C++ method log4cxx::LoggerPtr::isTraceEnabled. If you have been wondering what LoggerPtr::isTraceEnabled does, how to call it, and what real code that uses it looks like, the curated examples below should help. You can also read more about its enclosing class, log4cxx::LoggerPtr.
Six code examples of LoggerPtr::isTraceEnabled are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
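All six examples below use isTraceEnabled() to skip relatively expensive work (building large log messages, keeping per-query log files around) unless TRACE-level logging is actually enabled for the logger. As a minimal, self-contained sketch of that guard pattern (the logger name "example" and the report-building loop are made up for illustration, not taken from the examples below):

#include <log4cxx/logger.h>
#include <log4cxx/basicconfigurator.h>
#include <sstream>

int main()
{
    // Minimal configuration so the sketch runs standalone.
    log4cxx::BasicConfigurator::configure();
    log4cxx::LoggerPtr logger = log4cxx::Logger::getLogger("example"); // hypothetical logger name

    // Build the (potentially expensive) message only when TRACE is enabled.
    if (logger->isTraceEnabled()) {
        std::ostringstream report;
        for (int i = 0; i < 1000; ++i) {
            report << i << ' ';
        }
        LOG4CXX_TRACE(logger, "detailed state: " << report.str());
    }
    return 0;
}

Note that LOG4CXX_TRACE already performs the same level check internally, so an explicit isTraceEnabled() call only pays off when the caller wants to avoid work beyond formatting a single message, which is exactly how the examples below use it.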
Example 1: completeLaunch
void MpiLauncher::completeLaunch(pid_t pid, const std::string& pidFile, int status)
{
    // rm args file
    boost::scoped_ptr<SharedMemoryIpc> shmIpc(mpi::newSharedMemoryIpc(_ipcName));
    shmIpc->remove();
    shmIpc.reset();

    // rm pid file
    scidb::File::remove(pidFile.c_str(), false);

    // rm log file
    if (!logger->isTraceEnabled() && !_inError) {
        string logFileName = mpi::getLauncherLogFile(_installPath, _queryId, _launchId);
        scidb::File::remove(logFileName.c_str(), false);
    }

    if (WIFSIGNALED(status)) {
        LOG4CXX_ERROR(logger, "SciDB MPI launcher (pid="<<pid<<") terminated by signal = "
                      << WTERMSIG(status) << (WCOREDUMP(status)? ", core dumped" : ""));
        throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED) << "MPI launcher process";
    } else if (WIFEXITED(status)) {
        int rc = WEXITSTATUS(status);
        if (rc != 0) {
            LOG4CXX_ERROR(logger, "SciDB MPI launcher (pid="<<_pid<<") exited with status = " << rc);
            throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED) << "MPI launcher process";
        } else {
            LOG4CXX_DEBUG(logger, "SciDB MPI launcher (pid="<<_pid<<") exited with status = " << rc);
            return;
        }
    }
    throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNREACHABLE_CODE);
}
Example 2: shmIpc
// XXX TODO: consider returning std::vector<scidb::SharedMemoryPtr>
// XXX TODO: which would require supporting different types of memory (double, char etc.)
std::vector<MPIPhysical::SMIptr_t> MPIPhysical::allocateMPISharedMemory(size_t numBufs,
                                                                        size_t elemSizes[],
                                                                        size_t numElems[],
                                                                        string dbgNames[])
{
    LOG4CXX_DEBUG(logger, "MPIPhysical::allocateMPISharedMemory(numBufs "<<numBufs<<",,,)");

    if (logger->isTraceEnabled()) {
        LOG4CXX_TRACE(logger, "MPIPhysical::allocateMPISharedMemory(): allocations are: ");
        for(size_t ii=0; ii< numBufs; ii++) {
            LOG4CXX_TRACE(logger, "MPIPhysical::allocateMPISharedMemory():"
                          << " elemSizes["<<ii<<"] "<< dbgNames[ii] << " len " << numElems[ii]);
        }
    }

    std::vector<SMIptr_t> shmIpc(numBufs);
    bool preallocate = Config::getInstance()->getOption<bool>(CONFIG_PREALLOCATE_SHM);
    for(size_t ii=0; ii<numBufs; ii++) {
        std::stringstream suffix;
        suffix << "." << ii;
        std::string ipcNameFull = _ipcName + suffix.str();
        LOG4CXX_TRACE(logger, "IPC name = " << ipcNameFull);
        shmIpc[ii] = SMIptr_t(mpi::newSharedMemoryIpc(ipcNameFull, preallocate)); // can I get 'em off ctx instead?
        _ctx->addSharedMemoryIpc(_launchId, shmIpc[ii]);

        char* ptr = MpiLauncher::initIpcForWrite(shmIpc[ii].get(), (elemSizes[ii] * numElems[ii]));
        assert(ptr);
        ptr = ptr; // self-assignment keeps 'ptr' referenced when assert() compiles away in release builds
    }
    return shmIpc;
}
Example 3: destroy
void MpiSlaveProxy::destroy(bool error)
{
    QueryID queryIdForKill(INVALID_QUERY_ID);
    if (error) {
        _inError = true;
        queryIdForKill = _queryId;
    }
    const string clusterUuid = Cluster::getInstance()->getUuid();

    // kill the slave proc and its parent orted
    for (std::vector<pid_t>::const_iterator iter = _pids.begin();
         iter != _pids.end(); ++iter) {
        pid_t pid = *iter;
        //XXX TODO tigor: kill process group (-pid) ?
        LOG4CXX_DEBUG(logger, "MpiSlaveProxy::destroy: killing slave pid = "<<pid);
        MpiErrorHandler::killProc(_installPath, clusterUuid, pid, queryIdForKill);
    }

    std::string pidFile = mpi::getSlavePidFile(_installPath, _queryId, _launchId);
    MpiErrorHandler::cleanupSlavePidFile(_installPath,
                                         clusterUuid,
                                         pidFile,
                                         queryIdForKill);

    // rm log file
    if (!logger->isTraceEnabled() && !_inError) {
        string logFileName = mpi::getSlaveLogFile(_installPath, _queryId, _launchId);
        scidb::File::remove(logFileName.c_str(), false);
    }
}
Example 4: destroy
void MpiSlaveProxy::destroy(bool error)
{
    if (error) {
        _inError = true;
    }

    // kill the slave proc and its parent orted
    for (std::vector<pid_t>::const_iterator iter = _pids.begin();
         iter != _pids.end(); ++iter) {
        pid_t pid = *iter;
        //XXX TODO tigor: kill process group (-pid) ?
        MpiErrorHandler::killProc(_installPath, pid);
    }

    // rm pid file
    std::string pidFile = mpi::getSlavePidFile(_installPath, _queryId, _launchId);
    scidb::File::remove(pidFile.c_str(), false);

    // rm log file
    if (!logger->isTraceEnabled() && !_inError) {
        string logFileName = mpi::getSlaveLogFile(_installPath, _queryId, _launchId);
        scidb::File::remove(logFileName.c_str(), false);
    }
}
Example 5: Cx
//......... part of the code is omitted here .........
        }

        // free potentially large amount of memory, e.g. when inputArrays[mat] was significantly memory-materialized
        inputArrays[mat].reset();
        tmpRedistedInput.reset(); // and drop this array before iterating on the loop to the next repart/redist

        if(DBG_REFORMAT) { // check that the reformat worked correctly
            for(size_t ii=0; ii < matrixLocalSize[mat]; ii++) {
                LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI():"
                              << " @myPPos("<< MYPROW << "," << MYPCOL << ")"
                              << " array["<<mat<<"]["<<ii<<"] = " << asDoubles[mat][ii]);
            }
        }
    }

    size_t resultShmIpcIndx = BUF_MAT_CC;  // by default, GEMM assumes it will return something for C,
                                           // but this will change if we find we don't participate in the output
    shmSharedPtr_t Cx(shmIpc[resultShmIpcIndx]);

    //
    //.... Call pdgemm to compute the product of A and B .............................
    //
    LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI(): calling pdgemm_ M,N,K:" << size[AA][R] << ","
                  << size[BB][R] << ","
                  << size[CC][C]
                  << " MB,NB:" << MB_NB[AA][R] << "," << MB_NB[AA][C]);
    if(DBG_CERR) std::cerr << "GEMMPhysical::invokeMPI(): calling pdgemm to compute" << std::endl;

    std::shared_ptr<MpiSlaveProxy> slave = _ctx->getSlave(_launchId);
    slpp::int_t MYPE = query->getInstanceID();  // we map 1-to-1 between instanceID and MPI rank
    slpp::int_t INFO = DEFAULT_BAD_INFO;
    pdgemmMaster(query.get(), _ctx, slave, _ipcName, shmIpc[BUF_ARGS]->get(),
                 NPROW, NPCOL, MYPROW, MYPCOL, MYPE,
                 getTransposeCode(options.transposeA), getTransposeCode(options.transposeB),
                 size[CC][R], size[CC][C], K,
                 &options.alpha,
                 asDoubles[AA], one, one, DESC[AA],
                 asDoubles[BB], one, one, DESC[BB],
                 &options.beta,
                 asDoubles[CC], one, one, DESC[CC],
                 INFO);
    raiseIfBadResultInfo(INFO, "pdgemm");

    boost::shared_array<char> resPtrDummy(reinterpret_cast<char*>(NULL));
    typedef scidb::ReformatFromScalapack<shmSharedPtr_t> reformatOp_t;

    if(logger->isTraceEnabled()) {
        LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI(): --------------------------------------");
        LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI(): sequential values from 'C' memory");
        for(size_t ii=0; ii < matrixLocalSize[CC]; ii++) {
            LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI(): ("<< MYPROW << "," << MYPCOL << ") C["<<ii<<"] = " << asDoubles[CC][ii]);
        }
        LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI(): --------------------------------------");
        LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI(): using pdelgetOp to reformat Gemm left from memory to scidb array, start");
    }

    //
    // an OpArray is a SplitArray that is filled on-the-fly by calling the operator,
    // so all we have to do is create one with an upper-left corner equal to the
    // global position of the first local block we have. so we need to map
    // our "processor" coordinate into that position, which we do by multiplying
    // by the chunkSize
    //
    Coordinates first(2);
    first[R] = dimsCC[R].getStartMin() + MYPROW * MB_NB[CC][R];
    first[C] = dimsCC[C].getStartMin() + MYPCOL * MB_NB[CC][C];

    Coordinates last(2);
    last[R] = dimsCC[R].getStartMin() + size[CC][R] - 1;
    last[C] = dimsCC[C].getStartMin() + size[CC][C] - 1;

    std::shared_ptr<Array> result;
    // the process grid may be larger than the size of output in chunks... e.g. multiplying A(1x100) * B(100x1) -> C(1x1)
    bool isParticipatingInOutput = first[R] <= last[R] && first[C] <= last[C];
    if (isParticipatingInOutput) {
        // there is in fact some output in our shared memory... hook it up to an OpArray
        Coordinates iterDelta(2);
        iterDelta[0] = NPROW * MB_NB[CC][R];
        iterDelta[1] = NPCOL * MB_NB[CC][C];
        LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI(): Creating OpArray from ("<<first[R]<<","<<first[C]<<") to (" << last[R] <<"," <<last[C]<<") delta:"<<iterDelta[R]<<","<<iterDelta[C]);
        reformatOp_t pdelgetOp(Cx, DESC[CC], dimsCC[R].getStartMin(), dimsCC[C].getStartMin(),
                               NPROW, NPCOL, MYPROW, MYPCOL);
        result = std::shared_ptr<Array>(new OpArray<reformatOp_t>(outSchema, resPtrDummy, pdelgetOp,
                                                                  first, last, iterDelta, query));
        assert(resultShmIpcIndx == BUF_MAT_CC);
    } else {
        LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI(): instance participated, but does not output: creating empty MemArray: first ("<<first[R]<<","<<first[C]<<"), last(" << last[R] <<"," <<last[C]<<")");
        result = std::shared_ptr<Array>(new MemArray(_schema, query));  // same as when we don't participate at all
        resultShmIpcIndx = shmIpc.size();  // indicate we don't want to hold on to buffer BUF_MAT_CC after all
    }

    // TODO: common pattern in ScaLAPACK operators: factor to base class
    releaseMPISharedMemoryInputs(shmIpc, resultShmIpcIndx);
    unlaunchMPISlaves();

    LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI() end");
    return result;
}
Example 6: assert
void
PhysicalQueryPlanNode::supplantChild(const PhysNodePtr& targetChild,
                                     const PhysNodePtr& newChild)
{
    assert(newChild);
    assert(targetChild);
    assert(newChild.get() != this);

    int removed = 0;
    std::vector<PhysNodePtr> newChildren;

    if (logger->isTraceEnabled()) {
        std::ostringstream os;
        os << "Supplanting targetChild Node:\n";
        targetChild->toString(os, 0 /*indent*/, false /*children*/);
        os << "\nwith\n";
        newChild->toString(os, 0 /*indent*/, false /*children*/);
        LOG4CXX_TRACE(logger, os.str());
    }

    for (auto &child : _childNodes) {
        if (child != targetChild) {
            newChildren.push_back(child);
        }
        else {
            // Set the parent of the newChild to this node.
            newChild->_parent = shared_from_this();

            // NOTE: Any existing children of the newChild are removed from the
            // Query Plan.
            if ((newChild->_childNodes).size() > 0) {
                LOG4CXX_INFO(logger,
                             "Child nodes of supplanting node are being removed from the tree.");
            }

            // Re-parent the children of the targetChild to the newChild
            newChild->_childNodes.swap(targetChild->_childNodes);
            for (auto grandchild : newChild->_childNodes) {
                assert(grandchild != newChild);
                grandchild->_parent = newChild;
            }

            // Remove any references to the children from the targetChild
            targetChild->_childNodes.clear();
            targetChild->resetParent();

            // Add the newChild to this node
            newChildren.push_back(newChild);
            ++removed;
        }
    }
    _childNodes.swap(newChildren);

    if (logger->isTraceEnabled()) {
        std::ostringstream os;
        newChild->toString(os);
        LOG4CXX_TRACE(logger, "New Node subplan:\n" << os.str());
    }
    SCIDB_ASSERT(removed == 1);
}