This article collects typical usage examples of the C++ class log4cxx::LoggerPtr: what the class is for, how it is used, and what real-world code that uses it looks like. The curated class examples below may help you get started.
The following sections present 15 code examples of the LoggerPtr class, ordered by popularity by default. Examples you find useful can be rated up, which helps the site recommend better C++ samples.
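Before the project examples, here is a minimal, self-contained sketch of the usual LoggerPtr pattern: obtain a named logger once, install appenders (BasicConfigurator is used here only for brevity), and log through the level macros. The logger name "example.app" is just an illustration, not taken from any of the projects below.

#include <log4cxx/logger.h>
#include <log4cxx/basicconfigurator.h>

int main()
{
    // Install a default console appender on the root logger.
    log4cxx::BasicConfigurator::configure();

    // LoggerPtr is a reference-counted handle; getLogger() returns the
    // same underlying Logger object for the same name.
    log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("example.app"));

    LOG4CXX_INFO(logger, "application started");
    LOG4CXX_DEBUG(logger, "value = " << 42);   // streamed, evaluated only if DEBUG is enabled
    return 0;
}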
Example 1: validatePath
bool LogImpl::validatePath(log4cxx::LoggerPtr logger)
{
    if (activatePathValidation)
    {
        vector<AppenderPtr> appenders = logger->getAllAppenders();
        for (size_t i = 0; i < appenders.size(); i++)
        {
            // Identify rolling-file appenders by their run-time type name.
            std::string currentAppenderName = typeid(*appenders[i]).name();
            if (currentAppenderName.find("RollingFile") != string::npos)
            {
                // Cast the appender object itself, not the address of the smart pointer.
                RollingFileAppender* appender =
                    dynamic_cast<RollingFileAppender*>(&*appenders[i]);
                if (appender == NULL)
                    continue;
                std::string currentFile;
                log4cxx::helpers::Transcoder::encode(appender->getFile(), currentFile);
                if (DirectoryExists(currentFile))
                {
                    return true;
                }
            }
        }
    }
    else
    {
        return true;
    }
    return false;
}
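For validatePath() to succeed, the logger must actually carry a RollingFileAppender. A hedged sketch of attaching one programmatically (mirroring the appender setup shown in Example 13 below) might look like this; the file path, pattern, and size limits are placeholders:

#include <log4cxx/logger.h>
#include <log4cxx/rollingfileappender.h>
#include <log4cxx/patternlayout.h>
#include <log4cxx/helpers/pool.h>

void attachRollingAppender(log4cxx::LoggerPtr logger)
{
    log4cxx::LayoutPtr layout(new log4cxx::PatternLayout(LOG4CXX_STR("%d %-5p %c - %m%n")));
    log4cxx::RollingFileAppenderPtr appender(
        new log4cxx::RollingFileAppender(layout, LOG4CXX_STR("/var/log/myapp/app.log"), true));
    appender->setMaximumFileSize(10 * 1024 * 1024);  // roll at ~10 MB
    appender->setMaxBackupIndex(5);                  // keep 5 old files
    log4cxx::helpers::Pool pool;
    appender->activateOptions(pool);
    logger->addAppender(appender);
}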
Example 2: AppLog
void __cdecl AppLog(int level, const char *format, ...)
{
    static log4cxx::LoggerPtr logger(Logger::getLogger("App"));
    va_list args;
    va_start(args, format);
    char szBuffer[4096] = "";
#ifdef _WIN32
    _vsnprintf(szBuffer, _countof(szBuffer), format, args);
#else
    vsnprintf(szBuffer, sizeof(szBuffer), format, args);
#endif
    szBuffer[sizeof(szBuffer) - 1] = '\0';  // guarantee termination on truncation
    va_end(args);
    switch (level) {
    case APP_LOG_DEBUG:
        logger->debug(szBuffer);
        break;
    case APP_LOG_INFO:
        logger->info(szBuffer);
        break;
    case APP_LOG_WARN:
        logger->warn(szBuffer);
        break;
    case APP_LOG_ERR:
        logger->error(szBuffer);
        break;
    default:
        logger->debug(szBuffer);
    }
}
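A typical call site, assuming the APP_LOG_* severity constants used in the switch above are defined elsewhere in the project:

AppLog(APP_LOG_INFO, "listening on %s:%d", "0.0.0.0", 8080);
AppLog(APP_LOG_ERR,  "connection to %s failed after %d retries", "db01", 3);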
Example 3: configure_default_logger
void configure_default_logger(log4cxx::LoggerPtr logger,
        log4cxx::LevelPtr level, std::string fname, bool dual)
{
    if (fname.empty() && dual)
        throw std::logic_error("dual log mode requires a filename");
    logger->setLevel(level);
    if (fname.empty() || dual)
    {
        log4cxx::AppenderPtr app = logger_write_to_cout(logger);
        app->setName("COUT");
    }
    if (!fname.empty())
    {
        log4cxx::AppenderPtr app = logger_write_to_file(fname, logger);
        app->setName("FILE");
    }
}
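logger_write_to_cout() and logger_write_to_file() are project helpers that are not shown in this example. A plausible sketch of what they might do, keeping only the names and signatures implied above (the layout pattern is an assumption):

#include <log4cxx/consoleappender.h>
#include <log4cxx/fileappender.h>
#include <log4cxx/patternlayout.h>
#include <log4cxx/logstring.h>

log4cxx::AppenderPtr logger_write_to_cout(log4cxx::LoggerPtr logger)
{
    log4cxx::LayoutPtr layout(new log4cxx::PatternLayout(LOG4CXX_STR("%d %-5p %c - %m%n")));
    log4cxx::AppenderPtr app(new log4cxx::ConsoleAppender(layout));
    logger->addAppender(app);
    return app;
}

log4cxx::AppenderPtr logger_write_to_file(const std::string& fname, log4cxx::LoggerPtr logger)
{
    log4cxx::LayoutPtr layout(new log4cxx::PatternLayout(LOG4CXX_STR("%d %-5p %c - %m%n")));
    LOG4CXX_DECODE_CHAR(logFname, fname);   // std::string -> LogString
    log4cxx::AppenderPtr app(new log4cxx::FileAppender(layout, logFname, true));
    logger->addAppender(app);
    return app;
}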
Example 4: log_interface
void log_interface(int severity, const char *msg) {
    static log4cxx::LoggerPtr logger(Logger::getLogger("Libevent"));
    printf("%s", msg);  // echo to stdout in addition to the log4cxx appenders
    switch (severity) {
    case EVENT_LOG_DEBUG:
        logger->debug(msg);
        break;
    case EVENT_LOG_MSG:
        logger->info(msg);
        break;
    case EVENT_LOG_WARN:
        logger->warn(msg);
        break;
    case EVENT_LOG_ERR:
        logger->error(msg);
        break;
    default:
        logger->debug(msg);
    }
}
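This function matches libevent's log-callback signature, so it can be installed once at startup; the EVENT_LOG_* severities come from <event2/event.h>:

#include <event2/event.h>

void init_libevent_logging()
{
    // Route all libevent diagnostics through the "Libevent" log4cxx logger.
    event_set_log_callback(log_interface);
}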
Example 5: logger
namespace scidb
{
using namespace scidb;
using namespace boost;
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.libdense_linear_algebra.ops.gemm"));
static const bool DBG_CERR = false;
static const bool DBG_REFORMAT = false;
/**
* A Physical multiply operator implemented using ScaLAPACK
* The interesting work is done in invokeMPI(), above
*
*/
class GEMMPhysical : public ScaLAPACKPhysical
{
public:
GEMMPhysical(const std::string& logicalName, const std::string& physicalName, const Parameters& parameters, const ArrayDesc& schema)
:
ScaLAPACKPhysical(logicalName, physicalName, parameters, schema)
{
}
std::shared_ptr<Array> invokeMPI(std::vector< std::shared_ptr<Array> >& inputArrays,
const GEMMOptions options, std::shared_ptr<Query>& query,
ArrayDesc& outSchema);
virtual std::shared_ptr<Array> execute(std::vector< std::shared_ptr<Array> >& inputArrays, std::shared_ptr<Query> query);
private:
};
char getTransposeCode(bool transpose) {
return transpose ? 'T' : 'N' ;
}
std::shared_ptr<Array> GEMMPhysical::invokeMPI(std::vector< std::shared_ptr<Array> >& inputArrays,
const GEMMOptions options, std::shared_ptr<Query>& query,
ArrayDesc& outSchema)
{
//
// Everything about the execute() method concerning the MPI execution of the arrays
// is factored into this method. This does not include the re-distribution of data
// chunks into the ScaLAPACK distribution scheme, as the supplied inputArrays
// must already be in that scheme.
//
// + intersects the array chunkGrids with the maximum process grid
// + sets up the ScaLAPACK grid accordingly and if not participating, return early
// + start and connect to an MPI slave process
// + create ScaLAPACK descriptors for the input arrays
// + convert the inputArrays into in-memory ScaLAPACK layout in shared memory
// + call a "master" routine that passes the ScaLAPACK operator name, parameters,
// and shared memory descriptors to the ScaLAPACK MPI process that will do the
// actual computation.
// + wait for successful completion
// + construct an "OpArray" that makes an Array API view of the output memory.
// + return that output array.
//
enum dummy {R=0, C=1}; // row column
enum dummy2 {AA=0, BB, CC, NUM_MATRICES}; // which matrix: alpha AA * BB + beta CC -> result
LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI(): begin");
size_t numArray = inputArrays.size();
if (numArray != NUM_MATRICES) { // for now ... may make CC optional when beta is 0, later
LOG4CXX_ERROR(logger, "GEMMPhysical::invokeMPI(): " << numArray << " != NUM_MATRICES " << size_t(NUM_MATRICES));
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "GEMMPhysical::invokeMPI(): requires 3 input Arrays/matrices.");
}
//
// Initialize the (emulated) BLACS and get the process grid info
//
blacs::context_t blacsContext = doBlacsInit(inputArrays, query, "GEMMPhysical");
bool isParticipatingInScaLAPACK = blacsContext.isParticipating();
if (isParticipatingInScaLAPACK) {
checkBlacsInfo(query, blacsContext, "GEMMPhysical");
}
blacs::int_t NPROW=-1, NPCOL=-1, MYPROW=-1 , MYPCOL=-1 ;
scidb_blacs_gridinfo_(blacsContext, NPROW, NPCOL, MYPROW, MYPCOL);
LOG4CXX_TRACE(logger, "GEMMPhysical::invokeMPI() NPROW="<<NPROW<<", NPCOL="<<NPCOL);
//
// launch MPISlave if we participate
// TODO: move this down into the ScaLAPACK code ... something that does
// the doBlacsInit, launchMPISlaves, and the check that they agree
//
bool isParticipatingInMPI = launchMPISlaves(query, NPROW*NPCOL);
if (isParticipatingInScaLAPACK != isParticipatingInMPI) {
LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI():"
<< " isParticipatingInScaLAPACK " << isParticipatingInScaLAPACK
<< " isParticipatingInMPI " << isParticipatingInMPI);
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "GEMMPhysical::invokeMPI(): internal inconsistency in MPI slave launch.");
}
if (isParticipatingInMPI) {
LOG4CXX_DEBUG(logger, "GEMMPhysical::invokeMPI(): participating in MPI");
//......... (remaining portion of this example omitted) .........
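Example 5 shows the common SciDB pattern of one file-scope static LoggerPtr per module ("scidb.libdense_linear_algebra.ops.gemm") together with the LOG4CXX_DEBUG/TRACE/ERROR macros, which only evaluate their stream expression when the corresponding level is enabled. A small sketch of raising that module's verbosity at run time, assuming a hypothetical debugging hook, could look like this:

#include <log4cxx/logger.h>
#include <log4cxx/level.h>

void enable_gemm_tracing()
{
    // Only this module becomes verbose; the rest of the logger hierarchy keeps its level.
    log4cxx::LoggerPtr gemmLogger(
        log4cxx::Logger::getLogger("scidb.libdense_linear_algebra.ops.gemm"));
    gemmLogger->setLevel(log4cxx::Level::getTrace());
}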
Example 6: logger
namespace scidb
{
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.mpi"));
static bool checkLauncher(uint32_t testDelay,
uint64_t launchId,
MpiOperatorContext* ctx)
{
std::shared_ptr<MpiLauncher> launcher(ctx->getLauncher(launchId));
if (isDebug()) {
if (launcher) {
// when running tests, slow down to give launcher a chance to exit
::sleep(testDelay);
}
}
if (launcher && !launcher->isRunning()) {
throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "MPI launcher process already terminated";
}
return true;
}
static bool checkTimeout(double startTime,
double timeout,
uint64_t launchId,
MpiOperatorContext* ctx)
{
if (mpi::hasExpired(startTime, timeout)) {
throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "MPI slave process failed to communicate in time";
}
return true;
}
static bool checkLauncherWithTimeout(uint32_t testDelay,
double startTime,
double timeout,
uint64_t launchId,
MpiOperatorContext* ctx)
{
bool rc = checkLauncher(testDelay, launchId, ctx);
assert(rc);
return (checkTimeout(startTime, timeout, launchId, ctx) && rc);
}
void MpiSlaveProxy::waitForHandshake(std::shared_ptr<MpiOperatorContext>& ctx)
{
if (_connection) {
throw (InvalidStateException(REL_FILE, __FUNCTION__, __LINE__)
<< "Connection to MPI slave already established");
}
LOG4CXX_DEBUG(logger, "MpiSlaveProxy::waitForHandshake: launchId="<<_launchId);
MpiOperatorContext::LaunchErrorChecker errChecker =
boost::bind(&checkLauncherWithTimeout,
_delayForTestingInSec,
mpi::getTimeInSecs(),
static_cast<double>(_MPI_SLAVE_RESPONSE_TIMEOUT),
_1, _2);
std::shared_ptr<scidb::ClientMessageDescription> msg = ctx->popMsg(_launchId, errChecker);
assert(msg);
_connection = msg->getClientContext();
if (msg->getMessageType() != scidb::mtMpiSlaveHandshake) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake is invalid");
}
std::shared_ptr<scidb_msg::MpiSlaveHandshake> handshake =
std::dynamic_pointer_cast<scidb_msg::MpiSlaveHandshake>(msg->getRecord());
assert(handshake);
// parse the handshake
if (!handshake->has_pid()) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has no PID");
}
const pid_t slavePid = handshake->pid();
if (slavePid == ::getpid() ||
slavePid == ::getppid() ||
slavePid < 2) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid PID");
}
if (!handshake->has_ppid()) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has no PPID");
}
const pid_t slavePPid = handshake->ppid();
if (slavePPid == ::getpid() ||
slavePPid == ::getppid() ||
slavePPid < 2) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid PPID");
}
//......... (remaining portion of this example omitted) .........
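The errChecker built with boost::bind above binds the first three arguments of checkLauncherWithTimeout and leaves the launch id and context as the placeholders _1 and _2. Assuming MpiOperatorContext::LaunchErrorChecker is a callable type compatible with that (launchId, ctx) signature, an equivalent C++11 lambda inside waitForHandshake() would be:

double   start   = mpi::getTimeInSecs();
double   timeout = static_cast<double>(_MPI_SLAVE_RESPONSE_TIMEOUT);
uint32_t delay   = _delayForTestingInSec;
MpiOperatorContext::LaunchErrorChecker errChecker =
    [delay, start, timeout](uint64_t launchId, MpiOperatorContext* ctx) {
        // Same check as the boost::bind expression: launcher still alive and within the timeout.
        return checkLauncherWithTimeout(delay, start, timeout, launchId, ctx);
    };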
Example 7: logger
namespace scidb
{
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.mpi"));
#if defined(NDEBUG)
static const bool DBG = false;
#else
static const bool DBG = true;
#endif
MpiLauncher::MpiLauncher(uint64_t launchId, const boost::shared_ptr<Query>& q)
: _pid(0),
_status(0),
_queryId(q->getQueryID()),
_launchId(launchId),
_query(q),
_waiting(false),
_inError(false),
_MPI_LAUNCHER_KILL_TIMEOUT(scidb::getLivenessTimeout())
{
}
MpiLauncher::MpiLauncher(uint64_t launchId, const boost::shared_ptr<Query>& q, uint32_t timeout)
: _pid(0),
_status(0),
_queryId(q->getQueryID()),
_launchId(launchId),
_query(q),
_waiting(false),
_inError(false),
_MPI_LAUNCHER_KILL_TIMEOUT(timeout)
{
}
void MpiLauncher::getPids(vector<pid_t>& pids)
{
ScopedMutexLock lock(_mutex);
if (_pid <= 1) {
throw InvalidStateException(REL_FILE, __FUNCTION__, __LINE__)
<< " MPI launcher is not running";
}
pids.push_back(_pid);
}
void MpiLauncher::launch(const vector<string>& slaveArgs,
const boost::shared_ptr<const InstanceMembership>& membership,
const size_t maxSlaves)
{
vector<string> args;
{
ScopedMutexLock lock(_mutex);
if (_pid != 0 || _waiting) {
throw InvalidStateException(REL_FILE, __FUNCTION__, __LINE__)
<< " MPI launcher is already running";
}
boost::shared_ptr<Query> query = _query.lock();
Query::validateQueryPtr(query);
buildArgs(args, slaveArgs, membership, query, maxSlaves);
}
pid_t pid = fork();
if (pid < 0) {
// error
int err = errno;
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_SYSCALL_ERROR)
<< "fork" << pid << err <<"");
} else if (pid > 0) {
// parent
ScopedMutexLock lock(_mutex);
if (_pid != 0 || _waiting) {
throw InvalidStateException(REL_FILE, __FUNCTION__, __LINE__)
<< " MPI launcher is corrupted after launch";
}
_pid = pid;
LOG4CXX_DEBUG(logger, "MPI launcher process spawned, pid="<<_pid);
return;
} else {
// child
becomeProcGroupLeader();
recordPids();
setupLogging();
if (DBG) {
std::cerr << "LAUNCHER pid="<<getpid()
<< ", pgid="<< ::getpgid(0)
<< ", ppid="<< ::getppid()<<std::endl;
}
closeFds();
boost::scoped_array<const char*> argv(new const char*[args.size()+1]);
initExecArgs(args, argv);
const char *path = argv[0];
if (DBG) {
std::cerr << "LAUNCHER pid="<<::getpid()<<" args for "<<path<<" are ready" << std::endl;
for (size_t i=0; i<args.size(); ++i) {
const char * arg = argv[i];
//......... (remaining portion of this example omitted) .........
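The launcher follows the standard POSIX fork/exec split: the parent records the child pid and returns, while the child becomes a process-group leader, prepares its argv, and presumably exec()s the launcher binary in the omitted portion. A minimal, generic sketch of that pattern (independent of the SciDB helpers used above):

#include <sys/types.h>
#include <unistd.h>
#include <cstdio>

pid_t spawn(const char* path, char* const argv[])
{
    pid_t pid = fork();
    if (pid < 0) {                    // error: fork failed
        perror("fork");
        return -1;
    }
    if (pid > 0) {                    // parent: remember the child and continue
        return pid;
    }
    setpgid(0, 0);                    // child: become its own process-group leader
    execv(path, argv);                // only returns on failure
    perror("execv");
    _exit(127);
}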
Example 8: logger
namespace scidb {
static const bool DBG = false;
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.query.ops.mpi"));
///
/// Some operators may not be able to work in degraded mode while they are still being implemented;
/// this call makes them exit if that is the case.
/// TODO: add a more explicit message about what is happening
void throwIfDegradedMode(shared_ptr<Query>& query) {
const boost::shared_ptr<const InstanceMembership> membership =
Cluster::getInstance()->getInstanceMembership();
if ((membership->getViewId() != query->getCoordinatorLiveness()->getViewId()) ||
(membership->getInstances().size() != query->getInstancesCount())) {
// because we can't yet handle the extra data from
// replicas that we would be fed in "degraded mode"
throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_QUORUM2);
}
}
void MPIPhysical::setQuery(const boost::shared_ptr<Query>& query)
{
boost::shared_ptr<Query> myQuery = _query.lock();
if (myQuery) {
assert(query==myQuery);
assert(_ctx);
return;
}
PhysicalOperator::setQuery(query);
_ctx = boost::shared_ptr<MpiOperatorContext>(new MpiOperatorContext(query));
_ctx = MpiManager::getInstance()->checkAndSetCtx(query,_ctx);
}
void MPIPhysical::postSingleExecute(shared_ptr<Query> query)
{
// On a non-participating launcher instance it is difficult
// to determine when the launch is complete without a sync point.
// postSingleExecute() is run after all instances report success of their execute() phase,
// which is effectively a sync point.
assert(query->getCoordinatorID() == COORDINATOR_INSTANCE);
assert(_mustLaunch);
assert(_ctx);
const uint64_t lastIdInUse = _ctx->getLastLaunchIdInUse();
boost::shared_ptr<MpiLauncher> launcher(_ctx->getLauncher(lastIdInUse));
assert(launcher);
if (launcher && launcher == _launcher) {
LOG4CXX_DEBUG(logger, "MPIPhysical::postSingleExecute: destroying last launcher for launch = " << lastIdInUse);
assert(lastIdInUse == _launchId);
launcher->destroy();
_launcher.reset();
}
_ctx.reset();
}
bool MPIPhysical::launchMPISlaves(shared_ptr<Query>& query, const size_t maxSlaves)
{
LOG4CXX_DEBUG(logger, "MPIPhysical::launchMPISlaves(query, maxSlaves: " << maxSlaves << ") called.");
assert(maxSlaves <= query->getInstancesCount());
// This barrier guarantees MPIPhysical::setQuery is called on all instances
// before any slaves are launched.
// It also makes sure a non-participating launcher waits for the current launch to finish before starting a new one.
syncBarrier(0, query);
syncBarrier(1, query);
_launchId = _ctx->getNextLaunchId(); // bump the launch ID by 1
Cluster* cluster = Cluster::getInstance();
const boost::shared_ptr<const InstanceMembership> membership = cluster->getInstanceMembership();
const string& installPath = MpiManager::getInstallPath(membership);
uint64_t lastIdInUse = _ctx->getLastLaunchIdInUse();
assert(lastIdInUse < _launchId);
boost::shared_ptr<MpiSlaveProxy> slave;
// check if our logical ID is within the set of instances that will have a corresponding slave
InstanceID iID = query->getInstanceID();
if ( iID < maxSlaves) {
slave = boost::make_shared<MpiSlaveProxy>(_launchId, query, installPath);
_ctx->setSlave(slave);
}
_mustLaunch = (query->getCoordinatorID() == COORDINATOR_INSTANCE);
if (_mustLaunch) {
boost::shared_ptr<MpiLauncher> oldLauncher = _ctx->getLauncher(lastIdInUse);
if (oldLauncher) {
assert(lastIdInUse == oldLauncher->getLaunchId());
LOG4CXX_DEBUG(logger, "MPIPhysical::launchMPISlaves(): destroying last launcher for launch = " << lastIdInUse);
oldLauncher->destroy();
oldLauncher.reset();
}
_launcher = boost::shared_ptr<MpiLauncher>(MpiManager::getInstance()->newMPILauncher(_launchId, query));
_ctx->setLauncher(_launcher);
std::vector<std::string> args;
_launcher->launch(args, membership, maxSlaves);
}
//......... (remaining portion of this example omitted) .........
Example 9: logger
namespace scidb
{
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.mpi"));
static double getTimeInSecs()
{
struct timespec ts;
if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
assert(false);
throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_CANT_GET_SYSTEM_TIME);
}
return (ts.tv_sec + ts.tv_nsec*1e-9);
}
static bool checkForTimeout(double startTime, double timeout,
uint64_t launchId, MpiOperatorContext* ctx)
{
if ((getTimeInSecs() - startTime) > timeout) {
throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "MPI slave process failed to communicate in time";
}
return true;
}
static bool checkLauncher(double startTime, double timeout,
uint64_t launchId, MpiOperatorContext* ctx)
{
boost::shared_ptr<MpiLauncher> launcher(ctx->getLauncher(launchId));
if (launcher && !launcher->isRunning()) {
throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED)
<< "MPI launcher process";
}
checkForTimeout(startTime, timeout, launchId, ctx);
return true;
}
void MpiSlaveProxy::waitForHandshake(boost::shared_ptr<MpiOperatorContext>& ctx)
{
if (_connection) {
throw (InvalidStateException(REL_FILE, __FUNCTION__, __LINE__)
<< "Connection to MPI slave already established");
}
MpiOperatorContext::LaunchErrorChecker errChecker =
boost::bind(&checkLauncher, getTimeInSecs(),
static_cast<double>(_MPI_SLAVE_RESPONSE_TIMEOUT), _1, _2);
boost::shared_ptr<scidb::ClientMessageDescription> msg = ctx->popMsg(_launchId, errChecker);
assert(msg);
_connection = msg->getClientContext();
if (msg->getMessageType() != scidb::mtMpiSlaveHandshake) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake is invalid");
}
boost::shared_ptr<scidb_msg::MpiSlaveHandshake> handshake =
boost::dynamic_pointer_cast<scidb_msg::MpiSlaveHandshake>(msg->getRecord());
assert(handshake);
// parse the handshake
if (!handshake->has_pid()) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has no PID");
}
if (!handshake->has_ppid()) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has no PPID");
}
_pids.push_back(handshake->pid());
_pids.push_back(handshake->ppid());
string clusterUuid = Cluster::getInstance()->getUuid();
if (handshake->cluster_uuid() != clusterUuid) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid clusterUuid");
}
InstanceID instanceId = Cluster::getInstance()->getLocalInstanceId();
if (handshake->instance_id() != instanceId) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid instanceId");
}
if (handshake->launch_id() != _launchId) {
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid launchId");
}
boost::shared_ptr<scidb::Query> query( _query.lock());
Query::validateQueryPtr(query);
if (handshake->rank() != query->getInstanceID()) { // logical instance ID
throw (SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNKNOWN_ERROR)
<< "MPI slave handshake has invalid rank");
}
//......... (remaining portion of this example omitted) .........
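getTimeInSecs() above wraps clock_gettime(CLOCK_REALTIME); a wall-clock source is workable for coarse slave-response timeouts, though a monotonic clock avoids surprises when the system time is adjusted. A small self-contained sketch of the same timeout check using std::chrono::steady_clock (illustrative only, not SciDB code):

#include <chrono>

class Deadline
{
public:
    explicit Deadline(double timeoutSec)
        : _start(std::chrono::steady_clock::now()), _timeoutSec(timeoutSec) {}

    bool expired() const
    {
        std::chrono::duration<double> elapsed =
            std::chrono::steady_clock::now() - _start;
        return elapsed.count() > _timeoutSec;
    }

private:
    std::chrono::steady_clock::time_point _start;
    double _timeoutSec;
};

// Usage: Deadline d(300.0); ... if (d.expired()) { /* report the timeout */ }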
Example 10: logger
namespace scidb {
static log4cxx::LoggerPtr logger(log4cxx::Logger::getLogger("scidb.linear_algebra.ops.scalapack"));
inline bool hasSingleAttribute(ArrayDesc const& desc)
{
return desc.getAttributes().size() == 1 || (desc.getAttributes().size() == 2 && desc.getAttributes()[1].isEmptyIndicator());
}
void checkScaLAPACKInputs(std::vector<ArrayDesc> schemas, boost::shared_ptr<Query> query,
size_t nMatsMin, size_t nMatsMax)
{
enum dummy {ROW=0, COL=1};
enum dummy2 { ATTR0=0 };
const size_t NUM_MATRICES = schemas.size();
if(schemas.size() < nMatsMin ||
schemas.size() > nMatsMax) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR2);
}
// Check the properties first by argument, then in the order in which each property is determined in the AFL statement:
// size, chunkSize, overlap.
// Check individual properties in the loop, and any inter-matrix properties after the loop
// TODO: in all of these, name the argument # at fault
for(size_t iArray=0; iArray < NUM_MATRICES; iArray++) {
// check: attribute count == 1
if (!hasSingleAttribute(schemas[iArray])) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR2);
// TODO: offending matrix is iArray
}
// check: attribute type is double
if (schemas[iArray].getAttributes()[ATTR0].getType() != TID_DOUBLE) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR5);
// TODO: offending matrix is iArray
}
// check: nDim == 2 (a matrix)
// TODO: relax nDim to be 1 and have it imply NCOL=1 (column vector)
// if you want a row vector, we could make transpose accept the column vector and output a 1 x N matrix
// and call that a "row vector". The other way could never be acceptable.
//
const size_t SCALAPACK_IS_2D = 2 ;
if (schemas[iArray].getDimensions().size() != SCALAPACK_IS_2D) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR3);
// TODO: offending matrix is iArray
}
// check: size is bounded
const Dimensions& dims = schemas[iArray].getDimensions();
if (dims[ROW].getLength() == INFINITE_LENGTH ||
dims[COL].getLength() == INFINITE_LENGTH) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR9);
}
// TODO: check: sizes are not larger than largest ScaLAPACK fortran INTEGER
// TEMPORARY until #2202 defines how to interpret arrays not starting at 0
// "dimensions must start at 0"
for(unsigned dim =ROW; dim <= COL; dim++) {
if(dims[dim].getStart() != 0) {
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR44);
}
}
// check: chunk interval not too small
if (dims[ROW].getChunkInterval() < slpp::SCALAPACK_MIN_BLOCK_SIZE ||
dims[COL].getChunkInterval() < slpp::SCALAPACK_MIN_BLOCK_SIZE ) {
// the cache will thrash and performance will be inexplicably horrible to the user
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR41); // too small
}
// check: chunk interval not too large
if (dims[ROW].getChunkInterval() > slpp::SCALAPACK_MAX_BLOCK_SIZE ||
dims[COL].getChunkInterval() > slpp::SCALAPACK_MAX_BLOCK_SIZE ) {
// the cache will thrash and performance will be inexplicably horrible to the user
throw PLUGIN_USER_EXCEPTION(DLANameSpace, SCIDB_SE_INFER_SCHEMA, DLA_ERROR42); // too large
}
// TODO: the following does not work correctly: postWarning() itself uses SCIDB_WARNING,
// which does not work correctly from a plugin, so we are still seeking an example of how
// to call postWarning() from a plugin.
if (false) {
// broken code inside postWarning(SCIDB_WARNING()) faults and needs a different argument.
for(size_t d = ROW; d <= COL; d++) {
if(dims[d].getChunkInterval() != slpp::SCALAPACK_EFFICIENT_BLOCK_SIZE) {
query->postWarning(SCIDB_WARNING(DLA_WARNING4) << slpp::SCALAPACK_EFFICIENT_BLOCK_SIZE
<< slpp::SCALAPACK_EFFICIENT_BLOCK_SIZE);
}
}
}
// check: no overlap allowed
// TODO: improvement? if there's overlap, we may be able to ignore it,
// else invoke a common piece of code to remove it
//......... (remaining portion of this example omitted) .........
Example 11: completeLaunch
void MpiLauncher::completeLaunch(pid_t pid, const std::string& pidFile, int status)
{
    // remove the args file (shared-memory IPC region)
    boost::scoped_ptr<SharedMemoryIpc> shmIpc(mpi::newSharedMemoryIpc(_ipcName));
    shmIpc->remove();
    shmIpc.reset();

    // remove the pid file
    scidb::File::remove(pidFile.c_str(), false);

    // remove the log file unless tracing is on or the launch failed
    if (!logger->isTraceEnabled() && !_inError) {
        string logFileName = mpi::getLauncherLogFile(_installPath, _queryId, _launchId);
        scidb::File::remove(logFileName.c_str(), false);
    }

    if (WIFSIGNALED(status)) {
        LOG4CXX_ERROR(logger, "SciDB MPI launcher (pid="<<pid<<") terminated by signal = "
                      << WTERMSIG(status) << (WCOREDUMP(status)? ", core dumped" : ""));
        throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED) << "MPI launcher process";
    } else if (WIFEXITED(status)) {
        int rc = WEXITSTATUS(status);
        if (rc != 0) {
            LOG4CXX_ERROR(logger, "SciDB MPI launcher (pid="<<_pid<<") exited with status = " << rc);
            throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OPERATION_FAILED) << "MPI launcher process";
        } else {
            LOG4CXX_DEBUG(logger, "SciDB MPI launcher (pid="<<_pid<<") exited with status = " << rc);
            return;
        }
    }
    throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNREACHABLE_CODE);
}
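completeLaunch() interprets a raw wait status, so the caller is expected to have reaped the launcher child first. A minimal, generic sketch of how such a status is obtained and classified with the same WIF* macros (plain POSIX, not the SciDB wait loop itself):

#include <sys/types.h>
#include <sys/wait.h>
#include <cstdio>

// Reap a child and classify its wait status the same way completeLaunch() does.
void reportChildExit(pid_t childPid)
{
    int status = 0;
    if (::waitpid(childPid, &status, 0) != childPid) {
        return;                                   // nothing reaped
    }
    if (WIFSIGNALED(status)) {
        std::printf("child %d killed by signal %d\n", childPid, WTERMSIG(status));
    } else if (WIFEXITED(status)) {
        std::printf("child %d exited with status %d\n", childPid, WEXITSTATUS(status));
    }
}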
Example 12: destroy
void MpiSlaveProxy::destroy(bool error)
{
    QueryID queryIdForKill(INVALID_QUERY_ID);
    if (error) {
        _inError = true;
        queryIdForKill = _queryId;
    }
    const string clusterUuid = Cluster::getInstance()->getUuid();

    // kill the slave process and its parent orted
    for (std::vector<pid_t>::const_iterator iter = _pids.begin();
         iter != _pids.end(); ++iter) {
        pid_t pid = *iter;
        //XXX TODO tigor: kill the whole process group (-pid) ?
        LOG4CXX_DEBUG(logger, "MpiSlaveProxy::destroy: killing slave pid = "<<pid);
        MpiErrorHandler::killProc(_installPath, clusterUuid, pid, queryIdForKill);
    }

    std::string pidFile = mpi::getSlavePidFile(_installPath, _queryId, _launchId);
    MpiErrorHandler::cleanupSlavePidFile(_installPath,
                                         clusterUuid,
                                         pidFile,
                                         queryIdForKill);

    // remove the log file unless tracing is on or the slave failed
    if (!logger->isTraceEnabled() && !_inError) {
        string logFileName = mpi::getSlaveLogFile(_installPath, _queryId, _launchId);
        scidb::File::remove(logFileName.c_str(), false);
    }
}
Example 13: init
void init()
{
    // calculate the log directory
#ifdef _MSC_VER
    std::string log_file_name_str = ros::file_log::getLogDirectory() + "/rosout.log";
    LOG4CXX_DECODE_CHAR(log_file_name, log_file_name_str); // this declares log_file_name as a LogString
    std::string empty_str = "";
    LOG4CXX_DECODE_CHAR(log_empty, empty_str);
#else
    std::string log_file_name = ros::file_log::getLogDirectory() + "/rosout.log";
    std::string log_empty = "";
#endif

    logger_ = log4cxx::Logger::getRootLogger();
    log4cxx::LayoutPtr layout = new log4cxx::PatternLayout(log_empty);
    log4cxx::RollingFileAppenderPtr appender = new log4cxx::RollingFileAppender(layout, log_file_name, true);
    logger_->addAppender(appender);
    appender->setMaximumFileSize(100*1024*1024);
    appender->setMaxBackupIndex(10);
    log4cxx::helpers::Pool pool;
    appender->activateOptions(pool);

    std::cout << "logging to " << log_file_name.c_str() << std::endl;
    LOG4CXX_INFO(logger_, "\n\n" << ros::Time::now() << "  Node Startup\n");

    agg_pub_ = node_.advertise<rosgraph_msgs::Log>("/rosout_agg", 0);
    std::cout << "re-publishing aggregated messages to /rosout_agg" << std::endl;

    rosout_sub_ = node_.subscribe("/rosout", 0, &Rosout::rosoutCallback, this);
    std::cout << "subscribed to /rosout" << std::endl;
}
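Rosout::rosoutCallback() is not part of this example. A plausible sketch, assuming it simply re-publishes each received /rosout message on the aggregated topic advertised above, would be:

void Rosout::rosoutCallback(const rosgraph_msgs::Log::ConstPtr& msg)
{
    // Forward every /rosout record unchanged to /rosout_agg.
    agg_pub_.publish(msg);
}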
Example 14: shmIpc
// XXX TODO: consider returning std::vector<scidb::SharedMemoryPtr>
// XXX TODO: which would require supporting different types of memory (double, char etc.)
std::vector<MPIPhysical::SMIptr_t> MPIPhysical::allocateMPISharedMemory(size_t numBufs,
                                                                        size_t elemSizes[],
                                                                        size_t numElems[],
                                                                        string dbgNames[])
{
    LOG4CXX_DEBUG(logger, "MPIPhysical::allocateMPISharedMemory(numBufs "<<numBufs<<",,,)");
    if (logger->isTraceEnabled()) {
        LOG4CXX_TRACE(logger, "MPIPhysical::allocateMPISharedMemory(): allocations are: ");
        for (size_t ii=0; ii< numBufs; ii++) {
            LOG4CXX_TRACE(logger, "MPIPhysical::allocateMPISharedMemory():"
                          << " elemSizes["<<ii<<"] "<< dbgNames[ii] << " len " << numElems[ii]);
        }
    }

    std::vector<SMIptr_t> shmIpc(numBufs);
    bool preallocate = Config::getInstance()->getOption<bool>(CONFIG_PREALLOCATE_SHM);
    for (size_t ii=0; ii<numBufs; ii++) {
        std::stringstream suffix;
        suffix << "." << ii;
        std::string ipcNameFull = _ipcName + suffix.str();
        LOG4CXX_TRACE(logger, "IPC name = " << ipcNameFull);
        shmIpc[ii] = SMIptr_t(mpi::newSharedMemoryIpc(ipcNameFull, preallocate)); // can I get 'em off ctx instead?
        _ctx->addSharedMemoryIpc(_launchId, shmIpc[ii]);

        char* ptr = MpiLauncher::initIpcForWrite(shmIpc[ii].get(), (elemSizes[ii] * numElems[ii]));
        assert(ptr); ptr = ptr;   // silence the unused-variable warning in release builds
    }
    return shmIpc;
}
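A hedged usage sketch, reusing the parameter names from the signature above; the element counts (nRowsA, nColsA, ...) and debug names are placeholders for whatever matrices an operator wants to ship to the MPI slave:

// Inside a physical operator, assuming three double-precision matrices of known sizes.
size_t elemSizes[]      = { sizeof(double), sizeof(double), sizeof(double) };
size_t numElems[]       = { nRowsA * nColsA, nRowsB * nColsB, nRowsC * nColsC };
std::string dbgNames[]  = { "A", "B", "C" };

std::vector<MPIPhysical::SMIptr_t> buffers =
    allocateMPISharedMemory(3, elemSizes, numElems, dbgNames);
// buffers[i] now refers to a shared-memory region the MPI slave can map and fill.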
Example 15: log4cxx_debug_dimensions
void log4cxx_debug_dimensions(const std::string& prefix, const Dimensions& dims)
{
    if (logger->isDebugEnabled()) {
        for (size_t i=0; i<dims.size(); i++) {
            LOG4CXX_DEBUG(logger, prefix << " dims["<<i<<"] from " << dims[i].getStartMin() << " to " << dims[i].getEndMax());
        }
    }
}
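A typical call site, assuming a schemas vector like the one in Example 10 and a file-scope logger as in the other examples (the prefix string is arbitrary):

log4cxx_debug_dimensions("GEMMPhysical::invokeMPI() input A", schemas[0].getDimensions());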