This article collects and summarizes typical usage examples of the C++ OID::toString method. If you have been wondering what exactly OID::toString does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the OID class to which this method belongs.
A total of 15 code examples of the OID::toString method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
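Before diving into the project-specific examples, here is a minimal, self-contained sketch of the most common pattern: generate an ObjectId and convert it to its 24-character hexadecimal string. This sketch assumes the MongoDB legacy C++ driver's mongo::OID; several examples below use unrelated OID classes (the Crypto++/ndn-cxx algorithm OID, SequoiaDB's bson OID, the cadence OID), whose toString variants have different signatures and semantics.
// Minimal sketch, assuming the MongoDB legacy C++ driver's mongo::OID.
// The other OID classes shown in the examples below provide their own toString variants.
#include <iostream>
#include <string>
#include "mongo/bson/oid.h"

int main() {
    mongo::OID oid = mongo::OID::gen();   // generate a new 12-byte ObjectId
    std::string hex = oid.toString();     // 24-character lowercase hex representation
    std::cout << "generated _id: " << hex << std::endl;
    return 0;
}
In the project examples that follow, this hex string is typically logged, stored as a document or registration id, or returned to the caller.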
Example 1: toString
void Definition::toString(char *buf, int max, int indent) const {
    int index=0;
    OID temp;
    int modi;
    char buf2[50];
    Event *evt2 = NEW Event(Event::GET_RANGE, m_def);
    evt2->param<0>(0);
    evt2->param<1>(m_size);
    evt2->send();
    OID boid = evt2->result();
    OID ifobj = Null;
    Buffer *def = Buffer::lookup(boid);
    delete evt2;
    if (def == 0) {
        buf[0] = 0;
        return;
    }
    for (int i=0; i<indent; i++) strcat(buf, "\t");
    while (index < m_size) {
        temp = def->get(index++);
        if (temp.isModifier()) {
            modi = temp.d();
            switch (modi) {
            case modifiers::ENDSUB:
                strcat(buf, ")");
                break;
            case modifiers::BEGINSUB:
                strcat(buf, "(");
                break;
            case modifiers::UNION:
                strcat(buf, "union ");
                break;
            case modifiers::COMPARE:
                strcat(buf, "== ");
                break;
            default: break;
            }
        } else {
            if (temp == This) {
                strcat(buf, ".");
            } else if (index == 1 && temp.get("type") == OID("if")) {
                ifobj = temp;
                strcat(buf, "if ");
                //buildif(buf, temp);
            } else {
                temp.toString(buf2, 50);
                strcat(buf, buf2);
                strcat(buf, " ");
            }
        }
    }
    if (ifobj != Null) buildif(buf, ifobj, indent);
    Buffer::free(boid);
}
Example 2: insertData
INT32 insertData ( sdbCollection *collection, CHAR **json_str )
{
    INT32 rc = SDB_OK ;
    OID oid ;
    INT32 len = 0 ;
    CHAR *temp = NULL ;
    BSONObj bs ;
    rc = fromjson ( *json_str, bs ) ;
    if ( rc )
    {
        return rc ;
    }
    rc = collection->insert ( bs, &oid ) ;
    if ( rc )
    {
        return rc ;
    }
    std::string str = oid.toString () ;
    const CHAR *p = str.c_str () ;
    INT32 eleSize = strlen(p) ;
    if ( eleSize > 0 )
    {
        CHAR *pTemp = (CHAR*)malloc(eleSize+1) ;
        if ( !pTemp )
            return SDB_OOM ;
        memcpy ( pTemp, p, eleSize ) ;
        pTemp[eleSize] = '\0' ;
        *json_str = pTemp ;
    }
    return rc ;
}
Example 3: sink
void
PublicKey::decode(CryptoPP::BufferedTransformation& in)
{
  // SubjectPublicKeyInfo ::= SEQUENCE {
  //     algorithm           AlgorithmIdentifier
  //     keybits             BIT STRING }
  using namespace CryptoPP;
  try
    {
      std::string out;
      StringSink sink(out);
      ////////////////////////
      // part 1: copy as is //
      ////////////////////////
      BERSequenceDecoder decoder(in);
      {
        assert(decoder.IsDefiniteLength());
        DERSequenceEncoder encoder(sink);
        decoder.TransferTo(encoder, decoder.RemainingLength());
        encoder.MessageEnd();
      }
      decoder.MessageEnd();
      ////////////////////////
      // part 2: check if the key is RSA (since it is the only supported for now)
      ////////////////////////
      StringSource checkedSource(out, true);
      BERSequenceDecoder subjectPublicKeyInfo(checkedSource);
      {
        BERSequenceDecoder algorithmInfo(subjectPublicKeyInfo);
        {
          OID algorithm;
          algorithm.decode(algorithmInfo);
          if (algorithm == oid::RSA)
            m_type = KEY_TYPE_RSA;
          else if (algorithm == oid::ECDSA)
            m_type = KEY_TYPE_ECDSA;
          else
            BOOST_THROW_EXCEPTION(Error("Only RSA/ECDSA public keys are supported for now (" +
                                        algorithm.toString() + " requested)"));
        }
      }
      m_key.assign(out.begin(), out.end());
    }
  catch (CryptoPP::BERDecodeErr& err)
    {
      m_type = KEY_TYPE_NULL;
      BOOST_THROW_EXCEPTION(Error("PublicKey decoding error"));
    }
  m_digest.reset();
}
Example 4: strlen
DString cadence::operator+(DString m, const OID &o) {
    char buf[200];
    o.toString(buf,200);
    int s = m.size();
    int s2 = strlen(buf);
    for (int i=0; i<s2; i++) {
        m.m_obj[s+i].set(buf[i], true);
    }
    m.m_obj[Size].set(s+s2);
    return m;
}
Example 5: SavePaymentJson
string SavePaymentJson(int amount)
{
    //BSONObj paymentBSON = mongo::fromjson(newPyamentJson);
    BSONObj paymentBSON = BSON(GENOID
                               << "PayedToUserId" << 8888
                               << "PayedDate" << "2015-01-25 12:00:00"
                               << "PayedPeriodStartDate" << "2015-01-01 00:00:00"
                               << "PayedPeriodEndDate" << "2015-01-29 23:59:59"
                               << "Amount" << amount);
    db.insert(PAYMENTS_COLLECTION_NAMESPASE, paymentBSON);
    BSONElement oi;
    paymentBSON.getObjectID(oi);
    OID oid = oi.__oid();
    return oid.toString();
}
Example 6: toString
void DString::toString(char *str, int max) {
    //The whole object may be an integer etc...
    if (m_obj[Size] == Null || !((OID)m_obj[0]).isChar()) {
        m_obj.toString(str,max);
        return;
    }
    int size = (OID)m_obj[Size];
    if (size >= max) size = max-1;
    int j = 0;
    OID v;
    Event *evt = new Event(Event::GET_RANGE, m_obj);
    evt->param<0>(0);
    evt->param<1>(size);
    evt->send();
    OID boid = evt->result();
    Buffer *str2 = Buffer::lookup(boid);
    delete evt;
    if (str2 == 0) {
        str[0] = 0;
        return;
    }
    for (int i=0; i<size; i++) {
        v = str2->get(i);
        if (v.isChar())
            str[j++] = str2->get(i);
        else {
            int k = 0;
            char buf[500];
            v.toString(buf,500);
            while (buf[k] != 0) {
                str[j++] = buf[k++];
            }
        }
    }
    Buffer::free(boid);
    str[j] = 0;
};
Example 7: getCurrentTime
//......... part of the code omitted here .........
    oid = *id;
  }
  reg.append("_id", oid);
  reg.append(REG_EXPIRATION, expiration);
  reg.append(REG_SERVICE_PATH, servicePath == "" ? DEFAULT_SERVICE_PATH_UPDATES : servicePath);
  reg.append(REG_FORMAT, format);
  //
  // We accumulate the subscriptions in a map. The key of the map is the string representing
  // subscription id
  //
  std::map<string, TriggeredSubscription*> subsToNotify;
  // This vector is used to define which entities to include in notifications
  EntityIdVector triggerEntitiesV;
  BSONArrayBuilder contextRegistration;
  for (unsigned int ix = 0; ix < requestP->contextRegistrationVector.size(); ++ix)
  {
    ContextRegistration* cr = requestP->contextRegistrationVector[ix];
    BSONArrayBuilder entities;
    for (unsigned int jx = 0; jx < cr->entityIdVector.size(); ++jx)
    {
      EntityId* en = cr->entityIdVector[jx];
      triggerEntitiesV.push_back(en);
      if (en->type == "")
      {
        entities.append(BSON(REG_ENTITY_ID << en->id));
        LM_T(LmtMongo, ("Entity registration: {id: %s}", en->id.c_str()));
      }
      else
      {
        entities.append(BSON(REG_ENTITY_ID << en->id << REG_ENTITY_TYPE << en->type));
        LM_T(LmtMongo, ("Entity registration: {id: %s, type: %s}", en->id.c_str(), en->type.c_str()));
      }
    }
    BSONArrayBuilder attrs;
    for (unsigned int jx = 0; jx < cr->contextRegistrationAttributeVector.size(); ++jx)
    {
      ContextRegistrationAttribute* cra = cr->contextRegistrationAttributeVector[jx];
      attrs.append(BSON(REG_ATTRS_NAME << cra->name << REG_ATTRS_TYPE << cra->type << "isDomain" << cra->isDomain));
      LM_T(LmtMongo, ("Attribute registration: {name: %s, type: %s, isDomain: %s}",
                      cra->name.c_str(),
                      cra->type.c_str(),
                      cra->isDomain.c_str()));
      for (unsigned int kx = 0;
           kx < requestP->contextRegistrationVector[ix]->contextRegistrationAttributeVector[jx]->metadataVector.size();
           ++kx)
      {
        // FIXME: metadata not supported at the moment
      }
    }
    contextRegistration.append(
      BSON(
        REG_ENTITIES << entities.arr() <<
        REG_ATTRS << attrs.arr() <<
        REG_PROVIDING_APPLICATION << requestP->contextRegistrationVector[ix]->providingApplication.get()));
    LM_T(LmtMongo, ("providingApplication registration: %s",
                    requestP->contextRegistrationVector[ix]->providingApplication.c_str()));
    std::string err;
    if (!addTriggeredSubscriptions(*cr, subsToNotify, err, tenant))
    {
      responseP->errorCode.fill(SccReceiverInternalError, err);
      return SccOk;
    }
  }
  reg.append(REG_CONTEXT_REGISTRATION, contextRegistration.arr());
  /* Note we are using upsert = "true". This means that if the document doesn't previously
   * exist in the collection, it is created. Thus, this way both uses of registerContext are OK
   * (either new registration or updating an existing one) */
  if (!collectionUpdate(getRegistrationsCollectionName(tenant), BSON("_id" << oid), reg.obj(), true, &err))
  {
    responseP->errorCode.fill(SccReceiverInternalError, err);
    releaseTriggeredSubscriptions(subsToNotify);
    return SccOk;
  }
  //
  // Send notifications for each one of the subscriptions accumulated by
  // previous addTriggeredSubscriptions() invocations
  //
  processSubscriptions(triggerEntitiesV, subsToNotify, err, tenant, fiwareCorrelator);
  // Fill the response element
  responseP->duration = requestP->duration;
  responseP->registrationId.set(oid.toString());
  responseP->errorCode.fill(SccOk);
  return SccOk;
}
Example 8: mergeChunks
bool mergeChunks(OperationContext* txn,
                 const NamespaceString& nss,
                 const BSONObj& minKey,
                 const BSONObj& maxKey,
                 const OID& epoch,
                 string* errMsg) {
    // Get the distributed lock
    string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey << " to "
                                 << maxKey;
    auto scopedDistLock = grid.catalogManager(txn)->distLock(
        txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
    if (!scopedDistLock.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(scopedDistLock.getStatus());
        warning() << *errMsg;
        return false;
    }
    ShardingState* shardingState = ShardingState::get(txn);
    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //
    ChunkVersion shardVersion;
    Status status = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);
    if (!status.isOK()) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy(status.reason());
        warning() << *errMsg;
        return false;
    }
    if (epoch.isSet() && shardVersion.epoch() != epoch) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns() << " has changed"
                           << " since merge was sent"
                           << "(sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";
        warning() << *errMsg;
        return false;
    }
    shared_ptr<CollectionMetadata> metadata = shardingState->getCollectionMetadata(nss.ns());
    if (!metadata || metadata->getKeyPattern().isEmpty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";
        warning() << *errMsg;
        return false;
    }
    dassert(metadata->getShardVersion().equals(shardVersion));
    if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
        *errMsg = stream() << "could not merge chunks, the range " << rangeToString(minKey, maxKey)
                           << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();
        warning() << *errMsg;
        return false;
    }
    //
    // Get merged chunk information
    //
    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();
    std::vector<ChunkType> chunksToMerge;
    ChunkType itChunk;
    itChunk.setMin(minKey);
    itChunk.setMax(minKey);
    itChunk.setNS(nss.ns());
    itChunk.setShard(shardingState->getShardName());
    while (itChunk.getMax().woCompare(maxKey) < 0 &&
           metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
        chunksToMerge.push_back(itChunk);
    }
    if (chunksToMerge.empty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();
        warning() << *errMsg;
        return false;
    }
    //
//......... part of the code omitted here .........
Example 9: initChunks
Status MetadataLoader::initChunks(CatalogManager* catalogManager,
                                  const string& ns,
                                  const string& shard,
                                  const CollectionMetadata* oldMetadata,
                                  CollectionMetadata* metadata) const
{
    map<string, ChunkVersion> versionMap;
    // Preserve the epoch
    versionMap[shard] = metadata->_shardVersion;
    OID epoch = metadata->getCollVersion().epoch();
    bool fullReload = true;
    // Check to see if we should use the old version or not.
    if ( oldMetadata ) {
        // If our epochs are compatible, it's useful to use the old metadata for diffs
        if ( oldMetadata->getCollVersion().hasEqualEpoch( epoch ) ) {
            fullReload = false;
            invariant( oldMetadata->isValid() );
            versionMap[shard] = oldMetadata->_shardVersion;
            metadata->_collVersion = oldMetadata->_collVersion;
            // TODO: This could be made more efficient if copying not required, but
            // not as frequently reloaded as in mongos.
            metadata->_chunksMap = oldMetadata->_chunksMap;
            LOG( 2 ) << "loading new chunks for collection " << ns
                     << " using old metadata w/ version " << oldMetadata->getShardVersion()
                     << " and " << metadata->_chunksMap.size() << " chunks" << endl;
        }
        else {
            warning() << "reloading collection metadata for " << ns << " with new epoch "
                      << epoch.toString() << ", the current epoch is "
                      << oldMetadata->getCollVersion().epoch().toString() << endl;
        }
    }
    // Exposes the new metadata's range map and version to the "differ," who
    // would ultimately be responsible of filling them up.
    SCMConfigDiffTracker differ( shard );
    differ.attach( ns, metadata->_chunksMap, metadata->_collVersion, versionMap );
    try {
        std::vector<ChunkType> chunks;
        Status status = catalogManager->getChunks(differ.configDiffQuery(), 0, &chunks);
        if (!status.isOK()) {
            if (status == ErrorCodes::HostUnreachable) {
                // Make our metadata invalid
                metadata->_collVersion = ChunkVersion( 0, 0, OID() );
                metadata->_chunksMap.clear();
            }
            return status;
        }
        //
        // The diff tracker should always find at least one chunk (the highest chunk we saw
        // last time). If not, something has changed on the config server (potentially between
        // when we read the collection data and when we read the chunks data).
        //
        int diffsApplied = differ.calculateConfigDiff(chunks);
        if ( diffsApplied > 0 ) {
            // Chunks found, return ok
            LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
                   << " with version " << metadata->_collVersion;
            metadata->_shardVersion = versionMap[shard];
            metadata->fillRanges();
            invariant( metadata->isValid() );
            return Status::OK();
        }
        else if ( diffsApplied == 0 ) {
            // No chunks found, the collection is dropping or we're confused
            // If this is a full reload, assume it is a drop for backwards compatibility
            // TODO: drop the config.collections entry *before* the chunks and eliminate this
            // ambiguity
            string errMsg =
                str::stream() << "no chunks found when reloading " << ns
                              << ", previous version was "
                              << metadata->_collVersion.toString()
                              << ( fullReload ? ", this is a drop" : "" );
            warning() << errMsg << endl;
            metadata->_collVersion = ChunkVersion( 0, 0, OID() );
            metadata->_chunksMap.clear();
            return fullReload ? Status( ErrorCodes::NamespaceNotFound, errMsg ) :
                                Status( ErrorCodes::RemoteChangeDetected, errMsg );
        }
        else {
            // Invalid chunks found, our epoch may have changed because we dropped/recreated
            // the collection.
            string errMsg = str::stream() << "invalid chunks found when reloading " << ns
//......... part of the code omitted here .........
Example 10: mergeChunks
bool mergeChunks( OperationContext* txn,
                  const NamespaceString& nss,
                  const BSONObj& minKey,
                  const BSONObj& maxKey,
                  const OID& epoch,
                  string* errMsg ) {
    //
    // Get sharding state up-to-date
    //
    ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                          *errMsg );
    if ( !configLoc.isValid() ){
        warning() << *errMsg << endl;
        return false;
    }
    //
    // Get the distributed lock
    //
    ScopedDistributedLock collLock( configLoc, nss.ns() );
    collLock.setLockMessage( stream() << "merging chunks in " << nss.ns() << " from "
                                      << minKey << " to " << maxKey );
    Status acquisitionStatus = collLock.tryAcquire();
    if (!acquisitionStatus.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(acquisitionStatus);
        warning() << *errMsg << endl;
        return false;
    }
    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //
    ChunkVersion shardVersion;
    Status status = shardingState.refreshMetadataNow(txn, nss.ns(), &shardVersion);
    if ( !status.isOK() ) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy( status.reason() );
        warning() << *errMsg << endl;
        return false;
    }
    if ( epoch.isSet() && shardVersion.epoch() != epoch ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " has changed" << " since merge was sent" << "(sent epoch : "
                           << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";
        warning() << *errMsg << endl;
        return false;
    }
    CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );
    if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";
        warning() << *errMsg << endl;
        return false;
    }
    dassert( metadata->getShardVersion().equals( shardVersion ) );
    if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {
        *errMsg = stream() << "could not merge chunks, the range "
                           << rangeToString( minKey, maxKey ) << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();
        warning() << *errMsg << endl;
        return false;
    }
    //
    // Get merged chunk information
    //
    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();
    OwnedPointerVector<ChunkType> chunksToMerge;
    ChunkType itChunk;
    itChunk.setMin( minKey );
    itChunk.setMax( minKey );
    itChunk.setNS( nss.ns() );
//......... part of the code omitted here .........
Example 11: updateShardChunks
Status updateShardChunks(OperationContext* opCtx,
                         const NamespaceString& nss,
                         const std::vector<ChunkType>& chunks,
                         const OID& currEpoch) {
    invariant(!chunks.empty());
    NamespaceString chunkMetadataNss(ChunkType::ShardNSPrefix + nss.ns());
    try {
        DBDirectClient client(opCtx);
        /**
         * Here are examples of the operations that can happen on the config server to update
         * the config.chunks collection. 'chunks' only includes the chunks that result from the
         * operations, which can be read from the config server, not any that were removed, so
         * we must delete any chunks that overlap with the new 'chunks'.
         *
         * CollectionVersion = 10.3
         *
         * moveChunk
         * {_id: 3, max: 5, version: 10.1} --> {_id: 3, max: 5, version: 11.0}
         *
         * splitChunk
         * {_id: 3, max: 9, version 10.3} --> {_id: 3, max: 5, version 10.4}
         *                                    {_id: 5, max: 8, version 10.5}
         *                                    {_id: 8, max: 9, version 10.6}
         *
         * mergeChunk
         * {_id: 10, max: 14, version 4.3} --> {_id: 10, max: 22, version 10.4}
         * {_id: 14, max: 19, version 7.1}
         * {_id: 19, max: 22, version 2.0}
         *
         */
        for (auto& chunk : chunks) {
            // Check for a different epoch.
            if (!chunk.getVersion().hasEqualEpoch(currEpoch)) {
                return Status{ErrorCodes::ConflictingOperationInProgress,
                              str::stream() << "Invalid chunks found when reloading '"
                                            << nss.toString()
                                            << "'. Previous collection epoch was '"
                                            << currEpoch.toString()
                                            << "', but unexpectedly found a new epoch '"
                                            << chunk.getVersion().epoch().toString()
                                            << "'. Collection was dropped and recreated."};
            }
            // Delete any overlapping chunk ranges. Overlapping chunks will have a min value
            // ("_id") between (chunk.min, chunk.max].
            //
            // query: { "_id" : {"$gte": chunk.min, "$lt": chunk.max}}
            auto deleteDocs(stdx::make_unique<BatchedDeleteDocument>());
            deleteDocs->setQuery(BSON(ChunkType::minShardID << BSON(
                "$gte" << chunk.getMin() << "$lt" << chunk.getMax())));
            deleteDocs->setLimit(0);
            auto deleteRequest(stdx::make_unique<BatchedDeleteRequest>());
            deleteRequest->addToDeletes(deleteDocs.release());
            BatchedCommandRequest batchedDeleteRequest(deleteRequest.release());
            batchedDeleteRequest.setNS(chunkMetadataNss);
            const BSONObj deleteCmdObj = batchedDeleteRequest.toBSON();
            rpc::UniqueReply deleteCommandResponse =
                client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
                                              deleteCmdObj.firstElementFieldName(),
                                              rpc::makeEmptyMetadata(),
                                              deleteCmdObj);
            auto deleteStatus =
                getStatusFromCommandResult(deleteCommandResponse->getCommandReply());
            if (!deleteStatus.isOK()) {
                return deleteStatus;
            }
            // Now the document can be expected to cleanly insert without overlap.
            auto insert(stdx::make_unique<BatchedInsertRequest>());
            insert->addToDocuments(chunk.toShardBSON());
            BatchedCommandRequest insertRequest(insert.release());
            insertRequest.setNS(chunkMetadataNss);
            const BSONObj insertCmdObj = insertRequest.toBSON();
            rpc::UniqueReply commandResponse =
                client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
                                              insertCmdObj.firstElementFieldName(),
                                              rpc::makeEmptyMetadata(),
                                              insertCmdObj);
            auto insertStatus = getStatusFromCommandResult(commandResponse->getCommandReply());
            if (!insertStatus.isOK()) {
                return insertStatus;
            }
        }
        return Status::OK();
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}
Example 12: mongoRegisterContext
/* ****************************************************************************
*
* mongoRegisterContext -
*/
HttpStatusCode mongoRegisterContext(RegisterContextRequest* requestP, RegisterContextResponse* responseP, const std::string& tenant)
{
  reqSemTake(__FUNCTION__, "ngsi9 register request");
  LM_T(LmtMongo, ("Register Context Request"));
  DBClientBase* connection = getMongoConnection();
  /* Check if new registration */
  if (requestP->registrationId.isEmpty()) {
    HttpStatusCode result = processRegisterContext(requestP, responseP, NULL, tenant);
    reqSemGive(__FUNCTION__, "ngsi9 register request");
    return result;
  }
  /* It is not a new registration, so it should be an update */
  BSONObj reg;
  OID id;
  try
  {
    id = OID(requestP->registrationId.get());
    mongoSemTake(__FUNCTION__, "findOne from RegistrationsCollection");
    reg = connection->findOne(getRegistrationsCollectionName(tenant).c_str(), BSON("_id" << id));
    mongoSemGive(__FUNCTION__, "findOne from RegistrationsCollection");
    LM_I(("Database Operation Successful (findOne _id: %s)", id.toString().c_str()));
  }
  catch (const AssertionException &e)
  {
    mongoSemGive(__FUNCTION__, "findOne from RegistrationsCollection (AssertionException)");
    reqSemGive(__FUNCTION__, "ngsi9 register request");
    /* This happens when OID format is wrong */
    // FIXME: this checking should be done at parsing stage, without progressing to
    // mongoBackend. By the moment we can live this here, but we should remove in the future
    responseP->errorCode.fill(SccContextElementNotFound);
    responseP->registrationId = requestP->registrationId;
    ++noOfRegistrationUpdateErrors;
    LM_W(("Bad Input (invalid OID format)"));
    return SccOk;
  }
  catch (const DBException &e)
  {
    mongoSemGive(__FUNCTION__, "findOne from RegistrationsCollection (DBException)");
    reqSemGive(__FUNCTION__, "ngsi9 register request");
    responseP->errorCode.fill(SccReceiverInternalError,
                              std::string("collection: ") + getRegistrationsCollectionName(tenant).c_str() +
                              " - findOne() _id: " + requestP->registrationId.get() +
                              " - exception: " + e.what());
    ++noOfRegistrationUpdateErrors;
    LM_E(("Database Error (%s)", responseP->errorCode.details.c_str()));
    return SccOk;
  }
  catch (...)
  {
    mongoSemGive(__FUNCTION__, "findOne from RegistrationsCollection (Generic Exception)");
    reqSemGive(__FUNCTION__, "ngsi9 register request");
    responseP->errorCode.fill(SccReceiverInternalError,
                              std::string("collection: ") + getRegistrationsCollectionName(tenant).c_str() +
                              " - findOne() _id: " + requestP->registrationId.get() +
                              " - exception: " + "generic");
    ++noOfRegistrationUpdateErrors;
    LM_E(("Database Error (%s)", responseP->errorCode.details.c_str()));
    return SccOk;
  }
  if (reg.isEmpty())
  {
    reqSemGive(__FUNCTION__, "ngsi9 register request (no registrations found)");
    responseP->errorCode.fill(SccContextElementNotFound, std::string("registration id: /") + requestP->registrationId.get() + "/");
    responseP->registrationId = requestP->registrationId;
    ++noOfRegistrationUpdateErrors;
    return SccOk;
  }
  HttpStatusCode result = processRegisterContext(requestP, responseP, &id, tenant);
  reqSemGive(__FUNCTION__, "ngsi9 register request");
  return result;
}
Example 13: strlen
void VStore::Object::copy(const OID &nobj) {
    //OID nobj = OID::create();
    OID res, value;
    int flags = 0;
    Attribute *attrib;
    char *dbuf = new char [5000];
    char *dbuf2;
    DASM *dasm = (DASM*)(root.get("notations").get("dasm"));
    for (int i=0; i<m_keys.size(); i++) {
        if (m_keys[i] == This) continue;
        attrib = Attribute::get(m_oid, m_keys[i]);
        if (attrib == 0) {
            res = Null;
            value = Null;
        } else {
            flags = attrib->getFlags();
            res = attrib->getDefinition();
        }
        /*if (flags & OID::FLAG_OUT_OF_DATE) {
            attrib->setFlags(flags & (0xFF - OID::FLAG_OUT_OF_DATE));
            flags = attrib->getFlags();
            attrib->setValue(Definition(res).evaluate(m_oid, m_keys[i], false));
            value = attrib->getValue();
        }*/
        if (res != Null) {
            //Clone the definition
            //value = res;
            //res = s_alloc;
            //s_alloc++;
            //value.copy(res);
            //std::cout << "CLONING DEF\n";
            dbuf2 = dbuf;
            Definition d = res;
            nobj.toString(dbuf2, 100);
            dbuf2 += strlen(dbuf2);
            strcpy(dbuf2, " ");
            dbuf2 += strlen(dbuf2);
            m_keys[i].toString(dbuf2,100);
            dbuf2 += strlen(dbuf2);
            if (flags && 0x01) {
                //nobj.define(m_keys[i], res, true);
                strcpy(dbuf2, " is { ");
                dbuf2 += strlen(dbuf2);
            } else {
                value = attrib->getValue();
                //nobj.set(m_keys[i], value, true);
                //nobj.definefuture(m_keys[i], res, true);
                strcpy(dbuf2, " = ");
                dbuf2 += strlen(dbuf2);
                value.toString(dbuf2, 100);
                dbuf2 += strlen(dbuf2);
                strcpy(dbuf2, "\n");
                dbuf2 += strlen(dbuf2);
                m_keys[i].toString(dbuf2, 100);
                dbuf2 += strlen(dbuf2);
                strcpy(dbuf2, " := { ");
                dbuf2 += strlen(dbuf2);
            }
            d.toString(dbuf2, 4000);
            dbuf2 += strlen(dbuf2);
            strcpy(dbuf2, " }\n");
            //std::cout << "DBUF: " << dbuf << "\n";
            //Execute
            dasm->execute(dbuf);
        } else {
            if (flags && OID::FLAG_DEEP) {
                //#ifdef LINUX
                //SPINLOCK
                //pthread_mutex_lock(&oid_lock);
                //#endif
                res = s_alloc;
                s_alloc++;
                //#ifdef LINUX
                //SPINLOCK
                //pthread_mutex_unlock(&oid_lock);
                //#endif
                value = attrib->getValue();
                value.copy(res);
                res.set(This, nobj, true);
                nobj.set(m_keys[i], res, true);
            } else {
                value = attrib->getValue();
                if (value != Null) nobj.set(m_keys[i], value, true);
            }
            nobj.flags(m_keys[i],flags, true);
        }
        //delete evt;
//......... part of the code omitted here .........
Example 14: getCurrentTime
//......... part of the code omitted here .........
        LM_T(LmtMongo, ("Entity registration: {id: %s}", en->id.c_str()));
      }
      else
      {
        entities.append(BSON(REG_ENTITY_ID << en->id << REG_ENTITY_TYPE << en->type));
        LM_T(LmtMongo, ("Entity registration: {id: %s, type: %s}", en->id.c_str(), en->type.c_str()));
      }
    }
    BSONArrayBuilder attrs;
    for (unsigned int jx = 0; jx < cr->contextRegistrationAttributeVector.size(); ++jx)
    {
      ContextRegistrationAttribute* cra = cr->contextRegistrationAttributeVector.get(jx);
      attrs.append(BSON(REG_ATTRS_NAME << cra->name << REG_ATTRS_TYPE << cra->type << "isDomain" << cra->isDomain));
      LM_T(LmtMongo, ("Attribute registration: {name: %s, type: %s, isDomain: %s}",
                      cra->name.c_str(),
                      cra->type.c_str(),
                      cra->isDomain.c_str()));
      for (unsigned int kx = 0; kx < requestP->contextRegistrationVector.get(ix)->contextRegistrationAttributeVector.get(jx)->metadataVector.size(); ++kx)
      {
        // FIXME: metadata not supported at the moment
      }
    }
    contextRegistration.append(BSON(
      REG_ENTITIES << entities.arr() <<
      REG_ATTRS << attrs.arr() <<
      REG_PROVIDING_APPLICATION << requestP->contextRegistrationVector.get(ix)->providingApplication.get())
    );
    LM_T(LmtMongo, ("providingApplication registration: %s", requestP->contextRegistrationVector.get(ix)->providingApplication.c_str()));
    std::string err;
    if (!processAssociations(cr->registrationMetadataVector, &err, tenant))
    {
      responseP->errorCode.fill(SccReceiverInternalError);
      return SccOk;
    }
    if (!addTriggeredSubscriptions(*cr, subsToNotify, err, tenant))
    {
      responseP->errorCode.fill(SccReceiverInternalError, err);
      return SccOk;
    }
  }
  reg.append(REG_CONTEXT_REGISTRATION, contextRegistration.arr());
  BSONObj regDoc = reg.obj();
  LM_T(LmtMongo, ("upsert update() in '%s' collection: '%s'", getRegistrationsCollectionName(tenant).c_str(), regDoc.toString().c_str()));
  try
  {
    connection = getMongoConnection();
    /* Note the fourth parameter is set to "true". This means "upsert", so if the document doesn't previously
     * exist in the collection, it is created. Thus, this way is ok with both uses of
     * registerContext (either new registration or updating an existing one) */
    connection->update(getRegistrationsCollectionName(tenant).c_str(), BSON("_id" << oid), regDoc, true);
    releaseMongoConnection(connection);
    LM_I(("Database Operation Successful (_id: %s)", oid.toString().c_str()));
  }
  catch (const DBException& e)
  {
    releaseMongoConnection(connection);
    responseP->errorCode.fill(SccReceiverInternalError,
                              std::string("collection: ") + getRegistrationsCollectionName(tenant).c_str() +
                              " - upsert update(): " + regDoc.toString() +
                              " - exception: " + e.what());
    LM_E(("Database Error (%s)", responseP->errorCode.reasonPhrase.c_str()));
    releaseTriggeredSubscriptions(subsToNotify);
    return SccOk;
  }
  catch (...)
  {
    releaseMongoConnection(connection);
    responseP->errorCode.fill(SccReceiverInternalError,
                              std::string("collection: ") + getRegistrationsCollectionName(tenant).c_str() +
                              " - upsert update(): " + regDoc.toString() +
                              " - exception: " + "generic");
    LM_E(("Database Error (%s)", responseP->errorCode.reasonPhrase.c_str()));
    releaseTriggeredSubscriptions(subsToNotify);
    return SccOk;
  }
  /* Send notifications for each one of the subscriptions accumulated by
   * previous addTriggeredSubscriptions() invocations */
  std::string err;
  processSubscriptions(triggerEntitiesV, subsToNotify, err, tenant);
  /* Fill the response element */
  responseP->duration = requestP->duration;
  responseP->registrationId.set(oid.toString());
  responseP->errorCode.fill(SccOk);
  return SccOk;
}
Example 15: if
//......... part of the code omitted here .........
        connPtr->done();
    }
    //
    // First lock all collection namespaces that exist
    //
    OwnedPointerMap<string, CollectionType> ownedCollections;
    const map<string, CollectionType*>& collections = ownedCollections.map();
    Status findCollectionsStatus = findAllCollectionsV3(configLoc, &ownedCollections);
    if (!findCollectionsStatus.isOK()) {
        *errMsg = stream() << "could not read collections from config server"
                           << causedBy(findCollectionsStatus);
        return false;
    }
    //
    // Acquire locks for all sharded collections
    // Something that didn't involve getting thousands of locks would be better.
    //
    OwnedPointerVector<ScopedDistributedLock> collectionLocks;
    log() << "acquiring locks for " << collections.size() << " sharded collections..." << endl;
    // WARNING - this string is used programmatically when forcing locks, be careful when
    // changing!
    // TODO: Add programmatic "why" field to lock collection
    string lockMessage = str::stream() << "ensuring epochs for config upgrade"
                                       << " (" << upgradeId.toString() << ")";
    if (!_acquireAllCollectionLocks(configLoc,
                                    collections,
                                    lockMessage,
                                    20 * 60 * 1000,
                                    &collectionLocks,
                                    errMsg))
    {
        *errMsg = stream() << "could not acquire all namespace locks for upgrade"
                           << " (" << upgradeId.toString() << ")"
                           << causedBy(errMsg);
        return false;
    }
    // We are now preventing all splits and migrates for all sharded collections
    // Get working and backup suffixes
    string workingSuffix = genWorkingSuffix(upgradeId);
    string backupSuffix = genBackupSuffix(upgradeId);
    log() << "copying collection and chunk metadata to working and backup collections..."
          << endl;
    // Get a backup and working copy of the config.collections and config.chunks collections
    Status copyStatus = copyFrozenCollection(configLoc,
                                             CollectionType::ConfigNS,
                                             CollectionType::ConfigNS + workingSuffix);
    if (!copyStatus.isOK()) {