This article collects typical usage examples of the C++ method BSONObj::getObjectID. If you are wondering what BSONObj::getObjectID does, how to call it, or what real-world uses of it look like, the curated code samples below should help. You can also read more about its containing class, BSONObj.
The following presents 15 code examples of BSONObj::getObjectID, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
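Before the collected examples, here is a minimal, self-contained sketch of the method's basic contract, written against the legacy MongoDB C++ driver. The header path, field names, and printed output are illustrative assumptions, not taken from the examples below: getObjectID looks for a top-level "_id" field, and if one exists it stores that element in the out-parameter and returns true; otherwise it returns false.
// Minimal usage sketch -- assumes the legacy MongoDB C++ driver is available
// (header path and field names here are illustrative assumptions).
// BSONObj::getObjectID(BSONElement&) returns true and fills `id` only when the
// object carries a top-level "_id" field.
#include <iostream>
#include <mongo/bson/bson.h>

int main() {
    mongo::BSONObjBuilder b;
    b.genOID();                      // append an auto-generated "_id" (ObjectId)
    b.append("name", "example");
    mongo::BSONObj doc = b.obj();

    mongo::BSONElement id;
    if (doc.getObjectID(id)) {
        // The recurring pattern in the examples below: rebuild an { _id: ... }
        // query object from the extracted element.
        mongo::BSONObjBuilder idPattern;
        idPattern.append(id);
        std::cout << "query by id: " << idPattern.obj().toString() << std::endl;
    } else {
        std::cout << "document has no _id field" << std::endl;
    }
    return 0;
}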
Example 1: pretouchOperation
void pretouchOperation(OperationContext* txn, const BSONObj& op) {
    if (txn->lockState()->isWriteLocked()) {
        // no point pretouching if write locked. not sure if this will ever fire, but just in case.
        return;
    }

    const char* which = "o";
    const char* opType = op.getStringField("op");
    if (*opType == 'i')
        ;
    else if (*opType == 'u')
        which = "o2";
    else
        return;
    /* todo : other operations */

    try {
        BSONObj o = op.getObjectField(which);
        BSONElement _id;
        if (o.getObjectID(_id)) {
            const char* ns = op.getStringField("ns");
            BSONObjBuilder b;
            b.append(_id);
            BSONObj result;
            AutoGetCollectionForRead ctx(txn, ns);
            if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
                _dummy_z += result.objsize();  // touch
            }
        }
    } catch (DBException&) {
        log() << "ignoring assertion in pretouchOperation()" << endl;
    }
}
Example 2: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
    @return true if an update should have happened but the document does not exist (DNE); see the replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters* opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char* names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char* ns = fields[1].valuestrsafe();
    Lock::assertWriteLocked(ns);

    NamespaceDetails* nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char* opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char* p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;
                updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                // probably don't need this since all replicated colls have _id indexes now
                // but keep it just in case
                RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                          then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);
                updateObjectsForReplication(ns, o, b.done(), true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
            }
        }
    }
Example 3: prefetchRecordPages
void prefetchRecordPages(const char* ns, const BSONObj& obj) {
    BSONElement _id;
    if( obj.getObjectID(_id) ) {
        TimerHolder timer(&prefetchDocStats);
        BSONObjBuilder builder;
        builder.append(_id);
        BSONObj result;
        try {
            // we can probably use Client::Context here instead of ReadContext as we
            // have locked higher up the call stack already
            Client::ReadContext ctx( ns );
            if( Helpers::findById(cc(), ns, builder.done(), result) ) {
                // do we want to use Record::touch() here? it's pretty similar.
                volatile char _dummy_char = '\0';
                // Touch the first word on every page in order to fault it into memory
                for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
                    _dummy_char += *(result.objdata() + i);
                }
                // hit the last page, in case we missed it above
                _dummy_char += *(result.objdata() + result.objsize() - 1);
            }
        }
        catch(const DBException& e) {
            LOG(2) << "ignoring exception in prefetchRecordPages(): " << e.what() << endl;
        }
    }
}
Example 4: pretouchOperation
void pretouchOperation(const BSONObj& op) {
    if( dbMutex.isWriteLocked() )
        return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.

    const char *which = "o";
    const char *opType = op.getStringField("op");
    if ( *opType == 'i' )
        ;
    else if( *opType == 'u' )
        which = "o2";
    else
        return;
    /* todo : other operations */

    try {
        BSONObj o = op.getObjectField(which);
        BSONElement _id;
        if( o.getObjectID(_id) ) {
            const char *ns = op.getStringField("ns");
            BSONObjBuilder b;
            b.append(_id);
            BSONObj result;
            readlock lk(ns);
            Client::Context ctx( ns );
            if( Helpers::findById(cc(), ns, b.done(), result) )
                _dummy_z += result.objsize(); // touch
        }
    }
    catch( DBException& ) {
        log() << "ignoring assertion in pretouchOperation()" << endl;
    }
}
Example 5: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
    @return true if an update should have happened but the document does not exist (DNE); see the replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    assertInWriteLock();
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();

    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false, debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                /* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
                RARELY if (nsd && !nsd->capped) {
                    ensureHaveIdIndex(ns); // otherwise updates will be slow
                }

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                          then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
Example 6: writeToDB
/**********************************************************
 * similar to writetodb in FileRec
 *
 ***********************************************************/
void VersionRec::writeToDB(mongo::DBClientConnection& conn) {
    BSONObjBuilder record;
    if (this->versionid.empty()) { // no id has been read in because this is a new version
        record.genOID();           // so create one
    } else {
        mongo::OID theoid(this->versionid); // use the current id
        record.append("_id", theoid);
    }

    // convert to BSON
    record.append("Tempname", this->tmpname);
    record.append("filehash", this->filehash);
    record.append("length", this->length);
    record.append("Version", this->versionnumber);

    long long time = this->modifytime.tv_nsec;
    record.append("Mtnsec", time);
    time = this->modifytime.tv_sec;
    record.append("mtsec", time);

    mongo::BSONArrayBuilder Version;
    for (vector<VersionDiffBlock>::iterator it = changes.begin(); it != changes.end(); ++it) {
        BSONObjBuilder version;
        version.append("Blknum", (*it).blockNo);
        version.append("hash", (*it).blockHash);
        Version.append(version.obj());
    }
    record.append("Blktable", Version.arr());

    BSONObj result = record.obj();
    if (this->versionid.empty()) {
        mongo::BSONElement thing;
        result.getObjectID(thing);
        mongo::OID anoid = thing.OID();
        this->versionid = anoid.toString();
    }

    auto_ptr<mongo::DBClientCursor> cursor = conn.query("fileRecords.FileVersion", MONGO_QUERY("_id" << mongo::OID(this->versionid)));
    if (cursor->more()) { // there is already a version with the same id: update it
        conn.update("fileRecords.FileVersion", MONGO_QUERY("_id" << mongo::OID(this->versionid)), result);
    } else {              // new version: insert it
        conn.insert("fileRecords.FileVersion", result);
    }

    string e = conn.getLastError();
    if (!e.empty()) {
        cout << "something failed: " << e << std::endl;
        sleep(1);
        exit(1);
    }
    else {
        cout << "Version " << this->versionnumber << " successfully written to database" << endl;
    }
}
Example 7: makeOplogEntryQuery
BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj doc, bool multi) const {
    BSONObjBuilder idPattern;
    BSONElement id;
    // NOTE: If the matching object lacks an id, we'll log
    // with the original pattern. This isn't replay-safe.
    // It might make sense to suppress the log instead
    // if there's no id.
    if ( doc.getObjectID( id ) ) {
        idPattern.append( id );
        return idPattern.obj();
    }
    else {
        uassert( 16980, "multi-update requires all modified objects to have an _id" , ! multi );
        return doc;
    }
}
Example 8: SavePaymentJson
string SavePaymentJson(int amount)
{
    //BSONObj paymentBSON = mongo::fromjson(newPyamentJson);
    BSONObj paymentBSON = BSON(GENOID
                               << "PayedToUserId" << 8888
                               << "PayedDate" << "2015-01-25 12:00:00"
                               << "PayedPeriodStartDate" << "2015-01-01 00:00:00"
                               << "PayedPeriodEndDate" << "2015-01-29 23:59:59"
                               << "Amount" << amount);

    db.insert(PAYMENTS_COLLECTION_NAMESPASE, paymentBSON);

    BSONElement oi;
    paymentBSON.getObjectID(oi);
    OID oid = oi.__oid();
    return oid.toString();
}
Example 9: makeOplogEntryQuery
BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const {
    BSONObjBuilder idPattern;
    BSONElement id;
    // NOTE: If the matching object lacks an id, we'll log
    // with the original pattern. This isn't replay-safe.
    // It might make sense to suppress the log instead
    // if there's no id.
    if (doc.getObjectID(id)) {
        idPattern.append(id);
        return idPattern.obj();
    } else {
        uassert(16980,
                str::stream() << "Multi-update operations require all documents to "
                                 "have an '_id' field. " << doc.toString(false, false),
                !multi);
        return doc;
    }
}
Example 10: pretouchN
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
    Client* c = currentClient.get();
    if (c == 0) {
        Client::initThread("pretouchN");
        c = &cc();
    }

    OperationContextImpl txn;  // XXX
    ScopedTransaction transaction(&txn, MODE_S);
    Lock::GlobalRead lk(txn.lockState());

    for (unsigned i = a; i <= b; i++) {
        const BSONObj& op = v[i];
        const char* which = "o";
        const char* opType = op.getStringField("op");
        if (*opType == 'i')
            ;
        else if (*opType == 'u')
            which = "o2";
        else
            continue;
        /* todo : other operations */

        try {
            BSONObj o = op.getObjectField(which);
            BSONElement _id;
            if (o.getObjectID(_id)) {
                const char* ns = op.getStringField("ns");
                BSONObjBuilder b;
                b.append(_id);
                BSONObj result;
                Client::Context ctx(&txn, ns);
                if (Helpers::findById(&txn, ctx.db(), ns, b.done(), result))
                    _dummy_z += result.objsize();  // touch
            }
        } catch (DBException& e) {
            log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' '
                  << e.toString() << endl;
        }
    }
}
Example 11: pretouchN
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
    DEV assert( !dbMutex.isWriteLocked() );

    Client *c = currentClient.get();
    if( c == 0 ) {
        Client::initThread("pretouchN");
        c = &cc();
    }

    readlock lk("");
    for( unsigned i = a; i <= b; i++ ) {
        const BSONObj& op = v[i];
        const char *which = "o";
        const char *opType = op.getStringField("op");
        if ( *opType == 'i' )
            ;
        else if( *opType == 'u' )
            which = "o2";
        else
            continue;
        /* todo : other operations */

        try {
            BSONObj o = op.getObjectField(which);
            BSONElement _id;
            if( o.getObjectID(_id) ) {
                const char *ns = op.getStringField("ns");
                BSONObjBuilder b;
                b.append(_id);
                BSONObj result;
                Client::Context ctx( ns );
                if( Helpers::findById(cc(), ns, b.done(), result) )
                    _dummy_z += result.objsize(); // touch
            }
        }
        catch( DBException& e ) {
            log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' ' << e.toString() << endl;
        }
    }
}
Example 12: _updateObjects
//......... part of the code is omitted here .........
    do {
        debug.nscanned++;

        if ( mods.get() && mods->hasDynamicArray() ) {
            // The Cursor must have a Matcher to record an elemMatchKey. But currently
            // a modifier on a dynamic array field may be applied even if there is no
            // elemMatchKey, so a matcher cannot be required.
            //verify( c->matcher() );
            details.requestElemMatchKey();
        }

        if ( !c->currentMatches( &details ) ) {
            c->advance();
            continue;
        }

        BSONObj currPK = c->currPK();
        if ( c->getsetdup( currPK ) ) {
            c->advance();
            continue;
        }

        BSONObj currentObj = c->current();
        BSONObj pattern = patternOrig;

        if ( logop ) {
            BSONObjBuilder idPattern;
            BSONElement id;
            // NOTE: If the matching object lacks an id, we'll log
            // with the original pattern. This isn't replay-safe.
            // It might make sense to suppress the log instead
            // if there's no id.
            if ( currentObj.getObjectID( id ) ) {
                idPattern.append( id );
                pattern = idPattern.obj();
            }
            else {
                uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
            }
        }

        /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
           regular ones at the moment. */
        struct LogOpUpdateDetails loud;
        loud.logop = logop;
        loud.ns = ns;
        loud.fromMigrate = fromMigrate;
        if ( isOperatorUpdate ) {
            if ( multi ) {
                // Make our own copies of the currPK and currentObj before we invalidate
                // them by advancing the cursor.
                currPK = currPK.copy();
                currentObj = currentObj.copy();

                // Advance past the document to be modified. This used to be because of SERVER-5198,
                // but TokuMX does it because we want to avoid needing to do manual deduplication
                // of this PK on the next iteration if the current update modifies the next
                // entry in the index. For example, an index scan over a:1 with mod {$inc: {a:1}}
                // would cause every other key read to be a duplicate if we didn't advance here.
                while ( c->ok() && currPK == c->currPK() ) {
                    c->advance();
                }

                // Multi updates need to do their own deduplication because updates may modify the
Example 13: applyOperation_inlock
void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    assertInWriteLock();
    LOG(6) << "applying op: " << op << endl;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false, debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                /* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
                RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                          then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
    else if ( *opType == 'u' ) {
        opCounters->gotUpdate();

        RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
        OpDebug debug;
        updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
    }
    else if ( *opType == 'd' ) {
        opCounters->gotDelete();

        if ( opType[1] == 0 )
            deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
        else
            assert( opType[1] == 'b' ); // "db" advertisement
    }
    else if ( *opType == 'c' ) {
        opCounters->gotCommand();

        BufBuilder bb;
        BSONObjBuilder ob;
        _runCommands(ns, o, bb, ob, true, 0);
    }
    else if ( *opType == 'n' ) {
        // no op
    }
    else {
        throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
    }
}
Example 14: updateRecord
/** Note: if the object shrinks a lot, we don't free up space, we leave extra at end of the record.
*/
const DiskLoc DataFileMgr::updateRecord(
    const char *ns,
    Collection* collection,
    Record *toupdate, const DiskLoc& dl,
    const char *_buf, int _len, OpDebug& debug, bool god) {

    dassert( toupdate == dl.rec() );

    BSONObj objOld = BSONObj::make(toupdate);
    BSONObj objNew(_buf);
    DEV verify( objNew.objsize() == _len );
    DEV verify( objNew.objdata() == _buf );

    if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
        /* add back the old _id value if the update removes it. Note this implementation is slow
           (copies entire object multiple times), but this shouldn't happen often, so going for simple
           code, not speed.
        */
        BSONObjBuilder b;
        BSONElement e;
        verify( objOld.getObjectID(e) );
        b.append(e); // put _id first, for best performance
        b.appendElements(objNew);
        objNew = b.obj();
    }

    NamespaceString nsstring(ns);
    if (nsstring.coll() == "system.users") {
        V2UserDocumentParser parser;
        uassertStatusOK(parser.checkValidUserDocument(objNew));
    }

    uassert( 13596 , str::stream() << "cannot change _id of a document old:" << objOld << " new:" << objNew,
             objNew["_id"] == objOld["_id"]);

    /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
       below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
    */
    OwnedPointerVector<UpdateTicket> updateTickets;
    updateTickets.mutableVector().resize(collection->details()->getTotalIndexCount());
    for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
        auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
        auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
        InsertDeleteOptions options;
        options.logIfError = false;
        options.dupsAllowed = !(KeyPattern::isIdKeyPattern(descriptor->keyPattern())
                                || descriptor->unique())
                              || ignoreUniqueIndex(descriptor->getOnDisk());
        updateTickets.mutableVector()[i] = new UpdateTicket();
        Status ret = iam->validateUpdate(objOld, objNew, dl, options,
                                         updateTickets.mutableVector()[i]);

        if (Status::OK() != ret) {
            uasserted(ASSERT_ID_DUPKEY, "Update validation failed: " + ret.toString());
        }
    }

    if ( toupdate->netLength() < objNew.objsize() ) {
        // doesn't fit. reallocate -----------------------------------------------------
        moveCounter.increment();
        uassert( 10003,
                 "failing update: objects in a capped ns cannot grow",
                 !(collection && collection->details()->isCapped()));
        collection->details()->paddingTooSmall();
        deleteRecord(ns, toupdate, dl);
        DiskLoc res = insert(ns, objNew.objdata(), objNew.objsize(), false, god);

        if (debug.nmoved == -1) // default of -1 rather than 0
            debug.nmoved = 1;
        else
            debug.nmoved += 1;

        return res;
    }

    collection->infoCache()->notifyOfWriteOp();
    collection->details()->paddingFits();

    debug.keyUpdates = 0;

    for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
        auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
        auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
        int64_t updatedKeys;
        Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
        if (Status::OK() != ret) {
            // This shouldn't happen unless something disastrous occurred.
            massert(16799, "update failed: " + ret.toString(), false);
        }
        debug.keyUpdates += updatedKeys;
    }

    // update in place
    int sz = objNew.objsize();
    memcpy(getDur().writingPtr(toupdate->data(), sz), objNew.objdata(), sz);
    return dl;
}
Example 15: _updateObjects
//......... part of the code is omitted here .........
        } // end yielding block

        debug.nscanned++;

        if ( mods.get() && mods->hasDynamicArray() ) {
            details.requestElemMatchKey();
        }

        if ( !c->currentMatches( &details ) ) {
            c->advance();
            continue;
        }

        Record* r = c->_current();
        DiskLoc loc = c->currLoc();

        if ( c->getsetdup( loc ) && autoDedup ) {
            c->advance();
            continue;
        }

        BSONObj js = BSONObj::make(r);

        BSONObj pattern = patternOrig;

        if ( logop ) {
            BSONObjBuilder idPattern;
            BSONElement id;
            // NOTE: If the matching object lacks an id, we'll log
            // with the original pattern. This isn't replay-safe.
            // It might make sense to suppress the log instead
            // if there's no id.
            if ( js.getObjectID( id ) ) {
                idPattern.append( id );
                pattern = idPattern.obj();
            }
            else {
                uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
            }
        }

        /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
           regular ones at the moment. */
        if ( isOperatorUpdate ) {
            if ( multi ) {
                // go to next record in case this one moves
                c->advance();

                // Update operations are deduped for cursors that implement their own
                // deduplication. In particular, some geo cursors are excluded.
                if ( autoDedup ) {
                    if ( seenObjects.count( loc ) ) {
                        continue;
                    }

                    // SERVER-5198 Advance past the document to be modified, provided
                    // deduplication is enabled, but see SERVER-5725.
                    while( c->ok() && loc == c->currLoc() ) {
                        c->advance();
                    }
                }
            }