This article collects typical usage examples of the C++ method NamespaceDetails::isCapped. If you have been wondering what NamespaceDetails::isCapped does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage of the enclosing class, NamespaceDetails.
Below are 15 code examples of the NamespaceDetails::isCapped method, sorted by popularity by default.
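Before the examples, here is a minimal sketch of the pattern almost all of them share: look up the NamespaceDetails for a namespace with nsdetails(), guard against a null result, then branch on isCapped(). The namespace string "test.foo" is an illustrative assumption, not taken from any example below.

    // Minimal sketch of the recurring lookup-then-check pattern
    // ("test.foo" is a placeholder namespace).
    NamespaceDetails *d = nsdetails("test.foo");
    if ( d && d->isCapped() ) {
        // capped collection: special-case paths apply (no compact(),
        // no arbitrary deletes, dedicated capped insert routines)
    }
    else if ( d ) {
        // regular collection path
    }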
Example 1: findAll
/* get a table scan cursor, but can be forward or reverse direction.
   order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
*/
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc) {
    BSONElement el = order.getField("$natural"); // e.g., { $natural : -1 }
    if ( el.number() >= 0 )
        return DataFileMgr::findAll(ns, startLoc);

    // "reverse natural order"
    NamespaceDetails *d = nsdetails(ns);
    if ( !d )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));

    if ( !d->isCapped() ) {
        if ( !startLoc.isNull() )
            return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
        Extent *e = d->lastExtent().ext();
        while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
            OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
            e = e->getPrevExtent();
        }
        return shared_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
    }
    else {
        return shared_ptr<Cursor>( new ReverseCappedCursor( d, startLoc ) );
    }
}
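A hedged usage sketch of the function above (the namespace and loop are assumptions): passing { $natural: -1 } selects the reverse branch, where isCapped() picks between ReverseCursor and ReverseCappedCursor.

    // Assumed usage: reverse natural-order scan from the newest record.
    shared_ptr<Cursor> c = findTableScan("test.foo", BSON("$natural" << -1), DiskLoc());
    while ( c->ok() ) {
        BSONObj doc = c->current(); // documents in reverse insertion order
        c->advance();
    }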
Example 2: compact
bool compact(const string& ns, string &errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
    massert( 14028, "bad ns", NamespaceString::normal(ns.c_str()) );
    // items in system.indexes cannot be moved, as there are pointers to
    // those disklocs in NamespaceDetails
    massert( 14027, "can't compact a system namespace", !str::contains(ns, ".system.") );

    bool ok;
    {
        Lock::DBWrite lk(ns);
        BackgroundOperation::assertNoBgOpInProgForNs(ns.c_str());
        Client::Context ctx(ns);
        NamespaceDetails *d = nsdetails(ns.c_str());
        massert( 13660, str::stream() << "namespace " << ns << " does not exist", d );
        massert( 13661, "cannot compact capped collection", !d->isCapped() );
        log() << "compact " << ns << " begin" << endl;
        if( pf != 0 || pb != 0 ) {
            log() << "paddingFactor:" << pf << " paddingBytes:" << pb << endl;
        }
        try {
            ok = _compact(ns.c_str(), d, errmsg, validate, result, pf, pb);
        }
        catch(...) {
            log() << "compact " << ns << " end (with error)" << endl;
            throw;
        }
        log() << "compact " << ns << " end" << endl;
    }
    return ok;
}
Example 3: getMissingDoc
BSONObj Sync::getMissingDoc(const BSONObj& o) {
    OplogReader missingObjReader;
    const char *ns = o.getStringField("ns");

    // capped collections
    NamespaceDetails *nsd = nsdetails(ns);
    if ( nsd && nsd->isCapped() ) {
        log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
        return BSONObj();
    }

    uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));

    // might be more than just _id in the update criteria
    BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
    BSONObj missingObj;
    try {
        missingObj = missingObjReader.findOne(ns, query);
    } catch(DBException& e) {
        log() << "replication assertion fetching missing object: " << e.what() << endl;
        throw;
    }
    return missingObj;
}
Example 4: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
    @return true if the op was an update that should have been applied but the document DNE.
            see replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();
    Lock::assertWriteLocked(ns);

    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert.
            // if the index already exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;
                updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                // probably don't need this since all replicated colls have _id indexes now
                // but keep it just in case
                RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }

                /* todo : it may be better to do an insert here, and then catch the dup key
                   exception and do an update then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);
                updateObjectsForReplication(ns, o, b.done(), true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
            }
        }
    }
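A sketch of the kind of op document this function replays; the field values are illustrative, and the caller must already hold the write lock (see Lock::assertWriteLocked above).

    // Illustrative oplog-style insert op; not taken from the page.
    BSONObj op = BSON( "op" << "i"
                    << "ns" << "test.foo"
                    << "o"  << BSON( "_id" << 1 << "x" << 1 ) );
    bool failedUpdate = applyOperation_inlock(op, /*fromRepl=*/true,
                                              /*convertUpdateToUpsert=*/false);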
Example 5: operator
void operator()(DBClientCursorBatchIterator &i) {
    const string to_dbname = nsToDatabase(to_collection);
    while (i.moreInCurrentBatch()) {
        if (n % 128 == 127) {
            time_t now = time(0);
            if (now - lastLog >= 60) {
                // report progress
                if (lastLog) {
                    log() << "clone " << to_collection << ' ' << n << endl;
                }
                lastLog = now;
            }
            mayInterrupt(_mayBeInterrupted);
        }

        BSONObj js = i.nextSafe();
        ++n;

        if (isindex) {
            verify(strstr(from_collection, "system.indexes"));
            storedForLater->push_back(fixindex(js, to_dbname).getOwned());
        }
        else {
            try {
                Client::ReadContext ctx(to_collection);
                if (_isCapped) {
                    NamespaceDetails *d = nsdetails(to_collection);
                    verify(d->isCapped());
                    BSONObj pk = js["$_"].Obj();
                    BSONObjBuilder rowBuilder;
                    BSONObjIterator it(js);
                    while (it.moreWithEOO()) {
                        BSONElement e = it.next();
                        if (e.eoo()) {
                            break;
                        }
                        if (!mongoutils::str::equals(e.fieldName(), "$_")) {
                            rowBuilder.append(e);
                        }
                    }
                    BSONObj row = rowBuilder.obj();
                    d->insertObjectIntoCappedWithPK(pk, row, NamespaceDetails::NO_LOCKTREE);
                }
                else {
                    insertObject(to_collection, js, 0, logForRepl);
                }
            }
            catch (UserException& e) {
                error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                throw;
            }
            RARELY if ( time( 0 ) - saveLast > 60 ) {
                log() << n << " objects cloned so far from collection " << from_collection << endl;
                saveLast = time( 0 );
            }
        }
    }
}
Example 6: details
NamespaceDetails* NamespaceIndex::details(const Namespace& ns) {
    if ( !_ht )
        return 0;
    NamespaceDetails *d = _ht->get(ns);
    if ( d && d->isCapped() )
        d->cappedCheckMigrate();
    return d;
}
Example 7: getMissingDoc
BSONObj Sync::getMissingDoc(const BSONObj& o) {
    OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
    const char *ns = o.getStringField("ns");

    // capped collections
    NamespaceDetails *nsd = nsdetails(ns);
    if ( nsd && nsd->isCapped() ) {
        log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
        return BSONObj();
    }

    const int retryMax = 3;
    for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
        if (retryCount != 1) {
            // if we are retrying, sleep a bit to let the network possibly recover
            sleepsecs(retryCount * retryCount);
        }
        try {
            bool ok = missingObjReader.connect(hn);
            if (!ok) {
                warning() << "network problem detected while connecting to the "
                          << "sync source, attempt " << retryCount << " of "
                          << retryMax << endl;
                continue; // try again
            }
        }
        catch (const SocketException&) {
            warning() << "network problem detected while connecting to the "
                      << "sync source, attempt " << retryCount << " of "
                      << retryMax << endl;
            continue; // try again
        }

        // might be more than just _id in the update criteria
        BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
        BSONObj missingObj;
        try {
            missingObj = missingObjReader.findOne(ns, query);
        }
        catch (const SocketException&) {
            warning() << "network problem detected while fetching a missing document from the "
                      << "sync source, attempt " << retryCount << " of "
                      << retryMax << endl;
            continue; // try again
        }
        catch (DBException& e) {
            log() << "replication assertion fetching missing object: " << e.what() << endl;
            throw;
        }

        // success!
        return missingObj;
    }
    // retry count exceeded
    msgasserted(15916,
                str::stream() << "Can no longer connect to initial sync source: " << hn);
}
Example 8: prefetchPagesForReplicatedOp
// prefetch for an oplog operation
void prefetchPagesForReplicatedOp(const BSONObj& op) {
    const char *opField;
    const char *opType = op.getStringField("op");
    switch (*opType) {
    case 'i': // insert
    case 'd': // delete
        opField = "o";
        break;
    case 'u': // update
        opField = "o2";
        break;
    default:
        // prefetch ignores other ops
        return;
    }

    BSONObj obj = op.getObjectField(opField);
    const char *ns = op.getStringField("ns");
    NamespaceDetails *nsd = nsdetails(ns);
    if (!nsd) return; // maybe not opened yet

    LOG(4) << "index prefetch for op " << *opType << endl;

    DEV Lock::assertAtLeastReadLocked(ns);

    // should we prefetch index pages on updates? if the update is in-place and doesn't change
    // indexed values, it is actually slower - a lot slower if there are a dozen indexes or
    // lots of multikeys. possible variations (not all mutually exclusive):
    //  1) current behavior: full prefetch
    //  2) don't do it for updates
    //  3) don't do multikey indexes for updates
    //  4) don't prefetchIndexPages on some heuristic; e.g., if it's an $inc.
    //  5) if not prefetching index pages (#2), we should do it if we are upserting and it
    //     will be an insert. to do that we could do the prefetchRecordPage first and if DNE
    //     then we do #1.
    //
    // note that on deletes 'obj' does not have all the keys we would want to prefetch on.
    // a way to achieve that would be to prefetch the record first, and then afterwards do
    // this part.
    //
    prefetchIndexPages(nsd, obj);

    // do not prefetch the data for inserts; it doesn't exist yet
    //
    // we should consider doing the record prefetch for the delete op case as we hit the record
    // when we delete. note if done we only want to touch the first page.
    //
    // update: do record prefetch.
    if ((*opType == 'u') &&
        // do not prefetch the data for capped collections because
        // they typically do not have an _id index for findById() to use.
        !nsd->isCapped()) {
        prefetchRecordPages(ns, obj);
    }
}
Example 9: run
virtual bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    string coll = cmdObj.firstElement().valuestr();
    if( coll.empty() || db.empty() ) {
        errmsg = "no collection name specified";
        return false;
    }

    if( isCurrentlyAReplSetPrimary() && !cmdObj["force"].trueValue() ) {
        errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
        return false;
    }

    string ns = db + '.' + coll;
    if ( ! NamespaceString::normal(ns.c_str()) ) {
        errmsg = "bad namespace name";
        return false;
    }

    // parameter validation to avoid triggering assertions in compact()
    if ( str::contains(ns, ".system.") ) {
        errmsg = "can't compact a system namespace";
        return false;
    }

    {
        Lock::DBWrite lk(ns);
        Client::Context ctx(ns);
        NamespaceDetails *d = nsdetails(ns.c_str());
        if( ! d ) {
            errmsg = "namespace does not exist";
            return false;
        }
        if ( d->isCapped() ) {
            errmsg = "cannot compact a capped collection";
            return false;
        }
    }

    double pf = 1.0;
    int pb = 0;
    if( cmdObj.hasElement("paddingFactor") ) {
        pf = cmdObj["paddingFactor"].Number();
        verify( pf >= 1.0 && pf <= 4.0 );
    }
    if( cmdObj.hasElement("paddingBytes") ) {
        pb = (int) cmdObj["paddingBytes"].Number();
        verify( pb >= 0 && pb <= 1024 * 1024 );
    }

    bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment
    bool ok = compact(ns, errmsg, validate, result, pf, pb);
    return ok;
}
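For reference, a sketch of a command document this handler would accept; the collection name and values are illustrative, and the bounds are those enforced by the verify() calls above.

    // Illustrative compact command: paddingFactor must lie in [1.0, 4.0],
    // paddingBytes in [0, 1MB]; force:true permits running on a primary.
    BSONObj cmdObj = BSON( "compact" << "foo"
                        << "paddingFactor" << 1.5
                        << "paddingBytes" << 256
                        << "force" << true );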
Example 10: insertObjects
void insertObjects(const char *ns, const vector<BSONObj> &objs, bool keepGoing, uint64_t flags, bool logop ) {
    if (mongoutils::str::contains(ns, "system.")) {
        massert(16748, "need transaction to run insertObjects", cc().txnStackSize() > 0);
        uassert(10095, "attempt to insert in reserved database name 'system'", !mongoutils::str::startsWith(ns, "system."));
        massert(16750, "attempted to insert multiple objects into a system namespace at once", objs.size() == 1);
        if (handle_system_collection_insert(ns, objs[0], logop) != 0) {
            return;
        }
    }

    NamespaceDetails *details = getAndMaybeCreateNS(ns, logop);
    NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get(ns);
    for (size_t i = 0; i < objs.size(); i++) {
        const BSONObj &obj = objs[i];
        try {
            uassert( 10059 , "object to insert too large", obj.objsize() <= BSONObjMaxUserSize);
            BSONObjIterator i( obj );
            while ( i.more() ) {
                BSONElement e = i.next();
                uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' );
            }
            uassert( 16440 , "_id cannot be an array", obj["_id"].type() != Array );

            BSONObj objModified = obj;
            BSONElementManipulator::lookForTimestamps(objModified);
            if (details->isCapped() && logop) {
                // unfortunate hack we need for capped collections
                // we do this because the logic for generating the pk
                // and what subsequent rows to delete are buried in the
                // namespace details object. There is probably a nicer way
                // to do this, but this works.
                details->insertObjectIntoCappedAndLogOps(objModified, flags);
                if (nsdt != NULL) {
                    nsdt->notifyOfWriteOp();
                }
            }
            else {
                insertOneObject(details, nsdt, objModified, flags); // may add _id field
                if (logop) {
                    OpLogHelpers::logInsert(ns, objModified, &cc().txn());
                }
            }
        } catch (const UserException &) {
            if (!keepGoing || i == objs.size() - 1) {
                throw;
            }
        }
    }
}
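A hedged usage sketch of insertObjects (the namespace and documents are assumptions, and a suitable transaction and lock context is presumed to be set up by the caller, since the function does not take locks itself):

    // Assumed usage: batch-insert two documents; keepGoing=false means
    // the first failure aborts the rest of the batch.
    vector<BSONObj> docs;
    docs.push_back(BSON( "_id" << 1 << "x" << "a" ));
    docs.push_back(BSON( "_id" << 2 << "x" << "b" ));
    insertObjects("test.foo", docs, /*keepGoing=*/false, /*flags=*/0, /*logop=*/true);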
Example 11: out
shared_ptr<Cursor> DataFileMgr::findAll(const StringData& ns, const DiskLoc &startLoc) {
    NamespaceDetails * d = nsdetails( ns );
    if ( ! d )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));

    DiskLoc loc = d->firstExtent();
    if ( loc.isNull() )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
    Extent *e = getExtent(loc);

    DEBUGGING {
        out() << "listing extents for " << ns << endl;
        DiskLoc tmp = loc;
        set<DiskLoc> extents;
        while ( 1 ) {
            Extent *f = getExtent(tmp);
            out() << "extent: " << tmp.toString() << endl;
            extents.insert(tmp);
            tmp = f->xnext;
            if ( tmp.isNull() )
                break;
            f = f->getNextExtent();
        }
        out() << endl;
        d->dumpDeleted(&extents);
    }

    if ( d->isCapped() )
        return shared_ptr<Cursor>( ForwardCappedCursor::make( d , startLoc ) );

    if ( !startLoc.isNull() )
        return shared_ptr<Cursor>(new BasicCursor( startLoc ));

    while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
        /* todo: if extent is empty, free it for reuse elsewhere.
           that is a bit complicated have to clean up the freelists.
        */
        RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead. ns:" << ns << endl;
        // find a nonempty extent
        // it might be nice to free the whole extent here! but have to clean up free recs then.
        e = e->getNextExtent();
    }
    return shared_ptr<Cursor>(new BasicCursor( e->firstRecord ));
}
Example 12: _insertObjects
// Does not check magic system collection inserts.
void _insertObjects(const char *ns, const vector<BSONObj> &objs, bool keepGoing, uint64_t flags, bool logop ) {
    NamespaceDetails *details = getAndMaybeCreateNS(ns, logop);
    for (size_t i = 0; i < objs.size(); i++) {
        const BSONObj &obj = objs[i];
        try {
            uassert( 10059 , "object to insert too large", obj.objsize() <= BSONObjMaxUserSize);
            BSONObjIterator i( obj );
            while ( i.more() ) {
                BSONElement e = i.next();
                // check no $ modifiers. note we only check top level.
                // (scanning deep would be quite expensive)
                uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' );
                // check no regexp for _id (SERVER-9502)
                if (str::equals(e.fieldName(), "_id")) {
                    uassert(17033, "can't use a regex for _id", e.type() != RegEx);
                }
            }
            uassert( 16440 , "_id cannot be an array", obj["_id"].type() != Array );

            BSONObj objModified = obj;
            BSONElementManipulator::lookForTimestamps(objModified);
            if (details->isCapped() && logop) {
                // unfortunate hack we need for capped collections
                // we do this because the logic for generating the pk
                // and what subsequent rows to delete are buried in the
                // namespace details object. There is probably a nicer way
                // to do this, but this works.
                details->insertObjectIntoCappedAndLogOps(objModified, flags);
                details->notifyOfWriteOp();
            }
            else {
                insertOneObject(details, objModified, flags); // may add _id field
                if (logop) {
                    OpLogHelpers::logInsert(ns, objModified);
                }
            }
        } catch (const UserException &) {
            if (!keepGoing || i == objs.size() - 1) {
                throw;
            }
        }
    }
}
Example 13: getOrCreateProfileCollection
NamespaceDetails* getOrCreateProfileCollection(Database *db, bool force, string* errmsg ) {
    fassert(16372, db);
    const char* profileName = db->profileName.c_str();
    NamespaceDetails* details = db->namespaceIndex.details(profileName);
    if (!details && (cmdLine.defaultProfile || force)) {
        // system.profile namespace doesn't exist; create it
        log() << "creating profile collection: " << profileName << endl;
        string myerrmsg;
        if (!userCreateNS(db->profileName.c_str(),
                          BSON("capped" << true << "size" << 1024 * 1024), myerrmsg , false)) {
            myerrmsg = str::stream() << "could not create ns " << db->profileName << ": " << myerrmsg;
            log() << myerrmsg << endl;
            if ( errmsg )
                *errmsg = myerrmsg;
            return NULL;
        }
        details = db->namespaceIndex.details(profileName);
    }
    else if ( details && !details->isCapped() ) {
        string myerrmsg = str::stream() << profileName << " exists but isn't capped";
        log() << myerrmsg << endl;
        if ( errmsg )
            *errmsg = myerrmsg;
        return NULL;
    }

    if (!details) {
        // failed to get or create profile collection
        static time_t last = time(0) - 10; // warn the first time
        if( time(0) > last+10 ) {
            log() << "profile: warning ns " << profileName << " does not exist" << endl;
            last = time(0);
        }
    }
    return details;
}
Example 14: deleteObjects
/* ns:      namespace, e.g. <database>.<collection>
   pattern: the "where" clause / criteria
   justOne: stop after 1 match
   god:     allow access to system namespaces, and don't yield
*/
long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop, bool god, RemoveSaver * rs ) {
    if( !god ) {
        if ( strstr(ns, ".system.") ) {
            /* note a delete from system.indexes would corrupt the db
               if done here, as there are pointers into those objects in
               NamespaceDetails.
            */
            uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
        }
        if ( strchr( ns , '$' ) ) {
            log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
            uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
        }
    }

    {
        NamespaceDetails *d = nsdetails( ns );
        if ( ! d )
            return 0;
        uassert( 10101 , "can't remove from a capped collection" , ! d->isCapped() );
    }

    long long nDeleted = 0;
    shared_ptr< Cursor > creal = NamespaceDetailsTransient::getCursor( ns, pattern );
    if( !creal->ok() )
        return nDeleted;

    shared_ptr< Cursor > cPtr = creal;
    auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
    cc->setDoingDeletes( true );

    CursorId id = cc->cursorid();

    bool canYield = !god && !(creal->matcher() && creal->matcher()->docMatcher().atomic());

    do {
        // TODO: we can generalize this I believe
        //
        bool willNeedRecord = (creal->matcher() && creal->matcher()->needRecord()) || pattern.isEmpty() || isSimpleIdQuery( pattern );
        if ( ! willNeedRecord ) {
            // TODO: this is a total hack right now
            // check if the index fully encompasses the query
            if ( pattern.nFields() == 1 &&
                 str::equals( pattern.firstElement().fieldName() , creal->indexKeyPattern().firstElement().fieldName() ) )
                willNeedRecord = true;
        }

        if ( canYield && ! cc->yieldSometimes( willNeedRecord ? ClientCursor::WillNeed : ClientCursor::MaybeCovered ) ) {
            cc.release(); // has already been deleted elsewhere
            // TODO should we assert or something?
            break;
        }
        if ( !cc->ok() ) {
            break; // if we yielded, could have hit the end
        }

        // this way we can avoid calling prepareToYield() every time (expensive)
        // as well as some other nuances handled
        cc->setDoingDeletes( true );

        DiskLoc rloc = cc->currLoc();
        BSONObj key = cc->currKey();

        bool match = creal->currentMatches();

        cc->advance();

        if ( ! match )
            continue;

        // SERVER-5198 Advance past the document to be modified, but see SERVER-5725.
        while( cc->ok() && rloc == cc->currLoc() ) {
            cc->advance();
        }

        bool foundAllResults = ( justOne || !cc->ok() );

        if ( !foundAllResults ) {
            // NOTE: Saving and restoring a btree cursor's position was historically described
            // as slow here.
            cc->c()->prepareToTouchEarlierIterate();
        }

        if ( logop ) {
            BSONElement e;
            if( BSONObj::make( rloc.rec() ).getObjectID( e ) ) {
                BSONObjBuilder b;
                b.append( e );
                bool replJustOne = true;
                logOp( "d", ns, b.done(), 0, &replJustOne );
            }
            else {
//......... the rest of the code is omitted here .........
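A hedged usage sketch of deleteObjects (the namespace and criteria are assumptions): remove a single matching document and log it to the oplog; capped collections are rejected up front by the isCapped() uassert above.

    // Assumed call of the function above; values are illustrative.
    long long nRemoved = deleteObjects("test.foo", BSON( "x" << 1 ),
                                       /*justOne=*/true, /*logop=*/true,
                                       /*god=*/false, /*rs=*/NULL);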
Example 15: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
    @return true if the op was an update that should have been applied but the document DNE.
            see replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
    LOG(3) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();
    Lock::assertWriteLocked(ns);

    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            if (o["background"].trueValue()) {
                IndexBuilder* builder = new IndexBuilder(ns, o);
                // This spawns a new thread and returns immediately.
                builder->go();
            }
            else {
                IndexBuilder builder(ns, o);
                // Finish the foreground build before returning
                builder.build();
            }
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;

                const NamespaceString requestNs(ns);
                UpdateRequest request(
                    requestNs, debug,
                    QueryPlanSelectionPolicy::idElseNatural());

                request.setQuery(o);
                request.setUpdates(o);
                request.setUpsert();
                request.setFromReplication();

                update(request);

                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                // probably don't need this since all replicated colls have _id indexes now
                // but keep it just in case
                RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }

                /* todo : it may be better to do an insert here, and then catch the dup key
                   exception and do an update then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);

                const NamespaceString requestNs(ns);
                UpdateRequest request(
                    requestNs, debug,
                    QueryPlanSelectionPolicy::idElseNatural());

                request.setQuery(b.done());
                request.setUpdates(o);
                request.setUpsert();
                request.setFromReplication();

                update(request);
            }
        }
    }