This article collects typical usage examples of the C++ NamespaceDetails class. If you are wondering how NamespaceDetails is used in practice, or are looking for real-world examples of it, the selected class code examples here may help.
Fifteen code examples of the NamespaceDetails class are shown below, sorted by popularity by default.
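Most of the snippets below follow the same basic pattern: resolve a NamespaceDetails from a namespace string, then work with its per-index IndexDetails entries. A minimal sketch of that pattern, assuming the appropriate database context and lock are already held (the collection name is illustrative, not taken from the examples):

NamespaceDetails *d = nsdetails("test.foo");     // NULL if the namespace does not exist
if ( d ) {
    int idxNo = d->findIdIndex();                // -1 if the collection has no _id index
    if ( idxNo >= 0 ) {
        IndexDetails& idx = d->idx( idxNo );     // per-index metadata: key pattern, btree head, ...
        BSONObj keyPattern = idx.keyPattern();
    }
}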
Example 1: assert
bool Helpers::findById(Client& c, const char *ns, BSONObj query, BSONObj& result ,
                       bool * nsFound , bool * indexFound ) {
    d.dbMutex.assertAtLeastReadLocked();
    Database *database = c.database();
    assert( database );
    NamespaceDetails *d = database->namespaceIndex.details(ns);
    if ( ! d )
        return false;
    if ( nsFound )
        *nsFound = 1;

    int idxNo = d->findIdIndex();
    if ( idxNo < 0 )
        return false;
    if ( indexFound )
        *indexFound = 1;

    IndexDetails& i = d->idx( idxNo );
    BSONObj key = i.getKeyFromQuery( query );

    DiskLoc loc = i.idxInterface().findSingle(i , i.head , key);
    if ( loc.isNull() )
        return false;
    result = loc.obj();
    return true;
}
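A hypothetical call site for Helpers::findById might look like the following; the namespace and _id value are illustrative and not taken from the examples:

BSONObj result;
bool nsFound = false, indexFound = false;
if ( Helpers::findById( cc(), "test.users", BSON( "_id" << 42 ), result, &nsFound, &indexFound ) ) {
    // result now holds the document whose _id is 42
}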
Example 2: getIndex
IndexDescriptor* getIndex(const BSONObj& obj) {
    Client::ReadContext ctx(ns());
    Collection* collection = ctx.ctx().db()->getCollection( ns() );
    NamespaceDetails* nsd = collection->details();
    int idxNo = nsd->findIndexByKeyPattern(obj);
    return collection->getIndexCatalog()->getDescriptor( idxNo );
}
Example 3: compact
bool compact(const string& ns, string &errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
    massert( 14028, "bad ns", NamespaceString::normal(ns.c_str()) );
    massert( 14027, "can't compact a system namespace", !str::contains(ns, ".system.") ); // items in system.indexes cannot be moved; there are pointers to those disklocs in NamespaceDetails

    bool ok;
    {
        Lock::DBWrite lk(ns);
        BackgroundOperation::assertNoBgOpInProgForNs(ns.c_str());
        Client::Context ctx(ns);
        NamespaceDetails *d = nsdetails(ns.c_str());
        massert( 13660, str::stream() << "namespace " << ns << " does not exist", d );
        massert( 13661, "cannot compact capped collection", !d->isCapped() );
        log() << "compact " << ns << " begin" << endl;
        if( pf != 0 || pb != 0 ) {
            log() << "paddingFactor:" << pf << " paddingBytes:" << pb << endl;
        }
        try {
            ok = _compact(ns.c_str(), d, errmsg, validate, result, pf, pb);
        }
        catch(...) {
            log() << "compact " << ns << " end (with error)" << endl;
            throw;
        }
        log() << "compact " << ns << " end" << endl;
    }
    return ok;
}
Example 4: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
    @return true if it was an update that should have happened and the document DNE; see replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();
    Lock::assertWriteLocked(ns);

    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id. This will be very slow. */
                Timer t;
                updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                // probably don't need this since all replicated colls have _id indexes now
                // but keep it just in case
                RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                   then. very few upserts will not be inserts...
                */
                BSONObjBuilder b;
                b.append(_id);
                updateObjectsForReplication(ns, o, b.done(), true, false, false , debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
            }
        }
    }
    // (handling of the remaining op types is omitted in this excerpt)
Example 5: dupCheck
void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc) {
    int z = d.nIndexesBeingBuilt();
    for( int i = 0; i < z; i++ ) {
        IndexDetails& idx = d.idx(i);
        v[i].dupCheck(idx, curObjLoc);
    }
}
Example 6: setHead
void IndexCatalogEntry::setHead( DiskLoc newHead ) {
    NamespaceDetails* nsd = _collection->detailsWritable();
    int idxNo = _indexNo();
    IndexDetails& id = nsd->idx( idxNo );
    id.head.writing() = newHead;
    _head = newHead;
}
Example 7: touch
bool touch( std::string& ns,
            std::string& errmsg,
            bool touch_data,
            bool touch_indexes,
            BSONObjBuilder& result ) {
    if (touch_data) {
        log() << "touching namespace " << ns << endl;
        touchNs( ns );
        log() << "touching namespace " << ns << " complete" << endl;
    }

    if (touch_indexes) {
        // enumerate indexes
        std::vector< std::string > indexes;
        {
            Client::ReadContext ctx(ns);
            NamespaceDetails *nsd = nsdetails(ns);
            massert( 16153, "namespace does not exist", nsd );

            NamespaceDetails::IndexIterator ii = nsd->ii();
            while ( ii.more() ) {
                IndexDetails& idx = ii.next();
                indexes.push_back( idx.indexNamespace() );
            }
        }

        for ( std::vector<std::string>::const_iterator it = indexes.begin();
              it != indexes.end();
              it++ ) {
            touchNs( *it );
        }
    }
    return true;
}
Example 8: cc
shared_ptr<Cursor> NamespaceDetailsTransient::getCursor( const char *ns, const BSONObj &query, const BSONObj &order ) {
    if ( query.isEmpty() && order.isEmpty() ) {
        // TODO This will not use a covered index currently.
        return theDataFileMgr.findAll( ns );
    }
    if ( isSimpleIdQuery( query ) ) {
        Database *database = cc().database();
        assert( database );
        NamespaceDetails *d = database->namespaceIndex.details(ns);
        if ( d ) {
            int idxNo = d->findIdIndex();
            if ( idxNo >= 0 ) {
                IndexDetails& i = d->idx( idxNo );
                BSONObj key = i.getKeyFromQuery( query );
                return shared_ptr<Cursor>( BtreeCursor::make( d, idxNo, i, key, key, true, 1 ) );
            }
        }
    }
    auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
    shared_ptr<Cursor> single = mps->singleCursor();
    if ( single ) {
        if ( !query.isEmpty() && !single->matcher() ) {
            shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, single->indexKeyPattern() ) );
            single->setMatcher( matcher );
        }
        return single;
    }
    return newQueryOptimizerCursor( mps );
}
Example 9: nsdetails
BSONObj Sync::getMissingDoc(const BSONObj& o) {
    OplogReader missingObjReader;
    const char *ns = o.getStringField("ns");

    // capped collections
    NamespaceDetails *nsd = nsdetails(ns);
    if ( nsd && nsd->isCapped() ) {
        log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
        return BSONObj();
    }

    uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));

    // might be more than just _id in the update criteria
    BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
    BSONObj missingObj;
    try {
        missingObj = missingObjReader.findOne(ns, query);
    } catch(DBException& e) {
        log() << "replication assertion fetching missing object: " << e.what() << endl;
        throw;
    }
    return missingObj;
}
Example 10: handle_system_collection_insert
static int handle_system_collection_insert(const char *ns, const BSONObj &obj, bool logop) {
    // Trying to insert into a system collection. Fancy side-effects go here:
    // TODO: see insert_checkSys
    if (mongoutils::str::endsWith(ns, ".system.indexes")) {
        // obj is something like this:
        // { _id: ObjectId('511d34f6d3080c48017a14d0'), ns: "test.leif", key: { a: -1.0 }, name: "a_-1", unique: true }
        const string &coll = obj["ns"].String();
        NamespaceDetails *details = getAndMaybeCreateNS(coll.c_str(), logop);
        BSONObj key = obj["key"].Obj();
        int i = details->findIndexByKeyPattern(key);
        if (i >= 0) {
            return ASSERT_ID_DUPKEY;
        } else {
            details->createIndex(obj);
        }
    } else if (legalClientSystemNS(ns, true)) {
        if (mongoutils::str::endsWith(ns, ".system.users")) {
            uassert( 14051 , "system.users entry needs 'user' field to be a string", obj["user"].type() == String );
            uassert( 14052 , "system.users entry needs 'pwd' field to be a string", obj["pwd"].type() == String );
            uassert( 14053 , "system.users entry needs 'user' field to be non-empty", obj["user"].String().size() );
            uassert( 14054 , "system.users entry needs 'pwd' field to be non-empty", obj["pwd"].String().size() );
        }
    } else {
        uasserted(16459, str::stream() << "attempt to insert in system namespace '" << ns << "'");
    }
    return 0;
}
Example 11: forward
/* get a table scan cursor, but can be forward or reverse direction.
   order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
*/
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc) {
    BSONElement el = order.getField("$natural"); // e.g., { $natural : -1 }

    if ( el.number() >= 0 )
        return DataFileMgr::findAll(ns, startLoc);

    // "reverse natural order"
    NamespaceDetails *d = nsdetails(ns);
    if ( !d )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));

    if ( !d->isCapped() ) {
        if ( !startLoc.isNull() )
            return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
        Extent *e = d->lastExtent().ext();
        while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
            OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
            e = e->getPrevExtent();
        }
        return shared_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
    }
    else {
        return shared_ptr<Cursor>( new ReverseCappedCursor( d, startLoc ) );
    }
}
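As the comment above describes, a negative $natural sort requests a reverse table scan. A minimal usage sketch under that assumption (the namespace is illustrative):

shared_ptr<Cursor> c = findTableScan( "test.foo", BSON( "$natural" << -1 ), DiskLoc() );
while ( c->ok() ) {
    BSONObj obj = c->current();   // documents in reverse natural order
    c->advance();
}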
Example 12: ensureHaveIdIndex
void ensureHaveIdIndex(const char* ns, bool mayInterrupt) {
    NamespaceDetails *d = nsdetails(ns);
    if ( d == 0 || d->isSystemFlagSet(NamespaceDetails::Flag_HaveIdIndex) )
        return;

    d->setSystemFlag( NamespaceDetails::Flag_HaveIdIndex );

    {
        NamespaceDetails::IndexIterator i = d->ii();
        while( i.more() ) {
            if( i.next().isIdIndex() )
                return;
        }
    }

    string system_indexes = cc().database()->name() + ".system.indexes";

    BSONObjBuilder b;
    b.append("name", "_id_");
    b.append("ns", ns);
    b.append("key", id_obj);
    BSONObj o = b.done();

    /* edge case: note the insert could fail if we have hit maxindexes already */
    theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), mayInterrupt, true);
}
Example 13: nsdetails
void Helpers::ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name) {
    NamespaceDetails *d = nsdetails(ns);
    if( d == 0 )
        return;

    {
        NamespaceDetails::IndexIterator i = d->ii();
        while( i.more() ) {
            if( i.next().keyPattern().woCompare(keyPattern) == 0 )
                return;
        }
    }

    if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
        problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
        return;
    }

    string system_indexes = cc().database()->name + ".system.indexes";

    BSONObjBuilder b;
    b.append("name", name);
    b.append("ns", ns);
    b.append("key", keyPattern);
    b.appendBool("unique", unique);
    BSONObj o = b.done();

    theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
}
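A hypothetical caller of the helper above, ensuring a unique index on an assumed field (the namespace, key pattern, and index name are illustrative):

Helpers::ensureIndex( "test.users", BSON( "email" << 1 ), /*unique=*/true, "email_1" );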
Example 14: emptyOplog
static void emptyOplog() {
    writelock lk(rsoplog);
    Client::Context ctx(rsoplog);
    NamespaceDetails *d = nsdetails(rsoplog);

    // temp
    if( d && d->stats.nrecords == 0 )
        return; // already empty, ok.

    log(1) << "replSet empty oplog" << rsLog;
    d->emptyCappedCollection(rsoplog);

    /*
    string errmsg;
    bob res;
    dropCollection(rsoplog, errmsg, res);
    log() << "replSet recreated oplog so it is empty. todo optimize this..." << rsLog;
    createOplog();*/

    // TEMP: restart to recreate empty oplog
    //log() << "replSet FATAL error during initial sync. mongod restart required." << rsLog;
    //dbexit( EXIT_CLEAN );

    /*
    writelock lk(rsoplog);
    Client::Context c(rsoplog, dbpath, 0, doauth/false);
    NamespaceDetails *oplogDetails = nsdetails(rsoplog);
    uassert(13412, str::stream() << "replSet error " << rsoplog << " is missing", oplogDetails != 0);
    oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
    */
}
Example 15: operator
void operator()(DBClientCursorBatchIterator &i) {
    const string to_dbname = nsToDatabase(to_collection);
    while (i.moreInCurrentBatch()) {
        if (n % 128 == 127) {
            time_t now = time(0);
            if (now - lastLog >= 60) {
                // report progress
                if (lastLog) {
                    log() << "clone " << to_collection << ' ' << n << endl;
                }
                lastLog = now;
            }
            mayInterrupt(_mayBeInterrupted);
        }

        BSONObj js = i.nextSafe();
        ++n;

        if (isindex) {
            verify(strstr(from_collection, "system.indexes"));
            storedForLater->push_back(fixindex(js, to_dbname).getOwned());
        }
        else {
            try {
                Client::ReadContext ctx(to_collection);
                if (_isCapped) {
                    NamespaceDetails *d = nsdetails(to_collection);
                    verify(d->isCapped());
                    BSONObj pk = js["$_"].Obj();
                    BSONObjBuilder rowBuilder;
                    BSONObjIterator it(js);
                    while (it.moreWithEOO()) {
                        BSONElement e = it.next();
                        if (e.eoo()) {
                            break;
                        }
                        if (!mongoutils::str::equals(e.fieldName(), "$_")) {
                            rowBuilder.append(e);
                        }
                    }
                    BSONObj row = rowBuilder.obj();
                    d->insertObjectIntoCappedWithPK(pk, row, NamespaceDetails::NO_LOCKTREE);
                }
                else {
                    insertObject(to_collection, js, 0, logForRepl);
                }
            }
            catch (UserException& e) {
                error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                throw;
            }
            RARELY if ( time( 0 ) - saveLast > 60 ) {
                log() << n << " objects cloned so far from collection " << from_collection << endl;
                saveLast = time( 0 );
            }
        }
    }
}