This article collects typical usage examples of the BSONObj class in C++. If you have been wondering what BSONObj is for, how to use it, or what real BSONObj code looks like, the curated examples below may help.
The following presents 15 code examples of the BSONObj class, drawn from real projects and sorted by popularity by default.
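Before diving in, here is a minimal sketch of the core BSONObj API that nearly every example below relies on: building objects with the BSON macro or BSONObjBuilder, reading fields, iterating elements, and taking ownership. Treat the include path as an assumption; it varies between the server source tree and the legacy C++ driver.

// Minimal BSONObj usage sketch (header path assumed; adjust to your tree).
#include "mongo/db/jsobj.h"

using namespace mongo;

void bsonObjBasics() {
    // Build an object with the streaming BSON macro...
    BSONObj a = BSON("name" << "test" << "n" << 1);

    // ...or incrementally with BSONObjBuilder.
    BSONObjBuilder b;
    b.append("ns", "db.coll");
    b.append("count", 42);
    BSONObj spec = b.obj();

    // Field access returns a BSONElement; a missing field yields an eoo() element.
    BSONElement ns = spec["ns"];
    bool hasNs = spec.hasField("ns");

    // Iterate over all elements.
    BSONObjIterator it(spec);
    while (it.more()) {
        BSONElement cur = it.next();
        // cur.fieldName(), cur.type(), ...
    }

    // A BSONObj is often just a view into someone else's buffer; call
    // getOwned() before storing it past that buffer's lifetime.
    BSONObj owned = spec.getOwned();
}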
Example 1: ThreadReturnData
v8::Handle<v8::Value> ThreadReturnData(V8Scope* scope, const v8::Arguments& args) {
BSONObj data = thisConfig(scope, args)->returnData();
return scope->mongoToV8Element(data.firstElement(), true);
}
Example 2: _renameSingleNamespace
Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
StringData fromNS,
StringData toNS,
bool stayTemp) {
Status s = _renameSingleNamespace(txn, fromNS, toNS, stayTemp);
if (!s.isOK())
return s;
NamespaceDetails* details = _namespaceIndex.details(toNS);
invariant(details);
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
auto cursor = systemIndexRecordStore->getCursor(txn);
while (auto record = cursor->next()) {
BSONObj oldIndexSpec = record->data.releaseToBson();
if (fromNS != oldIndexSpec["ns"].valuestrsafe())
continue;
BSONObj newIndexSpec;
{
BSONObjBuilder b;
BSONObjIterator i(oldIndexSpec);
while (i.more()) {
BSONElement e = i.next();
if (strcmp(e.fieldName(), "ns") != 0)
b.append(e);
else
b << "ns" << toNS;
}
newIndexSpec = b.obj();
}
StatusWith<RecordId> newIndexSpecLoc = systemIndexRecordStore->insertRecord(
txn, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
if (!newIndexSpecLoc.isOK())
return newIndexSpecLoc.getStatus();
const std::string& indexName = oldIndexSpec.getStringField("name");
{
// Fix the IndexDetails pointer.
int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(txn, indexName);
IndexDetails& indexDetails = details->idx(indexI);
*txn->recoveryUnit()->writing(&indexDetails.info) =
DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
}
{
// Move the underlying namespace.
std::string oldIndexNs = IndexDescriptor::makeIndexNamespace(fromNS, indexName);
std::string newIndexNs = IndexDescriptor::makeIndexNamespace(toNS, indexName);
Status s = _renameSingleNamespace(txn, oldIndexNs, newIndexNs, false);
if (!s.isOK())
return s;
}
systemIndexRecordStore->deleteRecord(txn, record->id);
}
return Status::OK();
}
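The builder loop in the middle of Example 2 is the standard idiom for rewriting a single field of an immutable BSONObj: copy every element, substituting only the one being changed. A standalone sketch of that idiom (the helper name is hypothetical):

// Hedged sketch: copy all fields of src, replacing one string field.
#include <cstring>

BSONObj replaceStringField(const BSONObj& src,
                           const char* field,
                           const std::string& newValue) {
    BSONObjBuilder b;
    BSONObjIterator i(src);
    while (i.more()) {
        BSONElement e = i.next();
        if (strcmp(e.fieldName(), field) != 0)
            b.append(e);                // copy the element unchanged
        else
            b.append(field, newValue);  // substitute the new value
    }
    return b.obj();
}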
Example 3: is
BSONObj* DBController::readBSON(StreamType* stream) {
auto_ptr<BSONInputStream> is(new BSONInputStream(stream));
BSONObj* res = is->readBSON();
if (_logger->isDebug()) _logger->debug(3, "DBController read bson from disc: %s", res->toChar());
return res;
}
Example 4: TEST
TEST(MatchExpressionParserTest, ParseIntegerElementToNonNegativeLongRejectsNegative) {
BSONObj query = BSON("" << -2LL);
ASSERT_NOT_OK(
MatchExpressionParser::parseIntegerElementToNonNegativeLong(query.firstElement()));
}
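For contrast, a hypothetical companion test for the accepting case might look like this (a sketch; it assumes the function's return type works with ASSERT_OK the same way ASSERT_NOT_OK is used above):

// Hypothetical companion test: a non-negative value should parse cleanly.
TEST(MatchExpressionParserTest, ParseIntegerElementToNonNegativeLongAcceptsPositive) {
    BSONObj query = BSON("" << 2LL);  // empty field name, long long value
    ASSERT_OK(MatchExpressionParser::parseIntegerElementToNonNegativeLong(query.firstElement()));
}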
Example 5: operator
void operator()( DBClientCursorBatchIterator &i ) {
mongolock l( true );
if ( context ) {
context->relocked();
}
while( i.moreInCurrentBatch() ) {
if ( n % 128 == 127 /*yield some*/ ) {
time_t now = time(0);
if( now - lastLog >= 60 ) {
// report progress
if( lastLog )
log() << "clone " << to_collection << ' ' << n << endl;
lastLog = now;
}
mayInterrupt( _mayBeInterrupted );
dbtempreleaseif t( _mayYield );
}
BSONObj tmp = i.nextSafe();
/* assure object is valid. note this will slow us down a little. */
if ( !tmp.valid() ) {
stringstream ss;
ss << "Cloner: skipping corrupt object from " << from_collection;
BSONElement e = tmp.firstElement();
try {
e.validate();
ss << " firstElement: " << e;
}
catch( ... ) {
ss << " firstElement corrupt";
}
out() << ss.str() << endl;
continue;
}
++n;
BSONObj js = tmp;
if ( isindex ) {
assert( strstr(from_collection, "system.indexes") );
js = fixindex(tmp);
storedForLater->push_back( js.getOwned() );
continue;
}
try {
theDataFileMgr.insertWithObjMod(to_collection, js);
if ( logForRepl )
logOp("i", to_collection, js);
getDur().commitIfNeeded();
}
catch( UserException& e ) {
log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
}
RARELY if ( time( 0 ) - saveLast > 60 ) {
log() << n << " objects cloned so far from collection " << from_collection << endl;
saveLast = time( 0 );
}
}
}
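Note the storedForLater->push_back( js.getOwned() ) call above: a BSONObj coming out of a cursor is a view into the current batch buffer, so it must be copied with getOwned() before being kept past that batch. A minimal sketch of the rule (assuming a DBClientCursor named cursor):

// Sketch: why getOwned() matters when keeping BSONObjs past the batch.
std::vector<BSONObj> keep;
while (cursor->more()) {
    BSONObj doc = cursor->nextSafe();
    // keep.push_back(doc);           // risky: may dangle once the batch is replaced
    keep.push_back(doc.getOwned());   // safe: copies into an owned buffer
}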
Example 6: run
bool run( const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result,
bool ) {
string ns;
if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
return false;
}
if ( ns.size() == 0 ) {
errmsg = "no namespace specified";
return false;
}
vector<BSONObj> bounds;
if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
return false;
}
if ( bounds.size() == 0 ) {
errmsg = "no bounds were specified";
return false;
}
if ( bounds.size() != 2 ) {
errmsg = "only a min and max bound may be specified";
return false;
}
BSONObj minKey = bounds[0];
BSONObj maxKey = bounds[1];
if ( minKey.isEmpty() ) {
errmsg = "no min key specified";
return false;
}
if ( maxKey.isEmpty() ) {
errmsg = "no max key specified";
return false;
}
//
// This might be the first call from mongos, so we may need to pass the config and shard
// information to initialize the shardingState.
//
string config;
FieldParser::FieldState extracted = FieldParser::extract( cmdObj,
configField,
&config,
&errmsg );
if ( !extracted ) return false;
if ( extracted != FieldParser::FIELD_NONE ) {
ShardingState::initialize( config );
}
else if ( !shardingState.enabled() ) {
errmsg =
"sharding state must be enabled or config server specified to merge chunks";
return false;
}
// ShardName is optional, but might not be set yet
string shardName;
extracted = FieldParser::extract( cmdObj, shardNameField, &shardName, &errmsg );
if ( !extracted ) return false;
if ( extracted != FieldParser::FIELD_NONE ) {
shardingState.gotShardName( shardName );
}
//
// Epoch is optional, and if not set indicates we should use the latest epoch
//
OID epoch;
if ( !FieldParser::extract( cmdObj, epochField, &epoch, &errmsg ) ) {
return false;
}
return mergeChunks( NamespaceString( ns ), minKey, maxKey, epoch, true, &errmsg );
}
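For reference, a command document satisfying the parsing above might be built as follows. This is a sketch only: the exact field names are assumptions inferred from the FieldParser calls, not confirmed by the source.

// Hedged sketch of a mergeChunks-style command document (field names assumed).
BSONObj cmd = BSON("mergeChunks" << "db.coll"                      // namespace
                   << "bounds" << BSON_ARRAY(BSON("x" << 0)        // min key
                                             << BSON("x" << 100))  // max key
                   << "config" << "cfg.example.net:27019"          // optional
                   << "shardName" << "shard0000");                 // optional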
Example 7: reqSemTake
/* ****************************************************************************
*
* mongoUnsubscribeContextAvailability -
*/
HttpStatusCode mongoUnsubscribeContextAvailability
(
UnsubscribeContextAvailabilityRequest* requestP,
UnsubscribeContextAvailabilityResponse* responseP,
const std::string& tenant
)
{
bool reqSemTaken;
std::string err;
reqSemTake(__FUNCTION__, "ngsi9 unsubscribe request", SemWriteOp, &reqSemTaken);
LM_T(LmtMongo, ("Unsubscribe Context Availability"));
/* No matter if success or failure, the subscriptionId in the response is always the one
* in the request */
responseP->subscriptionId = requestP->subscriptionId;
/* Look for document */
BSONObj sub;
OID id;
if (!safeGetSubId(requestP->subscriptionId, &id, &(responseP->statusCode)))
{
reqSemGive(__FUNCTION__, "ngsi9 unsubscribe request (safeGetSubId fail)", reqSemTaken);
if (responseP->statusCode.code == SccContextElementNotFound)
{
// FIXME: doubt: invalid OID format? Or, subscription not found?
std::string details = std::string("invalid OID format: '") + requestP->subscriptionId.get() + "'";
alarmMgr.badInput(clientIp, details);
}
else // SccReceiverInternalError
{
LM_E(("Runtime Error (exception getting OID: %s)", responseP->statusCode.details.c_str()));
}
return SccOk;
}
if (!collectionFindOne(getSubscribeContextAvailabilityCollectionName(tenant), BSON("_id" << id), &sub, &err))
{
reqSemGive(__FUNCTION__, "ngsi9 unsubscribe request (mongo db exception)", reqSemTaken);
responseP->statusCode.fill(SccReceiverInternalError, err);
return SccOk;
}
alarmMgr.dbErrorReset();
if (sub.isEmpty())
{
responseP->statusCode.fill(SccContextElementNotFound);
reqSemGive(__FUNCTION__, "ngsi9 unsubscribe request (no subscriptions)", reqSemTaken);
return SccOk;
}
/* Remove document in MongoDB */
// FIXME: I would prefer to do the find and remove in a single operation. Is there something
// similar to findAndModify for this?
if (!collectionRemove(getSubscribeContextAvailabilityCollectionName(tenant), BSON("_id" << OID(requestP->subscriptionId.get())), &err))
{
reqSemGive(__FUNCTION__, "ngsi9 unsubscribe request (mongo db exception)", reqSemTaken);
responseP->statusCode.fill(SccReceiverInternalError, err);
return SccOk;
}
reqSemGive(__FUNCTION__, "ngsi9 unsubscribe request", reqSemTaken);
responseP->statusCode.fill(SccOk);
return SccOk;
}
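The lookup-by-_id pattern above is worth isolating. A sketch, reusing the helper signature from the example (the collection name is an assumption):

// Sketch of the _id lookup pattern (collection name assumed).
OID id("507f1f77bcf86cd799439011");   // a 24-hex-char ObjectId
BSONObj sub;
std::string err;
if (!collectionFindOne("orion.casubs", BSON("_id" << id), &sub, &err)) {
    // database error, reported via err
} else if (sub.isEmpty()) {
    // the query ran fine but no document matched
}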
Example 8: update
UpdateResult update(UpdateRequest& request, UpdateDriver* driver) {
const NamespaceString& nsString = request.getNamespaceString();
validateUpdate( nsString.ns().c_str(), request.getUpdates(), request.getQuery() );
NamespaceDetails* nsDetails = nsdetails( nsString.ns() );
NamespaceDetailsTransient* nsDetailsTransient =
&NamespaceDetailsTransient::get( nsString.ns().c_str() );
OpDebug& debug = request.getDebug();
// TODO: This seems a bit circuitous.
debug.updateobj = request.getUpdates();
driver->refreshIndexKeys( nsDetailsTransient->indexKeys() );
shared_ptr<Cursor> cursor = getOptimizedCursor(
nsString.ns(), request.getQuery(), BSONObj(), request.getQueryPlanSelectionPolicy() );
// If the update was marked with '$isolated' (a.k.a '$atomic'), we are not allowed to
// yield while evaluating the update loop below.
//
// TODO: Old code checks this repeatedly within the update loop. Is that necessary? It seems
// that once atomic should be always atomic.
const bool isolated =
cursor->ok() &&
cursor->matcher() &&
cursor->matcher()->docMatcher().atomic();
// The 'cursor' the optimizer gave us may contain query plans that generate duplicate
// DiskLocs. We set up here the mechanisms that will prevent us from processing those
// twice if we see them. We also set up a 'ClientCursor' so that we can support
// yielding.
//
// TODO: Is it valid to call this on a non-ok cursor?
const bool dedupHere = cursor->autoDedup();
//
// We'll start assuming we have one or more documents for this update. (Otherwise,
// we'll fall back to upserting.)
//
// We record that this will not be an upsert, in case a mod doesn't want to be applied
// when in strict update mode.
driver->setContext( ModifierInterface::ExecInfo::UPDATE_CONTEXT );
// Let's fetch each of them and pipe them through the update expression, making sure to
// keep track of the necessary stats. Recall that we'll be pulling documents out of
// cursors and some of them do not deduplicate the entries they generate. We have
// deduping logic in here, too -- for now.
unordered_set<DiskLoc, DiskLoc::Hasher> seenLocs;
int numMatched = 0;
debug.nscanned = 0;
Client& client = cc();
mutablebson::Document doc;
// If we are going to be yielding, we will need a ClientCursor scoped to this loop. We
// only loop as long as the underlying cursor is OK.
for ( auto_ptr<ClientCursor> clientCursor; cursor->ok(); ) {
// If we haven't constructed a ClientCursor, and if the client allows us to throw
// page faults, and if we are referring to a location that is likely not in
// physical memory, then throw a PageFaultException. The entire operation will be
// restarted.
if ( clientCursor.get() == NULL &&
client.allowedToThrowPageFaultException() &&
!cursor->currLoc().isNull() &&
!cursor->currLoc().rec()->likelyInPhysicalMemory() ) {
// We should never throw a PFE if we have already updated items.
dassert((numMatched == 0) || (numMatched == debug.nupdateNoops));
throw PageFaultException( cursor->currLoc().rec() );
}
if ( !isolated && debug.nscanned != 0 ) {
// We are permitted to yield. To do so we need a ClientCursor, so create one
// now if we have not yet done so.
if ( !clientCursor.get() )
clientCursor.reset(
new ClientCursor( QueryOption_NoCursorTimeout, cursor, nsString.ns() ) );
// Ask the client cursor to yield. We get two bits of state back: whether or not
// we yielded, and whether or not we correctly recovered from yielding.
bool yielded = false;
const bool recovered = clientCursor->yieldSometimes(
ClientCursor::WillNeed, &yielded );
if ( !recovered ) {
// If we failed to recover from the yield, then the ClientCursor is already
// gone. Release it so we don't destroy it a second time.
clientCursor.release();
break;
}
if ( !cursor->ok() ) {
// If the cursor died while we were yielded, just get out of the update loop.
break;
//......... rest of this code omitted .........
Example 9: clear
// static
Status ClearFilters::clear(OperationContext* txn,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
const BSONObj& cmdObj) {
invariant(querySettings);
// According to the specification, the planCacheClearFilters command runs in two modes:
// - clear all hints; or
// - clear hints for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
querySettings->removeAllowedIndices(planCache->computeKey(*cq));
// Remove entry from plan cache
planCache->remove(*cq);
LOG(0) << "Removed index filter on " << ns << " " << cq->toStringShort();
return Status::OK();
}
// If query is not provided, make sure sort and projection are not in arguments.
// We do not want to clear the entire cache inadvertently when the user
// forgot to provide a value for "query".
if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
return Status(ErrorCodes::BadValue, "sort or projection provided without query");
}
// Get entries from query settings. We need to remove corresponding entries from the plan
// cache shortly.
OwnedPointerVector<AllowedIndexEntry> entries;
entries.mutableVector() = querySettings->getAllAllowedIndices();
// OK to proceed with clearing entire cache.
querySettings->clearAllowedIndices();
const NamespaceString nss(ns);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
// Remove corresponding entries from plan cache.
// Admin hints affect the planning process directly. If there were
// plans generated as a result of applying index filter, these need to be
// invalidated. This allows the planner to re-populate the plan cache with
// non-filtered indexed solutions next time the query is run.
// Resolve plan cache key from (query, sort, projection) in query settings entry.
// Concurrency note: There's no harm in removing plan cache entries one at a time.
// Only way that PlanCache::remove() can fail is when the query shape has been removed from
// the cache by some other means (re-index, collection info reset, ...). This is OK since
// that's the intended effect of calling the remove() function with the key from the hint entry.
for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
AllowedIndexEntry* entry = *i;
invariant(entry);
// Create canonical query.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(entry->query);
qr->setSort(entry->sort);
qr->setProj(entry->projection);
auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
invariantOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Remove plan cache entry.
planCache->remove(*cq);
}
LOG(0) << "Removed all index filters for collection: " << ns;
return Status::OK();
}
Example 10: handle_modify
void handle_modify(DBClientBase *client, DBOperation *operation)
{
// First, we have to format our findandmodify.
BSONObjBuilder sets;
bool has_sets = false;
BSONObjBuilder unsets;
bool has_unsets = false;
for(auto it = operation->set_fields().begin();
it != operation->set_fields().end(); ++it) {
stringstream fieldname;
fieldname << "fields." << it->first->get_name();
if(it->second.empty()) {
unsets << fieldname.str() << true;
has_unsets = true;
} else {
DatagramPtr dg = Datagram::create();
dg->add_data(it->second);
DatagramIterator dgi(dg);
sets << fieldname.str() << bamboo2bson(it->first->get_type(), dgi)["_"];
has_sets = true;
}
}
BSONObjBuilder updates_b;
if(has_sets) {
updates_b << "$set" << sets.obj();
}
if(has_unsets) {
updates_b << "$unset" << unsets.obj();
}
BSONObj updates = updates_b.obj();
// Also format any criteria for the change:
BSONObjBuilder query_b;
query_b << "_id" << operation->doid();
for(auto it = operation->criteria_fields().begin();
it != operation->criteria_fields().end(); ++it) {
stringstream fieldname;
fieldname << "fields." << it->first->get_name();
if(it->second.empty()) {
query_b << fieldname.str() << BSON("$exists" << false);
} else {
DatagramPtr dg = Datagram::create();
dg->add_data(it->second);
DatagramIterator dgi(dg);
query_b << fieldname.str() << bamboo2bson(it->first->get_type(), dgi)["_"];
}
}
BSONObj query = query_b.obj();
m_log->trace() << "Performing updates to " << operation->doid()
<< ": " << updates << endl;
m_log->trace() << "Query is: " << query << endl;
BSONObj result;
bool success;
try {
success = client->runCommand(
m_db,
BSON("findandmodify" << "astron.objects"
<< "query" << query
<< "update" << updates),
result);
} catch(mongo::DBException &e) {
m_log->error() << "Unexpected error while modifying "
<< operation->doid() << ": " << e.what() << endl;
operation->on_failure();
return;
}
m_log->trace() << "Update result: " << result << endl;
BSONObj obj;
if(!success || result["value"].isNull()) {
// Okay, something didn't work right. If we had criteria, let's
// try to fetch the object without the criteria to see if it's a
// criteria mismatch or a missing DOID.
if(!operation->criteria_fields().empty()) {
try {
obj = client->findOne(m_obj_collection,
BSON("_id" << operation->doid()));
} catch(mongo::DBException &e) {
m_log->error() << "Unexpected error while modifying "
<< operation->doid() << ": " << e.what() << endl;
operation->on_failure();
return;
}
if(!obj.isEmpty()) {
// There's the problem. Now we can send back a snapshot:
DBObjectSnapshot *snap = format_snapshot(operation->doid(), obj);
if(snap && operation->verify_class(snap->m_dclass)) {
operation->on_criteria_mismatch(snap);
return;
} else {
// Something else weird happened with our snapshot;
// either the class wasn't recognized or it was the
// wrong class. Either way, an error has been logged,
// and we need to fail the operation.
operation->on_failure();
return;
//......... rest of this code omitted .........
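The $set/$unset document assembled at the top of Example 10 is a general pattern; stripped of the datagram plumbing it reduces to the following sketch (field names hypothetical):

// Sketch of the $set/$unset update-document pattern.
BSONObjBuilder sets, unsets;
sets << "fields.health" << 100;   // value to (re)write
unsets << "fields.buff" << true;  // field to delete
BSONObj updates = BSON("$set" << sets.obj() << "$unset" << unsets.obj());
// yields: { $set: { "fields.health": 100 }, $unset: { "fields.buff": true } }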
Example 11: operator
void operator()( DBClientCursorBatchIterator &i ) {
Lock::GlobalWrite lk;
if ( context ) {
context->relocked();
}
while( i.moreInCurrentBatch() ) {
if ( n % 128 == 127 /*yield some*/ ) {
time_t now = time(0);
if( now - lastLog >= 60 ) {
// report progress
if( lastLog )
log() << "clone " << to_collection << ' ' << n << endl;
lastLog = now;
}
mayInterrupt( _mayBeInterrupted );
dbtempreleaseif t( _mayYield );
}
BSONObj tmp = i.nextSafe();
/* assure object is valid. note this will slow us down a little. */
if ( !tmp.valid() ) {
stringstream ss;
ss << "Cloner: skipping corrupt object from " << from_collection;
BSONElement e = tmp.firstElement();
try {
e.validate();
ss << " firstElement: " << e;
}
catch( ... ) {
ss << " firstElement corrupt";
}
out() << ss.str() << endl;
continue;
}
++n;
BSONObj js = tmp;
if ( isindex ) {
verify( strstr(from_collection, "system.indexes") );
js = fixindex(tmp);
storedForLater->push_back( js.getOwned() );
continue;
}
try {
// add keys for presorting
DiskLoc loc = theDataFileMgr.insertWithObjMod(to_collection, js);
loc.assertOk();
if (_sortersForIndex != NULL) {
// add key to SortersForNS
for (SortersForIndex::iterator iSorter = _sortersForIndex->begin();
iSorter != _sortersForIndex->end();
++iSorter) {
iSorter->second.preSortPhase.addKeys(iSorter->second.spec, js,
loc, false);
}
}
if ( logForRepl )
logOp("i", to_collection, js);
getDur().commitIfNeeded();
}
catch( UserException& e ) {
error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
throw;
}
RARELY if ( time( 0 ) - saveLast > 60 ) {
log() << n << " objects cloned so far from collection " << from_collection << endl;
saveLast = time( 0 );
}
}
}
Example 12: extractNonFTSKeyElement
void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet* keys) {
int extraSize = 0;
vector<BSONElement> extrasBefore;
vector<BSONElement> extrasAfter;
// Compute the non FTS key elements for the prefix.
for (unsigned i = 0; i < spec.numExtraBefore(); i++) {
auto indexedElement = extractNonFTSKeyElement(obj, spec.extraBefore(i));
extrasBefore.push_back(indexedElement);
extraSize += indexedElement.size();
}
// Compute the non FTS key elements for the suffix.
for (unsigned i = 0; i < spec.numExtraAfter(); i++) {
auto indexedElement = extractNonFTSKeyElement(obj, spec.extraAfter(i));
extrasAfter.push_back(indexedElement);
extraSize += indexedElement.size();
}
TermFrequencyMap term_freqs;
spec.scoreDocument(obj, &term_freqs);
// create index keys from raw scores
// only 1 per string
// TODO SERVER-36440: Completely remove this limit in 4.3.
if (serverGlobalParams.featureCompatibility.isVersionInitialized() &&
serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16732,
str::stream() << "too many unique keys for a single document to"
<< " have a text index, max is "
<< term_freqs.size()
<< obj["_id"],
term_freqs.size() <= 400000);
}
long long keyBSONSize = 0;
const int MaxKeyBSONSizeMB = 4;
for (TermFrequencyMap::const_iterator i = term_freqs.begin(); i != term_freqs.end(); ++i) {
const string& term = i->first;
double weight = i->second;
// guess the total size of the btree entry based on the size of the weight, term tuple
int guess = 5 /* bson overhead */ + 10 /* weight */ + 8 /* term overhead */ +
/* term size (could be truncated/hashed) */
guessTermSize(term, spec.getTextIndexVersion()) + extraSize;
BSONObjBuilder b(guess); // builds a BSON object with guess length.
for (unsigned k = 0; k < extrasBefore.size(); k++) {
b.appendAs(extrasBefore[k], "");
}
_appendIndexKey(b, weight, term, spec.getTextIndexVersion());
for (unsigned k = 0; k < extrasAfter.size(); k++) {
b.appendAs(extrasAfter[k], "");
}
BSONObj res = b.obj();
verify(guess >= res.objsize());
keys->insert(res);
keyBSONSize += res.objsize();
// TODO SERVER-36440: Completely remove this limit in 4.3.
if (serverGlobalParams.featureCompatibility.isVersionInitialized() &&
serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16733,
str::stream() << "trying to index text where term list is too big, max is "
<< MaxKeyBSONSizeMB
<< "mb "
<< obj["_id"],
keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
}
}
}
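The b.appendAs(element, "") calls above copy an element's value under a different (here empty) field name, which is how text index keys are laid out. A tiny sketch:

// Sketch: appendAs copies a value under a new field name.
BSONObj src = BSON("state" << "NY");
BSONObjBuilder b;
b.appendAs(src["state"], "");   // same value, renamed (empty) field
b.append("", 0.75);             // e.g. the term weight appended next to it
BSONObj key = b.obj();          // { "": "NY", "": 0.75 }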
Example 13: getUpdateExprType
Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
const BatchedUpdateDocument& updateDoc,
vector<ShardEndpoint*>* endpoints) const {
//
// Update targeting may use either the query or the update. This is to support save-style
// updates, of the form:
//
// coll.update({ _id : xxx }, { _id : xxx, shardKey : 1, foo : bar }, { upsert : true })
//
// Because drivers do not know the shard key, they can't pull the shard key automatically
// into the query doc, and to correctly support upsert we must target a single shard.
//
// The rule is simple - if the update is replacement style (no '$set'), we target using the
// update document. If the update is op style, we target using the query.
//
// If we have the exact shard key in either the query or replacement doc, we target using
// that extracted key.
//
BSONObj query = updateDoc.getQuery();
BSONObj updateExpr = updateDoc.getUpdateExpr();
UpdateType updateType = getUpdateExprType(updateDoc.getUpdateExpr());
if (updateType == UpdateType_Unknown) {
return Status(ErrorCodes::UnsupportedFormat,
stream() << "update document " << updateExpr
<< " has mixed $operator and non-$operator style fields");
}
BSONObj shardKey;
if (_manager) {
//
// Sharded collections have the following further requirements for targeting:
//
// Upserts must be targeted exactly by shard key.
// Non-multi updates must be targeted exactly by shard key *or* exact _id.
//
// Get the shard key
if (updateType == UpdateType_OpStyle) {
// Target using the query
StatusWith<BSONObj> status =
_manager->getShardKeyPattern().extractShardKeyFromQuery(query);
// Bad query
if (!status.isOK())
return status.getStatus();
shardKey = status.getValue();
} else {
// Target using the replacement document
shardKey = _manager->getShardKeyPattern().extractShardKeyFromDoc(updateExpr);
}
//
// Extra sharded update validation
//
if (updateDoc.getUpsert()) {
// Sharded upserts *always* need to be exactly targeted by shard key
if (shardKey.isEmpty()) {
return Status(ErrorCodes::ShardKeyNotFound,
stream() << "upsert " << updateDoc.toBSON()
<< " does not contain shard key for pattern "
<< _manager->getShardKeyPattern().toString());
}
// Also check shard key size on upsert
Status status = ShardKeyPattern::checkShardKeySize(shardKey);
if (!status.isOK())
return status;
}
// Validate that single (non-multi) sharded updates are targeted by shard key or _id
if (!updateDoc.getMulti() && shardKey.isEmpty() && !isExactIdQuery(updateDoc.getQuery())) {
return Status(ErrorCodes::ShardKeyNotFound,
stream() << "update " << updateDoc.toBSON()
<< " does not contain _id or shard key for pattern "
<< _manager->getShardKeyPattern().toString());
}
}
// Target the shard key, query, or replacement doc
if (!shardKey.isEmpty()) {
// We can't rely on our query targeting to be exact
ShardEndpoint* endpoint = NULL;
Status result =
targetShardKey(txn, shardKey, (query.objsize() + updateExpr.objsize()), &endpoint);
endpoints->push_back(endpoint);
return result;
} else if (updateType == UpdateType_OpStyle) {
return targetQuery(query, endpoints);
} else {
return targetDoc(updateExpr, endpoints);
}
}
Example 14: _repairExtent
DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ) {
LogIndentLevel lil;
if ( eLoc.getOfs() <= 0 ) {
error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
return DiskLoc();
}
MongoDataFile * mdf = db->getFile( eLoc.a() );
Extent * e = mdf->debug_getExtent( eLoc );
if ( ! e->isOk() ) {
warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
}
log() << "length:" << e->length << endl;
LogIndentLevel lil2;
set<DiskLoc> seen;
DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
while ( ! loc.isNull() ) {
if ( ! seen.insert( loc ).second ) {
error() << "infinite loop in extend, seen: " << loc << " before" << endl;
break;
}
if ( loc.getOfs() <= 0 ) {
error() << "offset is 0 for record which should be impossible" << endl;
break;
}
log(1) << loc << endl;
Record* rec = loc.rec();
BSONObj obj;
try {
obj = loc.obj();
assert( obj.valid() );
LOG(1) << obj << endl;
w( obj );
}
catch ( std::exception& e ) {
log() << "found invalid document @ " << loc << " " << e.what() << endl;
if ( ! obj.isEmpty() ) {
try {
BSONElement e = obj.firstElement();
stringstream ss;
ss << "first element: " << e;
log() << ss.str();
}
catch ( std::exception& ) {
}
}
}
loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
}
return forward ? e->xnext : e->xprev;
}
Example 15: run
int run() {
if ( hasParam( "repair" ) ) {
warning() << "repair is a work in progress" << endl;
return repair();
}
{
string q = getParam("query");
if ( q.size() )
_query = fromjson( q );
}
string opLogName = "";
unsigned long long opLogStart = 0;
if (hasParam("oplog")) {
if (hasParam("query") || hasParam("db") || hasParam("collection")) {
cout << "oplog mode is only supported on full dumps" << endl;
return -1;
}
BSONObj isMaster;
conn("true").simpleCommand("admin", &isMaster, "isMaster");
if (isMaster.hasField("hosts")) { // if connected to replica set member
opLogName = "local.oplog.rs";
}
else {
opLogName = "local.oplog.$main";
if ( ! isMaster["ismaster"].trueValue() ) {
cout << "oplog mode is only supported on master or replica set member" << endl;
return -1;
}
}
auth("local");
BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
if (op.isEmpty()) {
cout << "No operations in oplog. Please ensure you are connecting to a master." << endl;
return -1;
}
assert(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
// check if we're outputting to stdout
string out = getParam("out");
if ( out == "-" ) {
if ( _db != "" && _coll != "" ) {
writeCollectionStdout( _db+"."+_coll );
return 0;
}
else {
cout << "You must specify database and collection to print to stdout" << endl;
return -1;
}
}
_usingMongos = isMongos();
path root( out );
string db = _db;
if ( db == "" ) {
cout << "all dbs" << endl;
auth( "admin" );
BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
if ( ! res["databases"].isABSONObj() ) {
error() << "output of listDatabases isn't what we expected, no 'databases' field:\n" << res << endl;
return -2;
}
BSONObj dbs = res["databases"].embeddedObjectUserCheck();
set<string> keys;
dbs.getFieldNames( keys );
for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
string key = *i;
if ( ! dbs[key].isABSONObj() ) {
error() << "database field not an object key: " << key << " value: " << dbs[key] << endl;
return -3;
}
BSONObj dbobj = dbs[key].embeddedObjectUserCheck();
const char * dbName = dbobj.getField( "name" ).valuestr();
if ( (string)dbName == "local" )
continue;
go ( dbName , root / dbName );
}
}
else {
auth( db );
go( db , root / db );
}
//......... rest of this code omitted .........
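Finally, note the fromjson call near the top of Example 15: it is the usual way to turn a user-supplied JSON string into a BSONObj, e.g.:

// Sketch: fromjson parses a JSON string into a BSONObj.
BSONObj q = fromjson("{ \"age\": { \"$gt\": 21 } }");
// q can now be used as a query object, as _query is above.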