This page collects typical usage examples of the C++ BSONObj::begin method. If you have been wondering what exactly BSONObj::begin does in C++, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the containing class, BSONObj.
In total, 11 code examples of BSONObj::begin are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
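A quick orientation before the examples: in the MongoDB C++ driver, BSONObj::begin() returns a BSONObjIterator that visits the object's elements in document order through more()/next(). The minimal sketch below shows this idiom, which most of the examples on this page build on (the header path varies across driver versions, and the function name is made up for illustration):

#include <iostream>
#include "mongo/db/jsobj.h"  // BSONObj/BSONObjIterator; the path differs per driver version

// Print every top-level field name of a BSON object.
void printFieldNames(const mongo::BSONObj& obj) {
    for (mongo::BSONObjIterator it = obj.begin(); it.more(); ) {
        mongo::BSONElement e = it.next();
        std::cout << e.fieldName() << std::endl;
    }
}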
Example 1: skipPrefix
// Skip the key comprised of the first k fields of currentKey and the
// rest set to max/min key for direction > 0 or < 0 respectively.
void IndexCursor::skipPrefix(const BSONObj &key, const int k) {
    TOKULOG(3) << "skipPrefix skipping first " << k << " elements in key " << key << endl;
    BSONObjBuilder b(key.objsize());
    BSONObjIterator it = key.begin();
    const int nFields = key.nFields();
    for ( int i = 0; i < nFields; i++ ) {
        if ( i < k ) {
            b.append( it.next() );
        } else {
            if ( _ordering.descending( 1 << i ) ) {
                // Descending sort order, so min key skips forward.
                forward() ? b.appendMinKey( "" ) : b.appendMaxKey( "" );
            } else {
                // Regular ascending order. Max key skips forward.
                forward() ? b.appendMaxKey( "" ) : b.appendMinKey( "" );
            }
        }
    }

    // This differs from findKey in that we set the PK to max to move forward and min
    // to move backward, resulting in a "skip" of the key prefix, not a "find".
    const bool isSecondary = !_cl->isPKIndex(_idx);
    const BSONObj &pk = forward() ? maxKey : minKey;
    setPosition( b.done(), isSecondary ? pk : BSONObj() );
}
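To make the MaxKey/MinKey padding concrete, here is an illustrative sketch of what skipPrefix builds in the simplest case; it is not part of the original cursor code, and buildSkipKeyExample is a made-up name:

// Sketch: for currentKey = {a: 1, b: 2} with k = 1 on an all-ascending index
// scanned forward, the result is conceptually {a: 1, b: MaxKey}, i.e. a
// position just past every key whose first element is a:1.
mongo::BSONObj buildSkipKeyExample(const mongo::BSONObj& currentKey) {
    mongo::BSONObjBuilder b;
    mongo::BSONObjIterator it = currentKey.begin();
    b.append(it.next());  // keep the first k = 1 element(s): a:1
    b.appendMaxKey("");   // pad the remaining field with MaxKey
    return b.obj();
}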
Example 2: BSONInputStream
/*
 * Executes the migration of indexes that were saved in the pre-0.3 format.
 **/
void DBController::migrateIndex0_3(const char* db, const char* ns, InputStream* stream, IndexAlgorithm* impl) {
    long currentPos = stream->currentPos();
    stream->seek(0);
    int records = 0;
    BSONInputStream* bis = new BSONInputStream(stream);
    while (!stream->eof()) {
        BSONObj* obj = bis->readBSON();
        if (!impl) {
            std::set<std::string> skeys;
            for (BSONObj::const_iterator i = obj->begin(); i != obj->end(); i++) {
                std::string key = i->first;
                skeys.insert(key);
            }
        }
        long indexPos = stream->readLong();
        long posData = stream->readLong();
        if (obj->has("_id")) {
            insertIndex(db, ns, obj, posData);
            records++;
        }
        delete obj;
    }
    stream->close();
    if (_logger->isInfo()) _logger->info("db: %s, ns: %s, Index migrated to version 0.3. Records: %d", db, ns, records);
    delete bis;
}
Example 3: compoundObjectResponse
/* ****************************************************************************
 *
 * compoundObjectResponse -
 */
void compoundObjectResponse(orion::CompoundValueNode* cvP, const BSONElement& be)
{
    BSONObj obj = be.embeddedObject();
    cvP->valueType = orion::ValueTypeObject;
    for (BSONObj::iterator i = obj.begin(); i.more();)
    {
        BSONElement e = i.next();
        addCompoundNode(cvP, e);
    }
}
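A hypothetical call site for the function above, as a sketch only: it assumes the attribute document keeps its compound value under a "value" field and that CompoundValueNode is default-constructible; neither assumption is taken from the Orion sources.

// Sketch: build a compound-value tree from the object embedded under "value".
orion::CompoundValueNode* toCompoundTree(const BSONObj& attr)
{
    orion::CompoundValueNode* root = new orion::CompoundValueNode();  // assumed ctor
    compoundObjectResponse(root, attr.getField("value"));             // "value" is illustrative
    return root;
}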
Example 4: mongoHeader
void MongoListModel::mongoHeader() const
{
    BSONObj objOne = mongoDatabase->findOne("mongolab_database.random_data", Query());
    mongoHeaderDataList.clear();
    for ( BSONObj::iterator iter = objOne.begin(); iter.more(); )
    {
        BSONElement element = iter.next();
        QString key = QString::fromStdString( element.fieldName() );
        if ( key == "_id" ) continue;
        mongoHeaderDataList.append(key);
    }
}
Example 5: writeBSON
void BSONOutputStream::writeBSON(const BSONObj& bson) {
    Logger* log = getLogger(NULL);
    if (log->isDebug()) log->debug("BSONOutputStream::writeBSON bson elements: %d", bson.length());
    _outputStream->writeLong(bson.length());
    for (std::map<t_keytype, BSONContent* >::const_iterator i = bson.begin(); i != bson.end(); i++) {
        t_keytype key = i->first;
        if (log->isDebug()) log->debug("BSONOutputStream::writeBSON name: %s", key.c_str());
        _outputStream->writeString(key);
        BSONContent* cont = i->second;
        // If the type is PTRCHAR_TYPE, write it as STRING_TYPE (PTRCHAR_TYPE is slated for removal).
        _outputStream->writeLong(cont->type() != PTRCHAR_TYPE ? cont->type() : STRING_TYPE);
        char* text;
        BSONObj* inner;
        switch (cont->type()) {
            case BSON_TYPE:
                inner = (BSONObj*)cont->_element;
                writeBSON(*inner);
                break;
            case INT_TYPE:
                _outputStream->writeInt(*((int*)cont->_element));
                break;
            case LONG_TYPE:
                _outputStream->writeLong(*((long*)cont->_element));
                break;
            case DOUBLE_TYPE:
                _outputStream->writeDoubleIEEE(*((double*)cont->_element));
                break;
            case PTRCHAR_TYPE:
                text = (char*)cont->_element;
                _outputStream->writeString(std::string(text));
                break;
            case STRING_TYPE: {
                string* str = (string*)cont->_element;
                _outputStream->writeString(*str);
                break;
            }
            case BSONARRAY_TYPE: {
                BSONArrayObj* array = (BSONArrayObj*)cont->_element;
                writeBSONArray(array);
                break;
            }
        }
    }
    delete log;
}
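For context, BSONOutputStream is the write-side counterpart of the BSONInputStream used in Example 2: the layout it writes (length, then key/type/value triples) is what readBSON parses back. A rough round-trip sketch follows; the BSONOutputStream constructor shape is assumed to mirror BSONInputStream(stream) from Example 2 and has not been verified against the djondb sources.

// Hypothetical round-trip; constructor shapes are assumed, not verified.
BSONOutputStream* bos = new BSONOutputStream(outStream);  // outStream: an OutputStream*
bos->writeBSON(*obj);                                     // length + key/type/value triples
// ... later, reading the same bytes back ...
BSONInputStream* bis = new BSONInputStream(inStream);     // inStream: an InputStream*
BSONObj* copy = bis->readBSON();
delete copy;
delete bis;
delete bos;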
Example 6: DBObjectSnapshot
// Get a DBObjectSnapshot from a MongoDB BSON object; returns NULL on failure.
DBObjectSnapshot *format_snapshot(doid_t doid, const BSONObj &obj)
{
    m_log->trace() << "Formatting database snapshot of " << doid << ": "
                   << obj << endl;
    try {
        string dclass_name = obj["dclass"].String();
        const dclass::Class *dclass = g_dcf->get_class_by_name(dclass_name);
        if(!dclass) {
            m_log->error() << "Encountered unknown database object: "
                           << dclass_name << "(" << doid << ")" << endl;
            return NULL;
        }

        BSONObj fields = obj["fields"].Obj();

        DBObjectSnapshot *snap = new DBObjectSnapshot();
        snap->m_dclass = dclass;
        for(auto it = fields.begin(); it.more(); ++it) {
            const char *name = (*it).fieldName();
            const dclass::Field *field = dclass->get_field_by_name(name);
            if(!field) {
                m_log->warning() << "Encountered unexpected field " << name
                                 << " while formatting " << dclass_name
                                 << "(" << doid << "); ignored." << endl;
                continue;
            }
            {
                DatagramPtr dg = Datagram::create();
                bson2bamboo(field->get_type(), *it, *dg);
                snap->m_fields[field].resize(dg->size());
                memcpy(snap->m_fields[field].data(), dg->get_data(), dg->size());
            }
        }

        return snap;
    } catch(mongo::DBException &e) {
        m_log->error() << "Unexpected error while trying to format"
                          " database snapshot for " << doid << ": "
                       << e.what() << endl;
        return NULL;
    }
}
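The field accesses above imply that the snapshot documents carry a "dclass" string naming the class and a "fields" subobject keyed by field name. A hand-built sketch of that shape, where the class and field names are made up:

// Sketch only; "DistributedAvatar" and "setName" are hypothetical names.
mongo::BSONObj snapshot = BSON("dclass" << "DistributedAvatar"
                               << "fields" << BSON("setName" << "Alice"));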
Example 7: _initAndListen
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    _initWireSpec();
    auto globalServiceContext = getGlobalServiceContext();

    globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
    globalServiceContext->setOpObserver(stdx::make_unique<OpObserver>());

    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* txn) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
        });

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.isMaster())
            l << " master=" << replSettings.isMaster();
        if (replSettings.isSlave())
            l << " slave=" << (int)replSettings.isSlave();
        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    checked_cast<ServiceContextMongoD*>(getGlobalServiceContext())->createLockFile();

    transport::TransportLayerLegacy::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    auto sep =
        stdx::make_unique<ServiceEntryPointMongod>(getGlobalServiceContext()->getTransportLayer());
    auto sepPtr = sep.get();

    getGlobalServiceContext()->setServiceEntryPoint(std::move(sep));

    // Create, start, and attach the TL
    auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(options, sepPtr);
    auto res = transportLayer->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
                                          serverGlobalParams.port + 1000,
                                          getGlobalServiceContext(),
                                          new RestAdminAccess()));
        if (!dbWebServer->setupSockets()) {
            error() << "Failed to set up sockets for HTTP interface during startup.";
            return EXIT_NET_ERROR;
        }
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (WiredTigerCustomizationHooks::get(getGlobalServiceContext())->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }
//......... remainder of code omitted .........
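To make the warning above concrete: with wiredTiger as the active engine, parsed options shaped like the sketch below would trigger it for the mmapv1 section, since mmapv1 names a registered but non-active engine (a hand-built illustration, not actual server state):

// Sketch: "wiredTiger" matches the active engine and is skipped; "mmapv1"
// is registered but non-active, so the loop above logs the warning for it.
mongo::BSONObj parsedOpts =
    BSON("storage" << BSON("engine" << "wiredTiger"
                           << "wiredTiger" << mongo::BSONObj()
                           << "mmapv1" << mongo::BSONObj()));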
Example 8: cmdElement
intrusive_ptr<Pipeline> Pipeline::parseCommand(
    string &errmsg, BSONObj &cmdObj,
    const intrusive_ptr<ExpressionContext> &pCtx) {
    intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
    vector<BSONElement> pipeline;

    /* gather the specification for the aggregation */
    for(BSONObj::iterator cmdIterator = cmdObj.begin();
        cmdIterator.more(); ) {
        BSONElement cmdElement(cmdIterator.next());
        const char *pFieldName = cmdElement.fieldName();

        // ignore top-level fields prefixed with $. They are for the command processor, not us.
        if (pFieldName[0] == '$') {
            continue;
        }

        /* look for the aggregation command; its value is the collection name */
        if (!strcmp(pFieldName, commandName)) {
            pPipeline->collectionName = cmdElement.String();
            continue;
        }

        /* check for the pipeline specification */
        if (!strcmp(pFieldName, pipelineName)) {
            pipeline = cmdElement.Array();
            continue;
        }

        /* check for explain option */
        if (!strcmp(pFieldName, explainName)) {
            pPipeline->explain = cmdElement.Bool();
            continue;
        }

        /* if the request came from the router, we're in a shard */
        if (!strcmp(pFieldName, fromRouterName)) {
            pCtx->setInShard(cmdElement.Bool());
            continue;
        }

        /* check for debug options */
        if (!strcmp(pFieldName, splitMongodPipelineName)) {
            pPipeline->splitMongodPipeline = true;
            continue;
        }

        /* we didn't recognize a field in the command */
        ostringstream sb;
        sb << "unrecognized field \"" << cmdElement.fieldName();
        errmsg = sb.str();
        return intrusive_ptr<Pipeline>();
    }

    /*
      If we get here, we've harvested the fields we expect for a pipeline.
      Set up the specified document source pipeline.
    */
    SourceContainer& sources = pPipeline->sources; // shorthand

    /* iterate over the steps in the pipeline */
    const size_t nSteps = pipeline.size();
    for(size_t iStep = 0; iStep < nSteps; ++iStep) {
        /* pull out the pipeline element as an object */
        BSONElement pipeElement(pipeline[iStep]);
        uassert(15942, str::stream() << "pipeline element " <<
                iStep << " is not an object",
                pipeElement.type() == Object);
        BSONObj bsonObj(pipeElement.Obj());

        // Parse a pipeline stage from 'bsonObj'.
        uassert(16435, "A pipeline stage specification object must contain exactly one field.",
                bsonObj.nFields() == 1);
        BSONElement stageSpec = bsonObj.firstElement();
        const char* stageName = stageSpec.fieldName();

        // Create a DocumentSource pipeline stage from 'stageSpec'.
        StageDesc key;
        key.pName = stageName;
        const StageDesc* pDesc = (const StageDesc*)
            bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
                    stageDescCmp);
        uassert(16436,
                str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
                pDesc);

        intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);
        verify(stage);
        stage->setPipelineStep(iStep);
        sources.push_back(stage);
    }

    /* if there aren't any pipeline stages, there's nothing more to do */
    if (sources.empty())
        return pPipeline;

    /*
//......... remainder of code omitted .........
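For reference, the command document this parser walks has the shape sketched below. The field names correspond to the constants checked above (in the MongoDB sources, commandName is "aggregate" and pipelineName is "pipeline"); the collection name and the $match stage contents are illustrative:

// Sketch of an input cmdObj; "orders" and the stage contents are made up.
BSONObj cmdObj = BSON("aggregate" << "orders"
                      << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("status" << "A")))
                      << "explain" << false);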
Example 9: _initAndListen
static void _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    // Due to SERVER-15389, we must setupSockets first thing at startup in order to avoid
    // obtaining too high a file descriptor for our calls to select().
    MessageServer::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    MessageServer* server = createServer(options, new MyMessageHandler());
    server->setAsTimeTracker();

    // This is what actually creates the sockets, but does not yet listen on them because we
    // do not want connections to just hang if recovery takes a very long time.
    server->setupSockets();

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(
            serverGlobalParams.bind_ip, serverGlobalParams.port + 1000, new RestAdminAccess()));
        dbWebServer->setupSockets();
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserver>());

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.master)
            l << " master=" << replSettings.master;
        if (replSettings.slave)
            l << " slave=" << (int)replSettings.slave;
        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

    logMongodStartupWarnings(storageGlobalParams);

#if defined(_WIN32)
    printTargetMinOS();
#endif

    logProcessDetails();

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
//......... remainder of code omitted .........
Example 10: cmdElement
intrusive_ptr<Pipeline> Pipeline::parseCommand(
    string &errmsg, BSONObj &cmdObj,
    const intrusive_ptr<ExpressionContext> &pCtx) {
    intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
    vector<BSONElement> pipeline;

    /* gather the specification for the aggregation */
    for(BSONObj::iterator cmdIterator = cmdObj.begin();
        cmdIterator.more(); ) {
        BSONElement cmdElement(cmdIterator.next());
        const char *pFieldName = cmdElement.fieldName();

        /* look for the aggregation command; its value is the collection name */
        if (!strcmp(pFieldName, commandName)) {
            pPipeline->collectionName = cmdElement.String();
            continue;
        }

        /* check for the pipeline specification */
        if (!strcmp(pFieldName, pipelineName)) {
            pipeline = cmdElement.Array();
            continue;
        }

        /* check for explain option */
        if (!strcmp(pFieldName, explainName)) {
            pPipeline->explain = cmdElement.Bool();
            continue;
        }

        /* if the request came from the router, we're in a shard */
        if (!strcmp(pFieldName, fromRouterName)) {
            pCtx->setInShard(cmdElement.Bool());
            continue;
        }

        /* check for debug options */
        if (!strcmp(pFieldName, splitMongodPipelineName)) {
            pPipeline->splitMongodPipeline = true;
            continue;
        }

        /* Ignore $auth information sent along with the command. The authentication system will
         * use it; it's not a part of the pipeline.
         */
        if (!strcmp(pFieldName, AuthenticationTable::fieldName.c_str())) {
            continue;
        }

        /* we didn't recognize a field in the command */
        ostringstream sb;
        sb << "unrecognized field \"" << cmdElement.fieldName();
        errmsg = sb.str();
        return intrusive_ptr<Pipeline>();
    }

    /*
      If we get here, we've harvested the fields we expect for a pipeline.
      Set up the specified document source pipeline.
    */
    SourceVector *pSourceVector = &pPipeline->sourceVector; // shorthand

    /* iterate over the steps in the pipeline */
    const size_t nSteps = pipeline.size();
    for(size_t iStep = 0; iStep < nSteps; ++iStep) {
        /* pull out the pipeline element as an object */
        BSONElement pipeElement(pipeline[iStep]);
        uassert(15942, str::stream() << "pipeline element " <<
                iStep << " is not an object",
                pipeElement.type() == Object);
        BSONObj bsonObj(pipeElement.Obj());

        // Parse a pipeline stage from 'bsonObj'.
        uassert(16435, "A pipeline stage specification object must contain exactly one field.",
                bsonObj.nFields() == 1);
        BSONElement stageSpec = bsonObj.firstElement();
        const char* stageName = stageSpec.fieldName();

        // Create a DocumentSource pipeline stage from 'stageSpec'.
        StageDesc key;
        key.pName = stageName;
        const StageDesc* pDesc = (const StageDesc*)
            bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
                    stageDescCmp);
        uassert(16436,
                str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
                pDesc);

        intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);
        verify(stage);
        stage->setPipelineStep(iStep);
        pSourceVector->push_back(stage);
    }

    /* if there aren't any pipeline stages, there's nothing more to do */
    if (!pSourceVector->size())
        return pPipeline;
//......... remainder of code omitted .........
Example 11: skipToNextKey
int IndexCursor::skipToNextKey( const BSONObj &currentKey ) {
    int skipPrefixIndex = _boundsIterator->advance( currentKey );
    if ( skipPrefixIndex == -2 ) {
        // We are done iterating completely.
        _ok = false;
        return -2;
    }
    else if ( skipPrefixIndex == -1 ) {
        // We should skip nothing.
        return -1;
    }

    // We should skip to a further key, efficiently.
    //
    // If after(), skip to the first key greater/less than the key comprised
    // of the first "skipPrefixIndex" elements of currentKey, and the rest
    // set to MaxKey/MinKey for direction > 0 and direction < 0 respectively.
    // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1}, direction > 0, so we skip
    // to the first key greater than {a:1, b:maxkey, c:maxkey}
    //
    // If after() is false, we use the same key prefix but set the remaining
    // elements to the elements described by cmp(), in order.
    // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1} and cmp() [b:5, c:11],
    // so we skip to {a:1, b:5, c:11}, also noting direction.
    if ( _boundsIterator->after() ) {
        skipPrefix( currentKey, skipPrefixIndex );
    } else {
        BSONObjBuilder b(currentKey.objsize());
        BSONObjIterator it = currentKey.begin();
        const vector<const BSONElement *> &endKeys = _boundsIterator->cmp();
        const int nFields = currentKey.nFields();
        for ( int i = 0; i < nFields; i++ ) {
            if ( i < skipPrefixIndex ) {
                verify( it.more() );
                b.append( it.next() );
            } else {
                b.appendAs( *endKeys[i], "" );
            }
        }
        findKey( b.done() );

        // Skip past key prefixes that are not supposed to be inclusive,
        // as described by _boundsIterator->inc() and endKeys.
        //
        // We'll spend at worst nFields^2 time ensuring all key elements
        // are properly set if all the inclusive bits are false and we
        // keep landing on keys where the ith element of curr == endKeys[i].
        //
        // This complexity is usually ok, since this skipping is supposed to
        // save us from really big linear scans across the key space in
        // some pathological cases. It's not clear whether or not small
        // cases are hurt too badly by this algorithm.
        bool allInclusive = true;
        const vector<bool> &inclusive = _boundsIterator->inc();
        for ( int i = 0; i < nFields; i++ ) {
            if ( !inclusive[i] ) {
                allInclusive = false;
                break;
            }
        }
again:  while ( !allInclusive && ok() ) {
            BSONObj key = _currKey;
            it = key.begin();
            dassert( nFields == key.nFields() );
            for ( int i = 0; i < nFields; i++ ) {
                const BSONElement e = it.next();
                if ( i >= skipPrefixIndex && !inclusive[i] && e.valuesEqual(*endKeys[i]) ) {
                    // The ith element equals the ith endKey but it's not supposed to be inclusive.
                    // Skipping to the next value for the ith element involves skipping a prefix
                    // with i + 1 elements.
                    skipPrefix( key, i + 1 );
                    goto again;
                }
            }
            break;
        }
    }
    return 0;
}