This page collects typical usage examples of the C++ method BSONObj::getField. If you have been wondering how BSONObj::getField is used in practice, what it does, or what real code calling it looks like, the curated examples below should help. You can also explore further usage examples of its containing class, BSONObj.
Fifteen code examples of BSONObj::getField are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
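Before the full examples, here is a minimal, self-contained sketch of the access pattern they all share: call getField() to obtain a BSONElement, then check eoo() and type() before reading the value with a type-specific accessor. This is only an illustrative sketch assuming the legacy MongoDB C++ driver; the header path and the field names ("name", "percent", "splitKey") are placeholders chosen for the example, not taken from the code below.
#include <iostream>
#include "mongo/bson/bson.h"   // assumed header path; it differs between driver versions

using namespace mongo;

int main() {
    // Build a small document with the BSON() builder macro.
    BSONObj obj = BSON( "name" << "orders"
                        << "percent" << 50.0
                        << "splitKey" << BSON( "a" << 1 ) );

    // getField() returns a BSONElement; a missing field yields an EOO element.
    BSONElement name = obj.getField( "name" );
    if ( !name.eoo() && name.type() == String ) {
        std::cout << "name: " << name.valuestr() << std::endl;
    }

    // Numeric fields are read with the number*() accessors.
    std::cout << "percent: " << obj.getField( "percent" ).numberDouble() << std::endl;

    // Object-valued fields expose their sub-document through embeddedObject().
    BSONElement splitKey = obj.getField( "splitKey" );
    if ( splitKey.type() == Object ) {
        std::cout << "splitKey: " << splitKey.embeddedObject().toString() << std::endl;
    }

    // Asking for a field that does not exist is safe: eoo() is true.
    std::cout << "missing? " << obj.getField( "no_such_field" ).eoo() << std::endl;
    return 0;
}
The examples that follow repeat this shape with real field names: validate presence and type first, then read the value.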
Example 1: init
// PD_TRACE_DECLARE_FUNCTION ( SDB__CLSSPLIT_INIT, "_rtnSplit::init" )
INT32 _rtnSplit::init ( INT32 flags, INT64 numToSkip, INT64 numToReturn,
const CHAR * pMatcherBuff,
const CHAR * pSelectBuff,
const CHAR * pOrderByBuff,
const CHAR * pHintBuff )
{
INT32 rc = SDB_OK ;
PD_TRACE_ENTRY ( SDB__CLSSPLIT_INIT ) ;
const CHAR *pCollectionName = NULL ;
const CHAR *pTargetName = NULL ;
const CHAR *pSourceName = NULL ;
try
{
BSONObj boRequest ( pMatcherBuff ) ;
BSONElement beName = boRequest.getField ( CAT_COLLECTION_NAME ) ;
BSONElement beTarget = boRequest.getField ( CAT_TARGET_NAME ) ;
BSONElement beSplitKey = boRequest.getField ( CAT_SPLITVALUE_NAME ) ;
BSONElement beSource = boRequest.getField ( CAT_SOURCE_NAME ) ;
BSONElement bePercent = boRequest.getField ( CAT_SPLITPERCENT_NAME ) ;
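// getField() returns an EOO element when the requested field is absent, so the
// eoo()/type() checks below validate both presence and type before any value is read.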
PD_CHECK ( !beName.eoo() && beName.type() == String,
SDB_INVALIDARG, error, PDERROR,
"Invalid collection name: %s", beName.toString().c_str() ) ;
pCollectionName = beName.valuestr() ;
PD_CHECK ( ossStrlen ( pCollectionName ) <
DMS_COLLECTION_SPACE_NAME_SZ +
DMS_COLLECTION_NAME_SZ + 1,
SDB_INVALIDARG, error, PDERROR,
"Collection name is too long: %s", pCollectionName ) ;
ossStrncpy ( _szCollection, pCollectionName,
DMS_COLLECTION_SPACE_NAME_SZ +
DMS_COLLECTION_NAME_SZ + 1 ) ;
PD_CHECK ( !beTarget.eoo() && beTarget.type() == String,
SDB_INVALIDARG, error, PDERROR,
"Invalid target group name: %s",
beTarget.toString().c_str() ) ;
pTargetName = beTarget.valuestr() ;
PD_CHECK ( ossStrlen ( pTargetName ) < OP_MAXNAMELENGTH,
SDB_INVALIDARG, error, PDERROR,
"target group name is too long: %s",
pTargetName ) ;
ossStrncpy ( _szTargetName, pTargetName, OP_MAXNAMELENGTH ) ;
PD_CHECK ( !beSource.eoo() && beSource.type() == String,
SDB_INVALIDARG, error, PDERROR,
"Invalid source group name: %s",
beSource.toString().c_str() ) ;
pSourceName = beSource.valuestr() ;
PD_CHECK ( ossStrlen ( pSourceName ) < OP_MAXNAMELENGTH,
SDB_INVALIDARG, error, PDERROR,
"source group name is too long: %s",
pSourceName ) ;
ossStrncpy ( _szSourceName, pSourceName, OP_MAXNAMELENGTH ) ;
PD_CHECK ( !beSplitKey.eoo() && beSplitKey.type() == Object,
SDB_INVALIDARG, error, PDERROR,
"Invalid split key: %s",
beSplitKey.toString().c_str() ) ;
_splitKey = beSplitKey.embeddedObject () ;
_percent = bePercent.numberDouble() ;
}
catch ( std::exception &e )
{
PD_RC_CHECK ( SDB_SYS, PDERROR,
"Exception handled when parsing split request: %s",
e.what() ) ;
}
PD_TRACE4 ( SDB__CLSSPLIT_INIT,
PD_PACK_STRING ( pCollectionName ),
PD_PACK_STRING ( pTargetName ),
PD_PACK_STRING ( pSourceName ),
PD_PACK_STRING ( _splitKey.toString().c_str() ) ) ;
done:
PD_TRACE_EXITRC ( SDB__CLSSPLIT_INIT, rc ) ;
return rc ;
error:
goto done ;
}
Example 2: sync_pullOpLog
/* slave: pull some data from the master's oplog
note: not yet in db mutex at this point.
@return -1 error
0 ok, don't sleep
1 ok, sleep
*/
int ReplSource::sync_pullOpLog(int& nApplied) {
int okResultCode = 1;
string ns = string("local.oplog.$") + sourceName();
log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
bool tailing = true;
oplogReader.tailCheck();
bool initial = syncedTo.isNull();
if ( !oplogReader.haveCursor() || initial ) {
if ( initial ) {
// Important to grab last oplog timestamp before listing databases.
syncToTailOfRemoteLog();
BSONObj info;
bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
massert( 10389 , "Unable to get database list", ok );
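// listDatabases replies with { databases: [ { name, empty, ... }, ... ] }; since a BSON
// array is itself a document keyed "0", "1", ..., embeddedObject() lets us iterate it directly.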
BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
while( i.moreWithEOO() ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
string name = e.embeddedObject().getField( "name" ).valuestr();
if ( !e.embeddedObject().getBoolField( "empty" ) ) {
if ( name != "local" ) {
if ( only.empty() || only == name ) {
log( 2 ) << "adding to 'addDbNextPass': " << name << endl;
addDbNextPass.insert( name );
}
}
}
}
dblock lk;
save();
}
BSONObjBuilder q;
q.appendDate("$gte", syncedTo.asDate());
BSONObjBuilder query;
query.append("ts", q.done());
if ( !only.empty() ) {
// note we may here skip a LOT of data table scanning, a lot of work for the master.
query.appendRegex("ns", string("^") + only); // maybe append "\\." here?
}
BSONObj queryObj = query.done();
// e.g. queryObj = { ts: { $gte: syncedTo } }
oplogReader.tailingQuery(ns.c_str(), queryObj);
tailing = false;
}
else {
log(2) << "repl: tailing=true\n";
}
if( !oplogReader.haveCursor() ) {
problem() << "repl: dbclient::query returns null (conn closed?)" << endl;
oplogReader.resetConnection();
return -1;
}
// show any deferred database creates from a previous pass
{
set<string>::iterator i = addDbNextPass.begin();
if ( i != addDbNextPass.end() ) {
BSONObjBuilder b;
b.append("ns", *i + '.');
b.append("op", "db");
BSONObj op = b.done();
sync_pullOpLog_applyOperation(op, false);
}
}
if ( !oplogReader.more() ) {
if ( tailing ) {
log(2) << "repl: tailing & no new activity\n";
if( oplogReader.awaitCapable() )
okResultCode = 0; // don't sleep
}
else {
log() << "repl: " << ns << " oplog is empty\n";
}
{
dblock lk;
save();
}
return okResultCode;
}
OpTime nextOpTime;
{
BSONObj op = oplogReader.next();
BSONElement ts = op.getField("ts");
if ( ts.type() != Date && ts.type() != Timestamp ) {
//......... part of the code omitted here .........
Example 3: run
int run() {
if ( hasParam( "repair" ) ){
warning() << "repair is a work in progress" << endl;
return repair();
}
{
string q = getParam("query");
if ( q.size() )
_query = fromjson( q );
}
string opLogName = "";
unsigned long long opLogStart = 0;
if (hasParam("oplog")) {
if (hasParam("query") || hasParam("db") || hasParam("collection")) {
cout << "oplog mode is only supported on full dumps" << endl;
return -1;
}
BSONObj isMaster;
conn("true").simpleCommand("admin", &isMaster, "isMaster");
if (isMaster.hasField("hosts")) { // if connected to replica set member
opLogName = "local.oplog.rs";
}
else {
opLogName = "local.oplog.$main";
if ( ! isMaster["ismaster"].trueValue() ) {
cout << "oplog mode is only supported on master or replica set member" << endl;
return -1;
}
}
auth("local");
BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
if (op.isEmpty()) {
cout << "No operations in oplog. Please ensure you are connecting to a master." << endl;
return -1;
}
assert(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
// check if we're outputting to stdout
string out = getParam("out");
if ( out == "-" ) {
if ( _db != "*" && _coll != "*" ) {
writeCollectionStdout( _db+"."+_coll );
return 0;
}
else {
cout << "You must specify database and collection to print to stdout" << endl;
return -1;
}
}
_usingMongos = isMongos();
path root( out );
string db = _db;
if ( db == "*" ) {
cout << "all dbs" << endl;
auth( "admin" );
BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
if ( ! res["databases"].isABSONObj() ) {
error() << "output of listDatabases isn't what we expected, no 'databases' field:\n" << res << endl;
return -2;
}
BSONObj dbs = res["databases"].embeddedObjectUserCheck();
set<string> keys;
dbs.getFieldNames( keys );
for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
string key = *i;
if ( ! dbs[key].isABSONObj() ) {
error() << "database field not an object key: " << key << " value: " << dbs[key] << endl;
return -3;
}
BSONObj dbobj = dbs[key].embeddedObjectUserCheck();
const char * dbName = dbobj.getField( "name" ).valuestr();
if ( (string)dbName == "local" )
continue;
go ( dbName , root / dbName );
}
}
else {
auth( db );
go( db , root / db );
}
//......... part of the code omitted here .........
Example 4: _userCreateNS
bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
LOG(1) << "create collection " << ns << ' ' << options << endl;
if ( nsdetails(ns) ) {
err = "collection already exists";
return false;
}
long long size = Extent::initialSize(128);
{
BSONElement e = options.getField("size");
if ( e.isNumber() ) {
size = e.numberLong();
uassert( 10083 , "create collection invalid size spec", size >= 0 );
size += 0xff;
size &= 0xffffffffffffff00LL;
if ( size < Extent::minSize() )
size = Extent::minSize();
}
}
bool newCapped = false;
long long mx = 0;
if( options["capped"].trueValue() ) {
newCapped = true;
BSONElement e = options.getField("max");
if ( e.isNumber() ) {
mx = e.numberLong();
uassert( 16495,
"max in a capped collection has to be < 2^31 or not set",
NamespaceDetails::validMaxCappedDocs(&mx) );
}
}
cc().database()->createCollection( ns, options["capped"].trueValue(), &options );
Collection* collection = cc().database()->getCollection( ns );
verify( collection );
// $nExtents just for debug/testing.
BSONElement e = options.getField( "$nExtents" );
if ( e.type() == Array ) {
// We create one extent per array entry, with size specified
// by the array value.
BSONObjIterator i( e.embeddedObject() );
while( i.more() ) {
BSONElement e = i.next();
int size = int( e.number() );
verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
collection->increaseStorageSize( (int)size, false );
}
}
else if ( int( e.number() ) > 0 ) {
// We create '$nExtents' extents, each of size 'size'.
int nExtents = int( e.number() );
verify( size <= 0x7fffffff );
for ( int i = 0; i < nExtents; ++i ) {
verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
collection->increaseStorageSize( (int)size, false );
}
}
else {
// This is the non test case, where we don't have a $nExtents spec.
while ( size > 0 ) {
const int max = Extent::maxSize();
const int min = Extent::minSize();
int desiredExtentSize = static_cast<int> (size > max ? max : size);
desiredExtentSize = static_cast<int> (desiredExtentSize < min ? min : desiredExtentSize);
desiredExtentSize &= 0xffffff00;
Extent* e = collection->increaseStorageSize( (int)desiredExtentSize, true );
size -= e->length;
}
}
NamespaceDetails *d = nsdetails(ns);
verify(d);
bool ensure = true;
// respect autoIndexId if set. otherwise, create an _id index for all colls, except for
// capped ones in local w/o autoIndexID (reason for the exception is for the oplog and
// non-replicated capped colls)
if( options.hasField( "autoIndexId" ) ||
(newCapped && nsToDatabase( ns ) == "local" ) ) {
ensure = options.getField( "autoIndexId" ).trueValue();
}
if( ensure ) {
if( deferIdIndex )
*deferIdIndex = true;
//......... part of the code omitted here .........
Example 5: run
int run() {
if (mongoDumpGlobalParams.repair) {
return repair();
}
{
if (mongoDumpGlobalParams.query.size()) {
_query = fromjson(mongoDumpGlobalParams.query);
}
}
if (mongoDumpGlobalParams.dumpUsersAndRoles) {
uassertStatusOK(auth::getRemoteStoredAuthorizationVersion(&conn(true),
&_serverAuthzVersion));
uassert(17369,
mongoutils::str::stream() << "Backing up users and roles is only supported for "
"clusters with auth schema versions 1 or 3, found: " <<
_serverAuthzVersion,
_serverAuthzVersion == AuthorizationManager::schemaVersion24 ||
_serverAuthzVersion == AuthorizationManager::schemaVersion26Final);
}
string opLogName = "";
unsigned long long opLogStart = 0;
if (mongoDumpGlobalParams.useOplog) {
BSONObj isMaster;
conn("true").simpleCommand("admin", &isMaster, "isMaster");
if (isMaster.hasField("hosts")) { // if connected to replica set member
opLogName = "local.oplog.rs";
}
else {
opLogName = "local.oplog.$main";
if ( ! isMaster["ismaster"].trueValue() ) {
toolError() << "oplog mode is only supported on master or replica set member"
<< std::endl;
return -1;
}
}
BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
if (op.isEmpty()) {
toolError() << "No operations in oplog. Please ensure you are connecting to a "
<< "master." << std::endl;
return -1;
}
verify(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
// check if we're outputting to stdout
if (mongoDumpGlobalParams.outputDirectory == "-") {
if (toolGlobalParams.db != "" && toolGlobalParams.coll != "") {
writeCollectionStdout(toolGlobalParams.db + "." + toolGlobalParams.coll);
return 0;
}
else {
toolError() << "You must specify database and collection to print to stdout"
<< std::endl;
return -1;
}
}
_usingMongos = isMongos();
boost::filesystem::path root(mongoDumpGlobalParams.outputDirectory);
if (toolGlobalParams.db == "") {
if (toolGlobalParams.coll != "") {
toolError() << "--db must be specified with --collection" << std::endl;
return -1;
}
toolInfoLog() << "all dbs" << std::endl;
BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
if ( ! res["databases"].isABSONObj() ) {
toolError() << "output of listDatabases isn't what we expected, no 'databases' "
<< "field:\n" << res << std::endl;
return -2;
}
BSONObj dbs = res["databases"].embeddedObjectUserCheck();
set<string> keys;
dbs.getFieldNames( keys );
for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
string key = *i;
if ( ! dbs[key].isABSONObj() ) {
toolError() << "database field not an document key: " << key << " value: "
<< dbs[key] << std::endl;
return -3;
}
BSONObj dbobj = dbs[key].embeddedObjectUserCheck();
const char * dbName = dbobj.getField( "name" ).valuestr();
if ( (string)dbName == "local" )
continue;
//......... part of the code omitted here .........
Example 6: rtnExplain
// PD_TRACE_DECLARE_FUNCTION ( SDB_RTNEXPLAIN, "rtnExplain" )
INT32 rtnExplain( const CHAR *pCollectionName,
const BSONObj &selector,
const BSONObj &matcher,
const BSONObj &orderBy,
const BSONObj &hint,
SINT32 flags,
SINT64 numToSkip,
SINT64 numToReturn,
pmdEDUCB *cb, SDB_DMSCB *dmsCB,
SDB_RTNCB *rtnCB, INT64 &contextID,
rtnContextBase **ppContext )
{
INT32 rc = SDB_OK ;
PD_TRACE_ENTRY ( SDB_RTNEXPLAIN ) ;
SDB_ASSERT ( cb, "educb can't be NULL" ) ;
SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ;
SDB_ASSERT ( rtnCB, "rtnCB can't be NULL" ) ;
BSONObj explainOptions ;
BSONObj realHint ;
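// The hint document handed to explain may embed two sub-objects: the explain options
// and the real access-plan hint; absent or non-object fields simply leave these empty.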
BSONElement ele = hint.getField( FIELD_NAME_OPTIONS ) ;
if ( Object == ele.type() )
{
explainOptions = ele.embeddedObject() ;
}
ele = hint.getField( FIELD_NAME_HINT ) ;
if ( Object == ele.type() )
{
realHint = ele.embeddedObject() ;
}
rtnQueryOptions options( matcher, selector,
orderBy, realHint,
pCollectionName,
numToSkip, numToReturn,
OSS_BIT_CLEAR( flags, FLG_QUERY_EXPLAIN),
FALSE ) ;
rtnContextExplain *context = NULL ;
rc = rtnCB->contextNew( RTN_CONTEXT_EXPLAIN,
( rtnContext **)( &context ),
contextID, cb ) ;
if ( SDB_OK != rc )
{
PD_LOG( PDERROR, "failed to create explain context:%d", rc ) ;
goto error ;
}
rc = context->open( options, explainOptions ) ;
if ( SDB_OK != rc )
{
PD_LOG( PDERROR, "failed to open explain context:%d", rc ) ;
goto error ;
}
if ( ppContext )
{
*ppContext = context ;
}
done:
PD_TRACE_EXITRC( SDB_RTNEXPLAIN, rc ) ;
return rc ;
error:
if ( -1 != contextID )
{
rtnCB->contextDelete( contextID, cb ) ;
contextID = -1 ;
}
goto done ;
}
Example 7: _extractMsg
INT32 _spdFMP::_extractMsg( BSONObj &msg, BOOLEAN &extracted )
{
INT32 rc = SDB_OK ;
extracted = FALSE ;
SDB_ASSERT( 0 <= _expect, "impossible" ) ;
/// magic has already been found.
if ( sizeof( SPD_MAGIC ) == _expect )
{
found:
if ( (_totalRead - _itr) < (INT32)sizeof(INT32) )
{
extracted = FALSE ;
goto done ;
}
else
{
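// A BSON document begins with a 4-byte little-endian length prefix, so peek at it
// to decide whether the whole object has arrived in the read buffer yet.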
SINT32 bsonLen = *((SINT32 *)(_readBuf+_itr)) ;
if ( (_totalRead - _itr) < bsonLen )
{
rc = FALSE ;
goto done ;
}
else if ( (_totalRead - _itr) == bsonLen )
{
SDB_ASSERT( _itr >= (INT32)sizeof( SPD_MAGIC ) ,
"impossible" ) ;
BSONObj tmp ;
try
{
tmp = BSONObj( _readBuf + _itr ) ;
}
catch ( std::exception &e )
{
PD_LOG( PDERROR, "unexpected err happened:%s", e.what() ) ;
rc = SDB_SYS ;
goto error ;
}
if ( sizeof( SPD_MAGIC ) == _itr )
{
/// only a bson msg.
msg = tmp ;
extracted = TRUE ;
}
else
{
/// not only a bson msg.
_readBuf[_itr - 4] = '\0' ;
BSONElement retCode = tmp.getField( FMP_RES_CODE ) ;
BSONElement errMsg = tmp.getField( FMP_ERR_MSG ) ;
/// some code like 'print' may return more than a single bson msg.
/// we must parse its return code: if it is ok, we ignore the
/// printed output, otherwise we put it into errmsg.
if ( !retCode.eoo() && NumberInt != retCode.type() )
{
rc = SDB_SYS ;
PD_LOG( PDERROR,
"invalid type of retCode:%d", retCode.type() ) ;
goto error ;
}
else if ( !retCode.eoo() )
{
if ( SDB_OK != retCode.Int() )
{
if ( !errMsg.eoo() )
{
msg = tmp ;
}
else
{
BSONObjBuilder builder ;
builder.append( FMP_ERR_MSG, _readBuf ) ;
builder.append( retCode ) ;
msg = builder.obj() ;
}
}
else
{
msg = tmp ;
}
}
else
{
/// retCode is eoo.
msg = tmp ;
}
extracted = TRUE ;
}
}
else
{
SDB_ASSERT( FALSE, "impossible" ) ;
rc = SDB_SYS ;
PD_LOG( PDERROR, "left len can not be lager than objsize" ) ;
goto error ;
}
//......... part of the code omitted here .........
Example 8: go
void go( const string db , const boost::filesystem::path outdir ) {
log() << "DATABASE: " << db << "\t to \t" << outdir.string() << endl;
boost::filesystem::create_directories( outdir );
map <string, BSONObj> collectionOptions;
multimap <string, BSONObj> indexes;
vector <string> collections;
// Save indexes for database
string ins = db + ".system.indexes";
auto_ptr<DBClientCursor> cursor = conn( true ).query( ins.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->nextSafe();
const string name = obj.getField( "ns" ).valuestr();
indexes.insert( pair<string, BSONObj> (name, obj.getOwned()) );
}
string sns = db + ".system.namespaces";
cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->nextSafe();
const string name = obj.getField( "name" ).valuestr();
if (obj.hasField("options")) {
collectionOptions[name] = obj.getField("options").embeddedObject().getOwned();
}
// skip namespaces with $ in them only if we don't specify a collection to dump
if ( _coll == "" && name.find( ".$" ) != string::npos ) {
log(1) << "\tskipping collection: " << name << endl;
continue;
}
const string filename = name.substr( db.size() + 1 );
//if a particular collection is specified, and it's not this one, skip it
if ( _coll != "" && db + "." + _coll != name && _coll != name )
continue;
// raise error before writing collection with non-permitted filename chars in the name
size_t hasBadChars = name.find_first_of("/\0");
if (hasBadChars != string::npos){
error() << "Cannot dump " << name << ". Collection has '/' or null in the collection name." << endl;
continue;
}
// Don't dump indexes
if ( endsWith(name.c_str(), ".system.indexes") ) {
continue;
}
if ( _coll != "" && db + "." + _coll != name && _coll != name )
continue;
collections.push_back(name);
}
for (vector<string>::iterator it = collections.begin(); it != collections.end(); ++it) {
string name = *it;
const string filename = name.substr( db.size() + 1 );
writeCollectionFile( name , outdir / ( filename + ".bson" ) );
writeMetadataFile( name, outdir / (filename + ".metadata.json"), collectionOptions, indexes);
}
}
Example 9: flush
int LocalMemoryGridFile::flush() {
trace() << " -> LocalMemoryGridFile::flush {file: " << _filename << "}" << endl;
if (!_dirty) {
// Since, there are no dirty chunks, this does not need a flush
info() << "buffers are not dirty.. need not flush {filename: " << _filename << "}" << endl;
return 0;
}
size_t bufferLen = 0;
boost::shared_array<char> buffer = createFlushBuffer(bufferLen);
if (!buffer.get() && bufferLen > 0) {
// Failed to create flush buffer
return -ENOMEM;
}
// Get the existing gridfile from GridFS to get metadata and delete the
// file from the system
try {
ScopedDbConnection dbc(globalFSOptions._connectString);
GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);
GridFile origGridFile = gridFS.findFile(BSON("filename" << _filename));
if (!origGridFile.exists()) {
dbc.done();
warn() << "Requested file not found for flushing back data {file: " << _filename << "}" << endl;
return -EBADF;
}
//TODO: Make checks for appropriate object correctness
//i.e. do not update anything that is not a Regular File
//Check what happens in case of a link
gridFS.removeFile(_filename);
trace() << "Removing the current file from GridFS {file: " << _filename << "}" << endl;
//TODO: Check the remove status to see whether it was successful or not
//TODO: Rather have an update along with active / passive flag for the
//file
try {
GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);
// Create an empty file to signify the file creation and open a local file for the same
trace() << "Adding new file to GridFS {file: " << _filename << "}" << endl;
BSONObj fileObj = gridFS.storeFile(buffer.get(), bufferLen, _filename);
if (!fileObj.isValid()) {
warn() << "Failed to save file object in data flush {file: " << _filename << "}" << std::endl;
dbc.done();
return -EBADF;
}
// Update the last updated date for the document
BSONObj metadata = origGridFile.getMetadata();
BSONElement fileObjId = fileObj.getField("_id");
dbc->update(globalFSOptions._filesNS, BSON("_id" << fileObjId.OID()),
BSON("$set" << BSON(
"uploadDate" << origGridFile.getUploadDate()
<< "metadata.type" << "file"
<< "metadata.filename" << mgridfs::getPathBasename(_filename)
<< "metadata.directory" << mgridfs::getPathDirname(_filename)
<< "metadata.lastUpdated" << jsTime()
<< "metadata.uid" << metadata["uid"]
<< "metadata.gid" << metadata["gid"]
<< "metadata.mode" << metadata["mode"]
)
)
);
} catch (DBException& e) {
error() << "Caught exception in saving remote file in flush {code: " << e.getCode() << ", what: " << e.what()
<< ", exception: " << e.toString() << "}" << endl;
return -EIO;
}
dbc.done();
} catch (DBException& e) {
// Something failed in getting the file from GridFS
error() << "Caught exception in getting remote file for flush {code: " << e.getCode() << ", what: " << e.what()
<< ", exception: " << e.toString() << "}" << endl;
return -EIO;
}
_dirty = false;
debug() << "Completed flushing the file content to GridFS {file: " << _filename << "}" << endl;
return 0;
}
Example 10: dbEval
bool dbEval(const string& dbName, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
BSONElement e = cmd.firstElement();
uassert( 10046 , "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );
const char *code = 0;
switch ( e.type() ) {
case String:
case Code:
code = e.valuestr();
break;
case CodeWScope:
code = e.codeWScopeCode();
break;
default:
verify(0);
}
verify( code );
if ( ! globalScriptEngine ) {
errmsg = "db side execution is disabled";
return false;
}
const string userToken = ClientBasic::getCurrent()->getAuthorizationManager()
->getAuthenticatedPrincipalNamesToken();
auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbName, "dbeval" + userToken );
ScriptingFunction f = s->createFunction(code);
if ( f == 0 ) {
errmsg = (string)"compile failed: " + s->getError();
return false;
}
if ( e.type() == CodeWScope )
s->init( e.codeWScopeScopeDataUnsafe() );
s->localConnect( dbName.c_str() );
BSONObj args;
{
BSONElement argsElement = cmd.getField("args");
if ( argsElement.type() == Array ) {
args = argsElement.embeddedObject();
if ( edebug ) {
out() << "args:" << args.toString() << endl;
out() << "code:\n" << code << endl;
}
}
}
int res;
{
Timer t;
res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
int m = t.millis();
if ( m > cmdLine.slowMS ) {
out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
if ( m >= 1000 ) log() << code << endl;
else OCCASIONALLY log() << code << endl;
}
}
if (res || s->isLastRetNativeCode()) {
result.append("errno", (double) res);
errmsg = "invoke failed: ";
if (s->isLastRetNativeCode())
errmsg += "cannot return native function";
else
errmsg += s->getError();
return false;
}
s->append( result , "retval" , "__returnValue" );
return true;
}
Example 11: dbEval
static bool dbEval(OperationContext* txn,
const string& dbName,
const BSONObj& cmd,
BSONObjBuilder& result,
string& errmsg) {
BSONElement e = cmd.firstElement();
uassert( 10046 , "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );
const char *code = 0;
switch ( e.type() ) {
case String:
case Code:
code = e.valuestr();
break;
case CodeWScope:
code = e.codeWScopeCode();
break;
default:
verify(0);
}
verify( code );
if ( ! globalScriptEngine ) {
errmsg = "db side execution is disabled";
return false;
}
scoped_ptr<Scope> s(globalScriptEngine->newScope());
ScriptingFunction f = s->createFunction(code);
if ( f == 0 ) {
errmsg = (string)"compile failed: " + s->getError();
return false;
}
s->localConnectForDbEval(txn, dbName.c_str());
if ( e.type() == CodeWScope )
s->init( e.codeWScopeScopeDataUnsafe() );
BSONObj args;
{
BSONElement argsElement = cmd.getField("args");
if ( argsElement.type() == Array ) {
args = argsElement.embeddedObject();
if ( edebug ) {
log() << "args:" << args.toString() << endl;
log() << "code:\n" << code << endl;
}
}
}
int res;
{
Timer t;
res = s->invoke(f, &args, 0, storageGlobalParams.quota ? 10 * 60 * 1000 : 0);
int m = t.millis();
if (m > serverGlobalParams.slowMS) {
log() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
if ( m >= 1000 ) log() << code << endl;
else OCCASIONALLY log() << code << endl;
}
}
if (res || s->isLastRetNativeCode()) {
result.append("errno", (double) res);
errmsg = "invoke failed: ";
if (s->isLastRetNativeCode())
errmsg += "cannot return native function";
else
errmsg += s->getError();
return false;
}
s->append( result , "retval" , "__returnValue" );
return true;
}
Example 12: validate
//......... part of the code omitted here .........
Status s = storageValid(newElem, true);
if (!s.isOK())
return s;
// Check parents to make sure they are valid as well.
s = storageValidParents(newElem);
if (!s.isOK())
return s;
}
// Check if the updated field conflicts with immutable fields
immutableFieldRef.findConflicts(&current, &changedImmutableFields);
}
}
const bool checkIdField = (updatedFields.empty() && !original.isEmpty()) ||
updatedFields.findConflicts(&idFieldRef, NULL);
// Add _id to fields to check since it too is immutable
if (checkIdField)
changedImmutableFields.keepShortest(&idFieldRef);
else if (changedImmutableFields.empty()) {
// Return early if nothing changed which is immutable
return Status::OK();
}
LOG(4) << "Changed immutable fields: " << changedImmutableFields;
// 2.) Now compare values of the changed immutable fields (to make sure they haven't changed)
const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];
FieldRefSet::const_iterator where = changedImmutableFields.begin();
const FieldRefSet::const_iterator end = changedImmutableFields.end();
for (; where != end; ++where) {
const FieldRef& current = **where;
// Find the updated field in the updated document.
mutablebson::ConstElement newElem = updated.root();
size_t currentPart = 0;
while (newElem.ok() && currentPart < current.numParts())
newElem = newElem[current.getPart(currentPart++)];
if (!newElem.ok()) {
if (original.isEmpty()) {
// If the _id is missing and not required, then skip this check
if (!(current.dottedField() == idFieldName))
return Status(ErrorCodes::NoSuchKey,
mongoutils::str::stream() << "After applying the update, the new"
<< " document was missing the '"
<< current.dottedField()
<< "' (required and immutable) field.");
} else {
if (current.dottedField() != idFieldName)
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document with "
<< newIdElem.toString()
<< ", the '"
<< current.dottedField()
<< "' (required and immutable) field was "
"found to have been removed --"
<< original);
}
} else {
// Find the potentially affected field in the original document.
const BSONElement oldElem = dps::extractElementAtPath(original, current.dottedField());
const BSONElement oldIdElem = original.getField(idFieldName);
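// oldIdElem is EOO (ok() == false) when the original document had no _id; the error
// message below falls back to the new _id in that case.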
// Ensure no arrays, since neither _id nor shard keys can be in an array, or be one.
mb::ConstElement currElem = newElem;
while (currElem.ok()) {
if (currElem.getType() == Array) {
return Status(
ErrorCodes::NotSingleValueField,
mongoutils::str::stream()
<< "After applying the update to the document {"
<< (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
<< " , ...}, the (immutable) field '"
<< current.dottedField()
<< "' was found to be an array or array descendant.");
}
currElem = currElem.parent();
}
// If we have both (old and new), compare them. If we just have new we are good
if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, nullptr, false) != 0) {
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document {"
<< oldElem.toString()
<< " , ...}, the (immutable) field '"
<< current.dottedField()
<< "' was found to have been altered to "
<< newElem.toString());
}
}
}
return Status::OK();
}
Example 13: DBException
/* ****************************************************************************
*
* attributeType -
*
*/
static std::string attributeType
(
const std::string& tenant,
const std::vector<std::string>& servicePathV,
const std::string entityType,
const std::string attrName
)
{
std::string idType = std::string("_id.") + ENT_ENTITY_TYPE;
std::string idServicePath = std::string("_id.") + ENT_SERVICE_PATH;
std::string attributeName = std::string(ENT_ATTRS) + "." + attrName;
BSONObj query = BSON(idType << entityType <<
idServicePath << fillQueryServicePath(servicePathV) <<
attributeName << BSON("$exists" << true));
std::auto_ptr<DBClientCursor> cursor;
DBClientBase* connection = NULL;
LM_T(LmtMongo, ("query() in '%s' collection: '%s'",
getEntitiesCollectionName(tenant).c_str(),
query.toString().c_str()));
try
{
connection = getMongoConnection();
cursor = connection->query(getEntitiesCollectionName(tenant).c_str(), query);
/*
* We have observed that in some cases of DB errors (e.g. the database daemon is down) instead of
* raising an exception, the query() method sets the cursor to NULL. In this case, we raise the
* exception ourselves
*/
if (cursor.get() == NULL)
{
throw DBException("Null cursor from mongo (details on this is found in the source code)", 0);
}
releaseMongoConnection(connection);
LM_I(("Database Operation Successful (%s)", query.toString().c_str()));
}
catch (const DBException &e)
{
releaseMongoConnection(connection);
LM_E(("Database Error ('%s', '%s')", query.toString().c_str(), e.what()));
return "";
}
catch (...)
{
releaseMongoConnection(connection);
LM_E(("Database Error ('%s', '%s')", query.toString().c_str(), "generic exception"));
return "";
}
while (cursor->more())
{
BSONObj r = cursor->next();
LM_T(LmtMongo, ("retrieved document: '%s'", r.toString().c_str()));
/* It could happen that different entities within the same entity type may have attributes with the same name
* but different types. In that case, one type (at random) is returned. A list could be returned but the
* NGSIv2 operations only allow setting one type */
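/* The chained getField()/embeddedObject() calls below walk the ENT_ATTRS.<attrName>
* sub-document and read its ENT_ATTRS_TYPE field as a string */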
return r.getField(ENT_ATTRS).embeddedObject().getField(attrName).embeddedObject().getStringField(ENT_ATTRS_TYPE);
}
return "";
}
Example 14: atoi
//......... part of the code omitted here .........
}
catch (const DBException& e)
{
releaseMongoConnection(connection);
std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
" - command: " + cmd.toString() +
" - exception: " + e.what();
LM_E(("Database Error (%s)", err.c_str()));
responseP->statusCode.fill(SccReceiverInternalError, err);
reqSemGive(__FUNCTION__, "query types request", reqSemTaken);
return SccOk;
}
catch (...)
{
releaseMongoConnection(connection);
std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
" - command: " + cmd.toString() +
" - exception: " + "generic";
LM_E(("Database Error (%s)", err.c_str()));
responseP->statusCode.fill(SccReceiverInternalError, err);
reqSemGive(__FUNCTION__, "query types request", reqSemTaken);
return SccOk;
}
// Processing result to build response
LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));
std::vector<BSONElement> resultsArray = result.getField("result").Array();
if (resultsArray.size() == 0)
{
responseP->statusCode.fill(SccContextElementNotFound);
reqSemGive(__FUNCTION__, "query types request", reqSemTaken);
return SccOk;
}
/* Another strategy to implement pagination is to use the $skip and $limit operators in the
* aggregation framework. However, doing so, we don't know the total number of results, which can
* be needed in the case of details=on (using that approach, we need to do two queries: one to get
* the count and another to get the actual results with $skip and $limit, in the same "transaction" to
* avoid incoherence between both if some entity type is created or deleted in the process).
*
* However, considering that the number of types will be small compared with the number of entities,
* the current approach seems to be ok
*/
for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
{
BSONObj resultItem = resultsArray[ix].embeddedObject();
TypeEntity* entityType = new TypeEntity(resultItem.getStringField("_id"));
std::vector<BSONElement> attrsArray = resultItem.getField("attrs").Array();
entityType->count = countEntities(tenant, servicePathV, entityType->type);
if (!attrsArray[0].isNull())
{
for (unsigned int jx = 0; jx < attrsArray.size(); ++jx)
{
/* This is where NULL elements in the resulting attrs vector are pruned */
if (attrsArray[jx].isNull())
Example 15: ca
/* ****************************************************************************
*
* createEntity -
*/
TEST(mongoNotifyContextRequest, createEntity)
{
HttpStatusCode ms;
NotifyContextRequest req;
NotifyContextResponse res;
/* Prepare database */
prepareDatabase();
/* Forge the request */
ContextElementResponse cer;
req.subscriptionId.set("51307b66f481db11bf860001");
req.originator.set("localhost");
cer.contextElement.entityId.fill("E10", "T10", "false");
ContextAttribute ca("A1", "TA1", "new_val");
cer.contextElement.contextAttributeVector.push_back(&ca);
cer.statusCode.fill(SccOk);
req.contextElementResponseVector.push_back(&cer);
/* Prepare mock */
TimerMock* timerMock = new TimerMock();
ON_CALL(*timerMock, getCurrentTime())
.WillByDefault(Return(1360232700));
setTimer(timerMock);
/* Invoke the function in mongoBackend library */
ms = mongoNotifyContext(&req, &res);
/* Check response is as expected */
EXPECT_EQ(SccOk, ms);
EXPECT_EQ(SccOk, res.responseCode.code);
EXPECT_EQ("OK", res.responseCode.reasonPhrase);
EXPECT_EQ(0, res.responseCode.details.size());
/* Check that every involved collection at MongoDB is as expected */
/* Note we are using EXPECT_STREQ() for some cases, as Mongo Driver returns const char*, not string
* objects (see http://code.google.com/p/googletest/wiki/Primer#String_Comparison) */
DBClientBase* connection = getMongoConnection();
/* entities collection */
BSONObj ent;
std::vector<BSONElement> attrs;
ASSERT_EQ(6, connection->count(ENTITIES_COLL, BSONObj()));
ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E1" << "_id.type" << "T1"));
EXPECT_STREQ("E1", C_STR_FIELD(ent.getObjectField("_id"), "id"));
EXPECT_STREQ("T1", C_STR_FIELD(ent.getObjectField("_id"), "type"));
EXPECT_FALSE(ent.hasField("modDate"));
attrs = ent.getField("attrs").Array();
ASSERT_EQ(2, attrs.size());
BSONObj a1 = getAttr(attrs, "A1", "TA1");
BSONObj a2 = getAttr(attrs, "A2", "TA2");
EXPECT_STREQ("A1", C_STR_FIELD(a1, "name"));
EXPECT_STREQ("TA1",C_STR_FIELD(a1, "type"));
EXPECT_STREQ("val1", C_STR_FIELD(a1, "value"));
EXPECT_FALSE(a1.hasField("modDate"));
EXPECT_STREQ("A2", C_STR_FIELD(a2, "name"));
EXPECT_STREQ("TA2", C_STR_FIELD(a2, "type"));
EXPECT_FALSE(a2.hasField("value"));
EXPECT_FALSE(a2.hasField("modDate"));
ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E2" << "_id.type" << "T2"));
EXPECT_STREQ("E2", C_STR_FIELD(ent.getObjectField("_id"), "id"));
EXPECT_STREQ("T2", C_STR_FIELD(ent.getObjectField("_id"), "type"));
EXPECT_FALSE(ent.hasField("modDate"));
attrs = ent.getField("attrs").Array();
ASSERT_EQ(2, attrs.size());
BSONObj a3 = getAttr(attrs, "A3", "TA3");
BSONObj a4 = getAttr(attrs, "A4", "TA4");
EXPECT_STREQ("A3", C_STR_FIELD(a3, "name"));
EXPECT_STREQ("TA3", C_STR_FIELD(a3, "type"));
EXPECT_STREQ("val3", C_STR_FIELD(a3, "value"));
EXPECT_FALSE(a3.hasField("modDate"));
EXPECT_STREQ("A4", C_STR_FIELD(a4, "name"));
EXPECT_STREQ("TA4", C_STR_FIELD(a4, "type"));
EXPECT_FALSE(a4.hasField("value"));
EXPECT_FALSE(a4.hasField("modDate"));
ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E3" << "_id.type" << "T3"));
EXPECT_STREQ("E3", C_STR_FIELD(ent.getObjectField("_id"), "id"));
EXPECT_STREQ("T3", C_STR_FIELD(ent.getObjectField("_id"), "type"));
EXPECT_FALSE(ent.hasField("modDate"));
attrs = ent.getField("attrs").Array();
ASSERT_EQ(2, attrs.size());
BSONObj a5 = getAttr(attrs, "A5", "TA5");
BSONObj a6 = getAttr(attrs, "A6", "TA6");
EXPECT_STREQ("A5", C_STR_FIELD(a5, "name"));
EXPECT_STREQ("TA5", C_STR_FIELD(a5, "type"));
EXPECT_STREQ("val5", C_STR_FIELD(a5, "value"));
EXPECT_FALSE(a5.hasField("modDate"));
EXPECT_STREQ("A6", C_STR_FIELD(a6, "name"));
EXPECT_STREQ("TA6", C_STR_FIELD(a6, "type"));
EXPECT_FALSE(a6.hasField("value"));
EXPECT_FALSE(a6.hasField("modDate"));
//......... part of the code omitted here .........