This article collects and organizes typical usage examples of the C++ method OID::isSet. If you have been wondering how OID::isSet is used in practice, or what real-world calls to it look like, the hand-picked code examples below may help. You can also explore further usage examples of the OID class that the method belongs to.
The following shows 10 code examples of the OID::isSet method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
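Across these examples, OID::isSet() distinguishes a real ObjectId from the default-constructed, all-zero value, so a default OID doubles as a "not provided" sentinel. Below is a minimal sketch of that contract, assuming the mongo OID header lives at "mongo/bson/oid.h" and using only members that appear in the examples (gen(), clear(), isSet()):

    #include <cassert>

    #include "mongo/bson/oid.h"  // assumed header path for mongo::OID

    int main() {
        mongo::OID id;            // default-constructed: all zero bytes
        assert(!id.isSet());      // an unset OID reports isSet() == false

        id = mongo::OID::gen();   // generate a fresh ObjectId
        assert(id.isSet());       // a generated OID is always set

        id.clear();               // reset back to the all-zero sentinel
        assert(!id.isSet());
        return 0;
    }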
Example 1: applyUpdate
bool WriteBatchExecutor::applyUpdate( const string& ns,
                                      const BatchedUpdateDocument& updateOp,
                                      CurOp* currentOp,
                                      WriteStats* stats,
                                      BatchedErrorDetail* error ) {
    OpDebug& opDebug = currentOp->debug();

    _opCounters->gotUpdate();

    BSONObj queryObj = updateOp.getQuery();
    BSONObj updateObj = updateOp.getUpdateExpr();
    bool multi = updateOp.isMultiSet() ? updateOp.getMulti() : false;
    bool upsert = updateOp.isUpsertSet() ? updateOp.getUpsert() : false;

    currentOp->setQuery( queryObj );
    opDebug.op = dbUpdate;
    opDebug.query = queryObj;

    bool resExisting = false;
    long long resNum = 0;
    OID resUpserted = OID();
    try {
        const NamespaceString requestNs( ns );
        UpdateRequest request( requestNs );

        request.setQuery( queryObj );
        request.setUpdates( updateObj );
        request.setUpsert( upsert );
        request.setMulti( multi );
        request.setUpdateOpLog();

        UpdateResult res = update( request, &opDebug );

        resExisting = res.existing;
        resNum = res.numMatched;
        resUpserted = res.upserted;

        stats->numUpdated += !resUpserted.isSet() ? resNum : 0;
        stats->numUpserted += resUpserted.isSet() ? 1 : 0;
    }
    catch ( const UserException& ex ) {
        opDebug.exceptionInfo = ex.getInfo();
        toBatchedError( ex, error );
        return false;
    }

    _le->recordUpdate( resExisting, resNum, resUpserted );

    return true;
}
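Example 1 uses the upserted OID to decide which counter to bump: a plain update leaves res.upserted unset, while an upsert fills it in with the new document's _id. The sketch below isolates that branch; WriteStats and UpdateResult here are hypothetical stand-ins, not the real mongo types:

    #include <iostream>

    #include "mongo/bson/oid.h"  // assumed header path for mongo::OID

    // Hypothetical stand-ins for the counters and result used in example 1.
    struct WriteStats { long long numUpdated = 0; long long numUpserted = 0; };
    struct UpdateResult { long long numMatched = 0; mongo::OID upserted; };

    void tally(const UpdateResult& res, WriteStats* stats) {
        if (res.upserted.isSet()) {
            stats->numUpserted += 1;             // a new document was inserted
        } else {
            stats->numUpdated += res.numMatched; // existing documents were modified
        }
    }

    int main() {
        WriteStats stats;

        UpdateResult updated;                    // upserted left unset -> plain update
        updated.numMatched = 3;
        tally(updated, &stats);

        UpdateResult upserted;
        upserted.upserted = mongo::OID::gen();   // set -> counts as one upsert
        tally(upserted, &stats);

        std::cout << stats.numUpdated << " updated, "
                  << stats.numUpserted << " upserted\n";  // 3 updated, 1 upserted
        return 0;
    }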
Example 2: updateSlaveLocation
void ClientCursor::updateSlaveLocation(OperationContext* txn, CurOp& curop) {
    if (_slaveReadTill.isNull())
        return;

    verify(str::startsWith(_ns.c_str(), "local.oplog."));

    Client* c = curop.getClient();
    verify(c);

    OID rid = c->getRemoteID();
    if (!rid.isSet())
        return;

    repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
}
Example 3: queueWriteBack
void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) {
    static mongo::mutex xxx( "WriteBackManager::queueWriteBack tmp" );
    static OID lastOID;

    scoped_lock lk( xxx );
    const BSONElement& e = o["id"];

    if ( lastOID.isSet() ) {
        if ( e.OID() < lastOID ) {
            log() << "this could fail" << endl;
            printStackTrace();
        }
    }
    lastOID = e.OID();

    getWritebackQueue( remote )->queue.push( o );
}
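Example 3 leans on ObjectIds being roughly time-ordered: once lastOID is set, an incoming id that compares lower than it hints at out-of-order delivery. A minimal sketch of the same guard, assuming mongo::OID's operator< used above; OrderChecker is a hypothetical helper:

    #include <iostream>

    #include "mongo/bson/oid.h"  // assumed header path for mongo::OID

    // Hypothetical checker mirroring the lastOID guard in example 3.
    class OrderChecker {
    public:
        // Returns false if 'id' arrives out of order relative to the last one seen.
        bool observe(const mongo::OID& id) {
            bool inOrder = true;
            if (_last.isSet() && id < _last) {  // only compare once we have a baseline
                inOrder = false;
            }
            _last = id;
            return inOrder;
        }

    private:
        mongo::OID _last;  // default-constructed, so isSet() is false until first observe()
    };

    int main() {
        OrderChecker checker;
        mongo::OID first = mongo::OID::gen();
        mongo::OID second = mongo::OID::gen();
        std::cout << checker.observe(second) << "\n";  // 1: no baseline yet
        std::cout << checker.observe(first) << "\n";   // likely 0: older id seen after newer one
        return 0;
    }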
Example 4: parseBSON
void CollectionType::parseBSON(BSONObj source) {
    clear();

    bool ok = true;
    ok &= FieldParser::extract(source, ns, "", &_ns);
    ok &= FieldParser::extract(source, primary, "", &_primary);
    ok &= FieldParser::extract(source, keyPattern, BSONObj(), &_keyPattern);
    ok &= FieldParser::extract(source, unique, false, &_unique);
    ok &= FieldParser::extract(source, createdAt, 0ULL, &_createdAt);
    ok &= FieldParser::extract(source, noBalance, false, &_noBalance);
    ok &= FieldParser::extract(source, epoch, OID(), &_epoch);
    if (! ok) {
        clear();
        return;
    }

    //
    // backward compatibility
    //

    // 'createdAt' used to be called 'lastmod' up to 2.2.
    Date_t lastmod;
    if (! FieldParser::extract(source, DEPRECATED_lastmod, 0ULL, &lastmod)) {
        clear();
        return;
    }
    else if (lastmod != 0ULL) {
        _createdAt = lastmod;
    }

    // There was a flag to mark a collection as logically dropped, up to 2.2.
    bool dropped;
    if (! FieldParser::extract(source, DEPRECATED_dropped, false, &dropped) || dropped) {
        clear();
        return;
    }

    // 'lastmodEpoch' was a transitional format for 'epoch', up to 2.2.
    OID lastmodEpoch;
    if (! FieldParser::extract(source, DEPRECATED_lastmodEpoch, OID(), &lastmodEpoch)) {
        clear();
        return;
    }
    else if (lastmodEpoch.isSet()) {
        _epoch = lastmodEpoch;
    }
}
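Example 4 passes OID() as the default when extracting the legacy 'lastmodEpoch' field, so a later isSet() check tells whether the field was actually present. The sketch below shows the same presence test against a BSON document, reusing the builder/element calls that appear elsewhere in these examples; extractEpoch and the header path are illustrative assumptions:

    #include <iostream>

    #include "mongo/db/jsobj.h"  // assumed header for BSONObj / BSONObjBuilder

    using namespace mongo;

    // Returns the OID stored under 'fieldName', or a default (unset) OID when the
    // field is missing -- mirroring the OID() default used in example 4.
    OID extractEpoch(const BSONObj& doc, const char* fieldName) {
        BSONElement e = doc[fieldName];
        if (e.type() == jstOID) {
            return e.OID();
        }
        return OID();  // unset sentinel: the caller checks isSet()
    }

    int main() {
        OID epoch = OID::gen();

        BSONObjBuilder b;
        b.appendOID("lastmodEpoch", &epoch);   // illustrative legacy field name
        BSONObj withField = b.obj();
        BSONObj withoutField;                  // empty document, field absent

        std::cout << extractEpoch(withField, "lastmodEpoch").isSet() << "\n";    // 1
        std::cout << extractEpoch(withoutField, "lastmodEpoch").isSet() << "\n"; // 0
        return 0;
    }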
Example 5: mergeChunks
bool mergeChunks( OperationContext* txn,
                  const NamespaceString& nss,
                  const BSONObj& minKey,
                  const BSONObj& maxKey,
                  const OID& epoch,
                  string* errMsg ) {
    //
    // Get sharding state up-to-date
    //

    ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                          *errMsg );
    if ( !configLoc.isValid() ) {
        warning() << *errMsg << endl;
        return false;
    }

    //
    // Get the distributed lock
    //

    ScopedDistributedLock collLock( configLoc, nss.ns() );
    collLock.setLockMessage( stream() << "merging chunks in " << nss.ns() << " from "
                                      << minKey << " to " << maxKey );

    Status acquisitionStatus = collLock.tryAcquire();
    if (!acquisitionStatus.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(acquisitionStatus);

        warning() << *errMsg << endl;
        return false;
    }

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState.refreshMetadataNow(txn, nss.ns(), &shardVersion);

    if ( !status.isOK() ) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy( status.reason() );

        warning() << *errMsg << endl;
        return false;
    }

    if ( epoch.isSet() && shardVersion.epoch() != epoch ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " has changed" << " since merge was sent" << "(sent epoch : "
                           << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";

        warning() << *errMsg << endl;
        return false;
    }

    CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );

    if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";

        warning() << *errMsg << endl;
        return false;
    }

    dassert( metadata->getShardVersion().equals( shardVersion ) );

    if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {
        *errMsg = stream() << "could not merge chunks, the range "
                           << rangeToString( minKey, maxKey ) << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();

        warning() << *errMsg << endl;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    OwnedPointerVector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    itChunk.setMin( minKey );
    itChunk.setMax( minKey );
    itChunk.setNS( nss.ns() );

    //......... remainder of this example omitted .........
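Examples 5 and 8 treat an unset epoch as "no epoch supplied" and only reject the merge when a set epoch disagrees with the shard's current one. The guard in isolation, with a hypothetical epochStillMatches helper:

    #include <iostream>

    #include "mongo/bson/oid.h"  // assumed header path for mongo::OID

    // Hypothetical helper: returns false when the caller supplied an epoch that no
    // longer matches the shard's current epoch (the check used in examples 5 and 8).
    bool epochStillMatches(const mongo::OID& sentEpoch, const mongo::OID& currentEpoch) {
        if (sentEpoch.isSet() && sentEpoch != currentEpoch) {
            return false;  // collection changed since the merge command was sent
        }
        return true;       // an unset epoch means "skip the check"
    }

    int main() {
        mongo::OID current = mongo::OID::gen();
        std::cout << epochStillMatches(mongo::OID(), current) << "\n";       // 1: no epoch sent
        std::cout << epochStillMatches(current, current) << "\n";            // 1: matches
        std::cout << epochStillMatches(mongo::OID::gen(), current) << "\n";  // 0: stale epoch
        return 0;
    }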
Example 6: doUpgradeV3ToV4
/**
 * Upgrade v3 to v4 described here.
 *
 * This upgrade takes a config server without collection epochs (potentially) and adds
 * epochs to all mongo processes.
 *
 */
bool doUpgradeV3ToV4(const ConnectionString& configLoc,
                     const VersionType& lastVersionInfo,
                     string* errMsg)
{
    string dummy;
    if (!errMsg) errMsg = &dummy;

    verify(lastVersionInfo.getCurrentVersion() == UpgradeHistory_NoEpochVersion);

    if (lastVersionInfo.isUpgradeIdSet() && lastVersionInfo.getUpgradeId().isSet()) {
        //
        // Another upgrade failed, so cleanup may be necessary
        //

        BSONObj lastUpgradeState = lastVersionInfo.getUpgradeState();

        bool inCriticalSection;
        if (!FieldParser::extract(lastUpgradeState,
                                  inCriticalSectionField,
                                  &inCriticalSection,
                                  errMsg))
        {
            *errMsg = stream() << "problem reading previous upgrade state" << causedBy(errMsg);
            return false;
        }

        if (inCriticalSection) {
            // Manual intervention is needed here.  Somehow our upgrade didn't get applied
            // consistently across config servers.
            *errMsg = cannotCleanupMessage;
            return false;
        }

        if (!_cleanupUpgradeState(configLoc, lastVersionInfo.getUpgradeId(), errMsg)) {
            // If we can't cleanup the old upgrade state, the user might have done it for us,
            // not a fatal problem (we'll just end up with extra collections).
            warning() << "could not cleanup previous upgrade state" << causedBy(errMsg) << endl;
            *errMsg = "";
        }
    }

    //
    // Check the versions of other mongo processes in the cluster before upgrade.
    // We can't upgrade if there are active pre-v2.2 processes in the cluster.
    //

    Status mongoVersionStatus = checkClusterMongoVersions(configLoc,
                                                          string(minMongoProcessVersion));

    if (!mongoVersionStatus.isOK()) {
        *errMsg = stream() << "cannot upgrade with pre-v" << minMongoProcessVersion
                           << " mongo processes active in the cluster"
                           << causedBy(mongoVersionStatus);
        return false;
    }

    VersionType newVersionInfo;
    lastVersionInfo.cloneTo(&newVersionInfo);

    // Set our upgrade id and state
    OID upgradeId = OID::gen();
    newVersionInfo.setUpgradeId(upgradeId);
    newVersionInfo.setUpgradeState(BSONObj());

    // Write our upgrade id and state
    {
        scoped_ptr<ScopedDbConnection> connPtr;
        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            *errMsg = stream() << "could not initialize version info for upgrade"
                               << causedBy(e);

            //......... remainder of this example omitted .........
Example 7: setLockID
void LocksType::setLockID(const OID& lockID) {
    invariant(lockID.isSet());
    _lockID = lockID;
}
Example 8: mergeChunks
bool mergeChunks(OperationContext* txn,
                 const NamespaceString& nss,
                 const BSONObj& minKey,
                 const BSONObj& maxKey,
                 const OID& epoch,
                 string* errMsg) {
    // Get the distributed lock
    string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey << " to "
                                 << maxKey;
    auto scopedDistLock = grid.catalogManager(txn)->distLock(
        txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);

    if (!scopedDistLock.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(scopedDistLock.getStatus());
        warning() << *errMsg;
        return false;
    }

    ShardingState* shardingState = ShardingState::get(txn);

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);

    if (!status.isOK()) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy(status.reason());

        warning() << *errMsg;
        return false;
    }

    if (epoch.isSet() && shardVersion.epoch() != epoch) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns() << " has changed"
                           << " since merge was sent"
                           << "(sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";

        warning() << *errMsg;
        return false;
    }

    shared_ptr<CollectionMetadata> metadata = shardingState->getCollectionMetadata(nss.ns());

    if (!metadata || metadata->getKeyPattern().isEmpty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";

        warning() << *errMsg;
        return false;
    }

    dassert(metadata->getShardVersion().equals(shardVersion));

    if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
        *errMsg = stream() << "could not merge chunks, the range " << rangeToString(minKey, maxKey)
                           << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();

        warning() << *errMsg;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    std::vector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    itChunk.setMin(minKey);
    itChunk.setMax(minKey);
    itChunk.setNS(nss.ns());
    itChunk.setShard(shardingState->getShardName());

    while (itChunk.getMax().woCompare(maxKey) < 0 &&
           metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
        chunksToMerge.push_back(itChunk);
    }

    if (chunksToMerge.empty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    //
    //......... remainder of this example omitted .........
Example 9: setEpoch
void ShardCollectionType::setEpoch(const OID& epoch) {
    invariant(epoch.isSet());
    _epoch = epoch;
}
Example 10: run
void WriteBackListener::run() {
    OID lastID;
    lastID.clear();
    int secsToSleep = 0;
    while ( ! inShutdown() && Shard::isMember( _addr ) ) {
        if ( lastID.isSet() ) {
            scoped_lock lk( _seenWritebacksLock );
            _seenWritebacks.insert( lastID );
            lastID.clear();
        }

        try {
            ScopedDbConnection conn( _addr );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
                    log() << "writebacklisten command failed! " << result << endl;
                    conn.done();
                    continue;
                }
            }

            log(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );
            if ( data.getBoolField( "writeBack" ) ) {
                string ns = data["ns"].valuestrsafe();

                {
                    BSONElement e = data["id"];
                    if ( e.type() == jstOID )
                        lastID = e.OID();
                }

                int len;
                Message m( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , m.header()->valid() );

                DBConfigPtr db = grid.getDBConfig( ns );
                ShardChunkVersion needVersion( data["version"] );

                log(1) << "writeback id: " << lastID << " needVersion : " << needVersion.toString()
                       << " mine : " << db->getChunkManager( ns )->getVersion().toString() << endl; // TODO change to log(3)

                if ( logLevel ) log(1) << debugString( m ) << endl;

                if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
                    // this means when the write went originally, the version was old
                    // if we're here, it means we've already updated the config, so don't need to do again
                    //db->getChunkManager( ns , true ); // SERVER-1349
                }
                else {
                    // we received a writeback object that was sent to a previous version of a shard
                    // the actual shard may not have the object the writeback operation is for
                    // we need to reload the chunk manager and get the new shard versions
                    db->getChunkManager( ns , true );
                }

                Request r( m , 0 );
                r.init();
                r.process();
            }
            else if ( result["noop"].trueValue() ) {
                // no-op
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            conn.done();
            secsToSleep = 0;
            continue;
        }
        catch ( const std::exception& e ) {
            if ( inShutdown() ) {
                // we're shutting down, so just clean up
                return;
            }

            log() << "WriteBackListener exception : " << e.what() << endl;

            // It's possible this shard was removed
            Shard::reloadShardInfo();
        }
        catch ( ... ) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }

        secsToSleep++;
        sleepsecs(secsToSleep);
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }

    log() << "WriteBackListener exiting : address no longer in cluster " << _addr;

    //......... remainder of this example omitted .........
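Example 10 clears lastID at the top of each loop pass, records the id of any writeback it processes, and only inserts set ids into _seenWritebacks on the next pass. A compact sketch of that bookkeeping with a plain std::set (the real listener guards the set with a mutex, omitted here):

    #include <iostream>
    #include <set>

    #include "mongo/bson/oid.h"  // assumed header path for mongo::OID

    int main() {
        std::set<mongo::OID> seenWritebacks;  // stands in for _seenWritebacks in example 10
        mongo::OID lastID;
        lastID.clear();                       // start with an unset id

        // Simulate two loop passes: one that handled a writeback, one that did not.
        for (int pass = 0; pass < 2; ++pass) {
            if (lastID.isSet()) {             // only remember ids we actually processed
                seenWritebacks.insert(lastID);
                lastID.clear();
            }
            if (pass == 0) {
                lastID = mongo::OID::gen();   // pretend this pass handled a writeback
            }
        }

        std::cout << "seen: " << seenWritebacks.size() << "\n";  // 1
        return 0;
    }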