This article collects and organizes typical usage examples of the C++ method StatusWith::getValue. If you are wondering what StatusWith::getValue does, how to call it, or what real-world code that uses it looks like, the curated examples below should help; they also serve as a further introduction to the enclosing StatusWith class.
The following 15 code examples of StatusWith::getValue are all drawn from MongoDB and its storage-engine test suites, and are listed in order of popularity.
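Before the examples, here is a minimal, self-contained sketch of the pattern they all share: call a function that returns a StatusWith<T>, check isOK(), and only then call getValue(); on failure, propagate getStatus() instead. Note that this toy StatusWith is an illustrative assumption for the sketch only; MongoDB's real class (in mongo/base/status_with.h) carries a full Status object rather than a plain error string.

#include <cassert>
#include <iostream>
#include <string>
#include <utility>

// Toy stand-in for mongo::StatusWith<T> (illustrative only): holds either
// an error string or a value of type T.
template <typename T>
class StatusWith {
public:
    static StatusWith makeError(std::string error) {
        StatusWith sw;
        sw._error = std::move(error);
        sw._ok = false;
        return sw;
    }
    static StatusWith makeValue(T value) {
        StatusWith sw;
        sw._value = std::move(value);
        sw._ok = true;
        return sw;
    }

    bool isOK() const { return _ok; }
    const std::string& getStatus() const { return _error; }    // empty when OK
    const T& getValue() const { assert(_ok); return _value; }  // only call after isOK()

private:
    StatusWith() = default;
    T _value{};
    std::string _error;
    bool _ok = false;
};

// A fallible function in the style of the examples below.
StatusWith<int> parsePositive(int n) {
    if (n <= 0)
        return StatusWith<int>::makeError("value must be positive");
    return StatusWith<int>::makeValue(n);
}

int main() {
    StatusWith<int> res = parsePositive(42);
    if (res.isOK()) {
        std::cout << "got " << res.getValue() << "\n";  // safe: isOK() checked first
    } else {
        std::cout << "error: " << res.getStatus() << "\n";
    }
    return 0;
}

The MongoDB snippets below all follow this shape: every getValue() call is guarded by a successful isOK() check, or by an ASSERT_OK on the corresponding getStatus().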
Example 1: step
Status NativeSaslAuthenticationSession::step(const StringData& inputData,
                                             std::string* outputData) {
    StatusWith<bool> status = _saslConversation->step(inputData, outputData);
    if (status.isOK()) {
        _done = status.getValue();
    }
    return status.getStatus();
}
Example 2: td
TEST( RocksRecordStoreTest, Update1 ) {
    unittest::TempDir td( _rocksRecordStoreTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

    {
        RocksRecordStore rs( "foo.bar", db.get(),
                             db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );

        string s1 = "eliot1";
        string s2 = "eliot2 and more";

        DiskLoc loc;
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
                                                           s1.c_str(),
                                                           s1.size() + 1,
                                                           -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
            }
            ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
        }

        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.updateRecord( &opCtx,
                                                           loc,
                                                           s2.c_str(),
                                                           s2.size() + 1,
                                                           -1,
                                                           NULL );
                ASSERT_OK( res.getStatus() );
                ASSERT( loc == res.getValue() );
            }
            ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
        }
    }
}
Example 3: uow
// Insert multiple records and create a random iterator for the record store
TEST(RecordStoreTestHarness, GetRandomIteratorNonEmpty) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const unsigned nToInsert =
        5000;  // should be non-trivial amount, so we get multiple btree levels
    RecordId locs[nToInsert];
    for (unsigned i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    set<RecordId> remain(locs, locs + nToInsert);
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        auto cursor = rs->getRandomCursor(opCtx.get());

        // returns NULL if getRandomCursor is not supported
        if (!cursor) {
            return;
        }

        // Iterate documents and mark those visited, but let at least one remain
        for (unsigned i = 0; i < nToInsert - 1; i++) {
            // Get a new cursor once in a while, shouldn't affect things
            if (i % (nToInsert / 8) == 0) {
                cursor = rs->getRandomCursor(opCtx.get());
            }
            remain.erase(cursor->next()->id);  // can happen more than once per doc
        }
        ASSERT(!remain.empty());
        ASSERT(cursor->next());
        // We should have visited at least a quarter of the items if we're any random at all.
        // The expected fraction of visited records is about 63.2% (1 - 1/e).
        ASSERT_LT(remain.size(), nToInsert * 3 / 4);
    }
}
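A side note on the 63.2% figure in the comment above: it follows from a standard occupancy argument, assuming each next() call returns an independently and uniformly chosen record. With k = n - 1 draws from n = 5000 records,

P(\text{a given record is never visited}) = \left(1 - \frac{1}{n}\right)^{n-1} \approx e^{-1} \approx 0.368,

so the expected visited fraction is 1 - e^{-1} \approx 63.2\%, comfortably above the quarter that the assertion demands.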
Example 4: _scheduleWithDistLock
void MigrationManager::_scheduleWithDistLock(OperationContext* txn,
                                             const HostAndPort& targetHost,
                                             Migration migration) {
    const NamespaceString nss(migration.nss);

    executor::TaskExecutor* const executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();

    stdx::unique_lock<stdx::mutex> lock(_mutex);

    auto it = _activeMigrationsWithDistLock.find(nss);
    if (it == _activeMigrationsWithDistLock.end()) {
        // Acquire the collection distributed lock (blocking call)
        auto distLockHandleStatus = acquireDistLock(txn, nss);
        if (!distLockHandleStatus.isOK()) {
            migration.completionNotification->set(distLockHandleStatus.getStatus());
            return;
        }

        it = _activeMigrationsWithDistLock
                 .insert(std::make_pair(
                     nss, CollectionMigrationsState(std::move(distLockHandleStatus.getValue()))))
                 .first;
    }

    auto collectionMigrationState = &it->second;

    // Add ourselves to the list of migrations on this collection
    collectionMigrationState->migrations.push_front(std::move(migration));
    auto itMigration = collectionMigrationState->migrations.begin();

    const RemoteCommandRequest remoteRequest(
        targetHost, NamespaceString::kAdminDb.toString(), itMigration->moveChunkCmdObj, txn);

    StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
        executor->scheduleRemoteCommand(
            remoteRequest,
            [this, collectionMigrationState, itMigration](
                const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
                Client::initThread(getThreadName().c_str());
                ON_BLOCK_EXIT([&] { Client::destroy(); });
                auto txn = cc().makeOperationContext();

                _completeWithDistLock(
                    txn.get(),
                    itMigration,
                    extractMigrationStatusFromRemoteCommandResponse(args.response));
            });

    if (callbackHandleWithStatus.isOK()) {
        itMigration->callbackHandle = std::move(callbackHandleWithStatus.getValue());
        return;
    }

    // The completion routine takes its own lock
    lock.unlock();

    _completeWithDistLock(txn, itMigration, std::move(callbackHandleWithStatus.getStatus()));
}
Example 5: run
void UserCacheInvalidator::run() {
    Client::initThread("UserCacheInvalidator");
    lastInvalidationTime = Date_t::now();

    while (true) {
        stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
        Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
        Date_t now = Date_t::now();
        while (now < sleepUntil) {
            invalidationIntervalChangedCondition.wait_for(lock, sleepUntil - now);
            sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
            now = Date_t::now();
        }
        lastInvalidationTime = now;
        lock.unlock();

        if (inShutdown()) {
            break;
        }

        auto txn = cc().makeOperationContext();
        StatusWith<OID> currentGeneration = getCurrentCacheGeneration(txn.get());
        if (!currentGeneration.isOK()) {
            if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
                warning() << "_getUserCacheGeneration command not found on config server(s), "
                             "this most likely means you are running an outdated version of mongod "
                             "on the config servers" << std::endl;
            } else {
                warning() << "An error occurred while fetching current user cache generation "
                             "to check if user cache needs invalidation: "
                          << currentGeneration.getStatus() << std::endl;
            }
            // When in doubt, invalidate the cache
            _authzManager->invalidateUserCache();
            continue;
        }

        if (currentGeneration.getValue() != _previousCacheGeneration) {
            log() << "User cache generation changed from " << _previousCacheGeneration << " to "
                  << currentGeneration.getValue() << "; invalidating user cache" << std::endl;
            _authzManager->invalidateUserCache();
            _previousCacheGeneration = currentGeneration.getValue();
        }
    }
}
Example 6: run
void UserCacheInvalidator::run() {
    Client::initThread("UserCacheInvalidator");
    auto interval = globalInvalidationInterval();
    interval->start();
    while (true) {
        interval->wait();

        if (globalInShutdownDeprecated()) {
            break;
        }

        auto opCtx = cc().makeOperationContext();
        StatusWith<OID> currentGeneration = getCurrentCacheGeneration(opCtx.get());
        if (!currentGeneration.isOK()) {
            if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
                warning() << "_getUserCacheGeneration command not found on config server(s), "
                             "this most likely means you are running an outdated version of mongod "
                             "on the config servers";
            } else {
                warning() << "An error occurred while fetching current user cache generation "
                             "to check if user cache needs invalidation: "
                          << currentGeneration.getStatus();
            }
            // When in doubt, invalidate the cache
            try {
                _authzManager->invalidateUserCache(opCtx.get());
            } catch (const DBException& e) {
                warning() << "Error invalidating user cache: " << e.toStatus();
            }
            continue;
        }

        if (currentGeneration.getValue() != _previousCacheGeneration) {
            log() << "User cache generation changed from " << _previousCacheGeneration << " to "
                  << currentGeneration.getValue() << "; invalidating user cache";
            try {
                _authzManager->invalidateUserCache(opCtx.get());
            } catch (const DBException& e) {
                warning() << "Error invalidating user cache: " << e.toStatus();
            }
            _previousCacheGeneration = currentGeneration.getValue();
        }
    }
}
Example 7: uow
TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    if (!rs->updateWithDamagesSupported())
        return;

    string s1 = "aaa111bbb";
    string s2 = "aaa222bbb";

    RecordId loc;
    const RecordData s1Rec(s1.c_str(), s1.size() + 1);
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
                                                         s1Rec.data(),
                                                         s1Rec.size(),
                                                         -1 );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );

            const char* damageSource = "222";

            mutablebson::DamageVector dv;
            dv.push_back( mutablebson::DamageEvent() );
            dv[0].sourceOffset = 0;
            dv[0].targetOffset = 3;
            dv[0].size = 3;

            Status res = rs->updateWithDamages( opCtx.get(),
                                                loc,
                                                s1Rec,
                                                damageSource,
                                                dv );
            ASSERT_OK( res );
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
    }
}
Example 8: rec
// Insert a record and try to perform an in-place update on it with a DamageVector
// containing overlapping DamageEvents. The changes should be applied in the order
// specified by the DamageVector, and not -- for instance -- by the targetOffset.
TEST( RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    string data = "00010111";
    DiskLoc loc;
    const RecordData rec(data.c_str(), data.size() + 1);
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                        rec.data(),
                                                        rec.size(),
                                                        false );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            mutablebson::DamageVector dv( 2 );
            dv[0].sourceOffset = 0;
            dv[0].targetOffset = 3;
            dv[0].size = 5;
            dv[1].sourceOffset = 3;
            dv[1].targetOffset = 0;
            dv[1].size = 5;

            WriteUnitOfWork uow( opCtx.get() );
            ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
            uow.commit();
        }
    }

    data = "10111010";
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            RecordData record = rs->dataFor( opCtx.get(), loc );
            ASSERT_EQUALS( data, record.data() );
        }
    }
}
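To see why the expected result is "10111010", trace the two damage events against the source buffer data.c_str(), which stays "00010111" throughout the call: dv[0] copies source bytes [0, 5) ("00010") to target offset 3, turning "00010111" into "00000010"; dv[1] then copies source bytes [3, 8) ("10111") to target offset 0, producing "10111010". Applying the events in targetOffset order instead would yield "10100010", which is exactly the mistake this test guards against.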
Example 9: getFunction
StatusWithFunc SharedLibrary::getFunction(StringData name) {
    StatusWith<void*> s = getSymbol(name);

    if (!s.isOK()) {
        return StatusWithFunc(s.getStatus());
    }

    return StatusWithFunc(reinterpret_cast<void (*)()>(s.getValue()));
}
Example 10: oplogDiskLocRegister
Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
    StatusWith<RecordId> loc = oploghack::keyForOptime(opTime);
    if (!loc.isOK())
        return loc.getStatus();

    stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
    _addUncommitedDiskLoc_inlock(txn, loc.getValue());
    return Status::OK();
}
Example 11: uow
TEST( RocksRecordStoreTest, Isolation1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    RecordId loc1;
    RecordId loc2;

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );

            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();

            res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc2 = res.getValue();

            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
        scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );

        scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
        scoped_ptr<WriteUnitOfWork> w2( new WriteUnitOfWork( t2.get() ) );

        rs->dataFor( t1.get(), loc1 );
        rs->dataFor( t2.get(), loc1 );

        ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
        ASSERT_OK( rs->updateRecord( t1.get(), loc2, "B", 2, false, NULL ).getStatus() );

        // this should throw
        ASSERT_THROWS(rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL),
                      WriteConflictException);

        w1->commit();  // this should succeed
    }
}
Example 12: request
StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAddShard(
    OperationContext* txn,
    RemoteCommandTargeter* targeter,
    const std::string& dbName,
    const BSONObj& cmdObj) {
    auto host = targeter->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                   RemoteCommandTargeter::selectFindHostMaxWaitTime(txn));
    if (!host.isOK()) {
        return host.getStatus();
    }

    executor::RemoteCommandRequest request(
        host.getValue(), dbName, cmdObj, rpc::makeEmptyMetadata(), Seconds(30));
    StatusWith<executor::RemoteCommandResponse> swResponse =
        Status(ErrorCodes::InternalError, "Internal error running command");

    auto callStatus = _executorForAddShard->scheduleRemoteCommand(
        request, [&swResponse](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
            swResponse = args.response;
        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }

    // Block until the command is carried out
    _executorForAddShard->wait(callStatus.getValue());

    if (!swResponse.isOK()) {
        if (swResponse.getStatus().compareCode(ErrorCodes::ExceededTimeLimit)) {
            LOG(0) << "Operation for addShard timed out with status " << swResponse.getStatus();
        }
        return swResponse.getStatus();
    }

    BSONObj responseObj = swResponse.getValue().data.getOwned();
    BSONObj responseMetadata = swResponse.getValue().metadata.getOwned();
    Status commandStatus = getStatusFromCommandResult(responseObj);
    Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);

    return Shard::CommandResponse(std::move(responseObj),
                                  std::move(responseMetadata),
                                  std::move(commandStatus),
                                  std::move(writeConcernStatus));
}
Example 13: uow
TEST(RecordStoreTestHarness, Simple1) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    string s = "eliot was here";

    RecordId loc1;
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            loc1 = res.getValue();
            uow.commit();
        }

        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
        ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));

        RecordData rd;
        ASSERT(!rs->findRecord(opCtx.get(), RecordId(111, 17), &rd));
        ASSERT(rd.data() == NULL);

        ASSERT(rs->findRecord(opCtx.get(), loc1, &rd));
        ASSERT_EQUALS(s, rd.data());
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(2, rs->numRecords(opCtx.get()));
    }
}
Example 14: insertKeys
Status AbstractIndexAccessMethod::insertKeys(OperationContext* opCtx,
                                             const BSONObjSet& keys,
                                             const BSONObjSet& multikeyMetadataKeys,
                                             const MultikeyPaths& multikeyPaths,
                                             const RecordId& loc,
                                             const InsertDeleteOptions& options,
                                             InsertResult* result) {
    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);

    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    for (const auto keySet : {&keys, &multikeyMetadataKeys}) {
        const auto& recordId = (keySet == &keys ? loc : kMultikeyMetadataKeyId);
        for (const auto& key : *keySet) {
            Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
            if (status.isOK()) {
                bool unique = _descriptor->unique();
                StatusWith<SpecialFormatInserted> ret =
                    _newInterface->insert(opCtx, key, recordId, !unique /* dupsAllowed */);
                status = ret.getStatus();

                // When duplicates are encountered and allowed, retry with dupsAllowed. Add the
                // key to the output vector so callers know which duplicate keys were inserted.
                if (ErrorCodes::DuplicateKey == status.code() && options.dupsAllowed) {
                    invariant(unique);
                    ret = _newInterface->insert(opCtx, key, recordId, true /* dupsAllowed */);
                    status = ret.getStatus();

                    // This is speculative in that the 'dupsInserted' vector is not used by any
                    // code today. It is currently in place to test detecting duplicate key
                    // errors during hybrid index builds. Duplicate detection in the future will
                    // likely not take place in this insert() method.
                    if (status.isOK() && result) {
                        result->dupsInserted.push_back(key);
                    }
                }

                if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                    _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
            }

            if (isFatalError(opCtx, status, key)) {
                return status;
            }
        }
    }

    if (result) {
        result->numInserted += keys.size() + multikeyMetadataKeys.size();
    }

    if (shouldMarkIndexAsMultikey(keys, multikeyMetadataKeys, multikeyPaths)) {
        _btreeState->setMultikey(opCtx, multikeyPaths);
    }

    return Status::OK();
}
Example 15: request
StatusWith<ShardRegistry::CommandResponse> ShardRegistry::_runCommandWithMetadata(
    TaskExecutor* executor,
    const std::shared_ptr<Shard>& shard,
    const ReadPreferenceSetting& readPref,
    const std::string& dbName,
    const BSONObj& cmdObj,
    const BSONObj& metadata) {
    auto targeter = shard->getTargeter();
    auto host = targeter->findHost(readPref);
    if (!host.isOK()) {
        return host.getStatus();
    }

    executor::RemoteCommandRequest request(
        host.getValue(), dbName, cmdObj, metadata, kConfigCommandTimeout);
    StatusWith<executor::RemoteCommandResponse> responseStatus =
        Status(ErrorCodes::InternalError, "Internal error running command");

    auto callStatus =
        executor->scheduleRemoteCommand(request,
                                        [&responseStatus](const RemoteCommandCallbackArgs& args) {
                                            responseStatus = args.response;
                                        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }

    // Block until the command is carried out
    executor->wait(callStatus.getValue());

    updateReplSetMonitor(targeter, host.getValue(), responseStatus.getStatus());

    if (!responseStatus.isOK()) {
        return responseStatus.getStatus();
    }

    auto response = responseStatus.getValue();

    updateReplSetMonitor(targeter, host.getValue(), getStatusFromCommandResult(response.data));

    CommandResponse cmdResponse;
    cmdResponse.response = response.data;
    cmdResponse.metadata = response.metadata;

    if (response.metadata.hasField(rpc::kReplSetMetadataFieldName)) {
        auto replParseStatus = rpc::ReplSetMetadata::readFromMetadata(response.metadata);

        if (!replParseStatus.isOK()) {
            return replParseStatus.getStatus();
        }

        const auto& replMetadata = replParseStatus.getValue();
        cmdResponse.visibleOpTime = replMetadata.getLastOpVisible();
    }

    return cmdResponse;
}