This article collects typical usage examples of the C++ method Timestamp::isNull. If you have been asking yourself how Timestamp::isNull is used in C++, what it is for, or what real calls to it look like, the curated code samples here may help. You can also explore further usage examples of the Timestamp class this method belongs to.
The following presents 9 code examples of the Timestamp::isNull method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
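A note before the examples: on MongoDB's Timestamp (declared in bson/timestamp.h), isNull() reports whether the object still holds its zero default value. The examples below use it in two recurring ways: as a precondition (invariant(!ts.isNull())) and as a "not yet set" sentinel. Here is a minimal standalone sketch of both patterns; the Timestamp class in the sketch is a simplified stand-in written for illustration, not MongoDB's real implementation.

#include <cassert>
#include <cstdint>
#include <iostream>

// Simplified stand-in for mongo::Timestamp: a (seconds, increment) pair
// whose default-constructed value is the "null" timestamp.
class Timestamp {
public:
    Timestamp() = default;
    Timestamp(uint32_t secs, uint32_t inc) : _secs(secs), _inc(inc) {}

    // True while the timestamp still holds its zero default value.
    bool isNull() const { return _secs == 0 && _inc == 0; }

    bool operator>(const Timestamp& rhs) const {
        return _secs != rhs._secs ? _secs > rhs._secs : _inc > rhs._inc;
    }

private:
    uint32_t _secs = 0;
    uint32_t _inc = 0;
};

int main() {
    Timestamp applied(1565000000, 3);

    // Pattern 1: precondition check, as in the recovery examples below.
    assert(!applied.isNull());

    // Pattern 2: "not yet set" sentinel, as in pushAllNonBlocking (Example 2).
    Timestamp last;  // starts null
    for (auto ts : {Timestamp(1, 1), Timestamp(1, 2)}) {
        assert(last.isNull() || ts > last);  // timestamps must strictly increase
        last = ts;
    }
    std::cout << "last.isNull() = " << std::boolalpha << last.isNull() << '\n';
}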
Example 1: _applyToEndOfOplog
void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
                                                 Timestamp oplogApplicationStartPoint,
                                                 Timestamp topOfOplog) {
    invariant(!oplogApplicationStartPoint.isNull());
    invariant(!topOfOplog.isNull());

    // Check if we have any unapplied ops in our oplog. It is important that this is done after
    // deleting the ragged end of the oplog.
    if (oplogApplicationStartPoint == topOfOplog) {
        log()
            << "No oplog entries to apply for recovery. appliedThrough is at the top of the oplog.";
        return;  // We've applied all the valid oplog we have.
    } else if (oplogApplicationStartPoint > topOfOplog) {
        severe() << "Applied op " << oplogApplicationStartPoint.toBSON()
                 << " not found. Top of oplog is " << topOfOplog.toBSON() << '.';
        fassertFailedNoTrace(40313);
    }

    log() << "Replaying stored operations from " << oplogApplicationStartPoint.toBSON()
          << " (exclusive) to " << topOfOplog.toBSON() << " (inclusive).";

    DBDirectClient db(opCtx);
    auto cursor = db.query(NamespaceString::kRsOplogNamespace.ns(),
                           QUERY("ts" << BSON("$gte" << oplogApplicationStartPoint)),
                           /*batchSize*/ 0,
                           /*skip*/ 0,
                           /*projection*/ nullptr,
                           QueryOption_OplogReplay);

    // Check that the first document matches our appliedThrough point then skip it since it's
    // already been applied.
    if (!cursor->more()) {
        // This should really be impossible because we check above that the top of the oplog is
        // strictly > appliedThrough. If this fails it represents a serious bug in either the
        // storage engine or query's implementation of OplogReplay.
        severe() << "Couldn't find any entries in the oplog >= "
                 << oplogApplicationStartPoint.toBSON() << " which should be impossible.";
        fassertFailedNoTrace(40293);
    }

    auto firstTimestampFound =
        fassertStatusOK(40291, OpTime::parseFromOplogEntry(cursor->nextSafe())).getTimestamp();
    if (firstTimestampFound != oplogApplicationStartPoint) {
        severe() << "Oplog entry at " << oplogApplicationStartPoint.toBSON()
                 << " is missing; actual entry found is " << firstTimestampFound.toBSON();
        fassertFailedNoTrace(40292);
    }

    // Apply remaining ops one at a time, but don't log them because they are already logged.
    UnreplicatedWritesBlock uwb(opCtx);

    while (cursor->more()) {
        auto entry = cursor->nextSafe();
        fassertStatusOK(40294,
                        SyncTail::syncApply(opCtx, entry, OplogApplication::Mode::kRecovering));
        _consistencyMarkers->setAppliedThrough(
            opCtx, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry)));
    }
}
Example 2: pushAllNonBlocking
void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn,
                                               Batch::const_iterator begin,
                                               Batch::const_iterator end) {
    if (begin == end) {
        return;
    }
    size_t numDocs = std::distance(begin, end);
    Batch docsToInsert(numDocs);
    Timestamp ts;
    std::transform(begin, end, docsToInsert.begin(), [&ts](const Value& value) {
        auto pair = addIdToDocument(value);
        invariant(ts.isNull() || pair.second > ts);
        ts = pair.second;
        return pair.first;
    });

    stdx::lock_guard<stdx::mutex> lk(_mutex);
    auto status = _storageInterface->insertDocuments(txn, _nss, docsToInsert);
    fassertStatusOK(40161, status);

    _lastPushedTimestamp = ts;
    _count += numDocs;
    _size += std::accumulate(begin, end, 0U, [](const size_t& docSize, const Value& value) {
        return docSize + size_t(value.objsize());
    });
    _cvNoLongerEmpty.notify_all();
}
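Note how ts in this example starts out as the null timestamp: inside the lambda, invariant(ts.isNull() || pair.second > ts) treats the null state as "no timestamp seen yet" and from then on enforces that the timestamps in the batch are strictly increasing.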
Example 3: invariant
std::pair<BSONObj, Timestamp> OplogBufferCollection::addIdToDocument(const BSONObj& orig) {
    invariant(!orig.isEmpty());
    BSONObjBuilder bob;
    Timestamp ts = orig["ts"].timestamp();
    invariant(!ts.isNull());
    bob.append("_id", ts);
    bob.append(kOplogEntryFieldName, orig);
    return std::pair<BSONObj, Timestamp>{bob.obj(), ts};
}
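addIdToDocument wraps a raw oplog entry so that its ts field becomes the _id of the buffered document, while invariant(!ts.isNull()) rejects entries whose ts field is missing or zero. A hypothetical call site might look like the sketch below; the exact field name that kOplogEntryFieldName expands to is an assumption here, not something shown in this listing.

// Hypothetical usage sketch (MongoDB BSON API); assumes kOplogEntryFieldName is "entry".
BSONObj raw = BSON("ts" << Timestamp(1565000000, 1) << "op" << "n");
auto wrapped = OplogBufferCollection::addIdToDocument(raw);
// wrapped.first  ~ { _id: Timestamp(1565000000, 1), entry: { ts: ..., op: "n" } }
// wrapped.second == Timestamp(1565000000, 1)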
Example 4: _recoverFromStableTimestamp
void ReplicationRecoveryImpl::_recoverFromStableTimestamp(OperationContext* opCtx,
                                                          Timestamp stableTimestamp,
                                                          OpTime appliedThrough,
                                                          OpTime topOfOplog) {
    invariant(!stableTimestamp.isNull());
    invariant(!topOfOplog.isNull());
    const auto truncateAfterPoint = _consistencyMarkers->getOplogTruncateAfterPoint(opCtx);
    log() << "Recovering from stable timestamp: " << stableTimestamp
          << " (top of oplog: " << topOfOplog << ", appliedThrough: " << appliedThrough
          << ", TruncateAfter: " << truncateAfterPoint << ")";
    log() << "Starting recovery oplog application at the stable timestamp: " << stableTimestamp;
    _applyToEndOfOplog(opCtx, stableTimestamp, topOfOplog.getTimestamp());
}
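Here isNull() serves as a pure precondition: recovering from a null stable timestamp or toward a null top-of-oplog would be meaningless, so both are checked before the stable timestamp is handed to _applyToEndOfOplog (Examples 1 and 9) as the exclusive starting point for replay.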
Example 5: runQuery
//......... some code omitted here .........

    // If we don't cache the executor later, we are deleting it, so it must be deregistered.
    //
    // So, no matter what, deregister the executor.
    exec->deregisterExec();

    // Caller expects exceptions thrown in certain cases.
    if (PlanExecutor::FAILURE == state) {
        scoped_ptr<PlanStageStats> stats(exec->getStats());
        error() << "Plan executor error, stats: "
                << Explain::statsToBSON(*stats);
        uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
    }

    // TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
    // chunk belonged on this node are gone. Separating chunk lifetime management from
    // ClientCursor should allow this check to go away.
    if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
        // if the version changed during the query we might be missing some data and its safe to
        // send this as mongos can resend at this point
        throw SendStaleConfigException(nss.ns(),
                                       "version changed during initial query",
                                       shardingVersionAtStart,
                                       shardingState.getVersion(nss.ns()));
    }

    // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
    // this cursorid later.
    long long ccId = 0;
    if (shouldSaveCursor(txn, collection, state, exec.get())) {
        // We won't use the executor until it's getMore'd.
        exec->saveState();

        // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
        // inserted into a global map by its ctor.
        ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
                                            exec.release(),
                                            nss.ns(),
                                            pq.getOptions(),
                                            pq.getFilter());
        ccId = cc->cursorid();

        if (txn->getClient()->isInDirectClient()) {
            cc->setUnownedRecoveryUnit(txn->recoveryUnit());
        } else if (state == PlanExecutor::IS_EOF && pq.isTailable()) {
            // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
            // next getMore.
        } else {
            // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
            // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
            txn->recoveryUnit()->abandonSnapshot();
            cc->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
            StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
            invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
                                           OperationContext::kNotInUnitOfWork) ==
                      OperationContext::kNotInUnitOfWork);
        }

        LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
               << " results" << endl;

        // TODO document
        if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
            cc->slaveReadTill(slaveReadTill);
        }

        // TODO document
        if (pq.isExhaust()) {
            curop.debug().exhaust = true;
        }

        cc->setPos(numResults);

        // If the query had a time limit, remaining time is "rolled over" to the cursor (for
        // use by future getmore ops).
        cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());

        endQueryOp(cc->getExecutor(), dbProfilingLevel, numResults, ccId, &curop);
    } else {
        LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
        endQueryOp(exec.get(), dbProfilingLevel, numResults, ccId, &curop);
    }

    // Add the results from the query into the output buffer.
    result.appendData(bb.buf(), bb.len());
    bb.decouple();

    // Fill out the output buffer's header.
    QueryResult::View qr = result.header().view2ptr();
    qr.setCursorId(ccId);
    qr.setResultFlagsToOk();
    qr.msgdata().setOperation(opReply);
    qr.setStartingFrom(0);
    qr.setNReturned(numResults);

    // curop.debug().exhaust is set above.
    return curop.debug().exhaust ? nss.ns() : "";
}
Example 6: getMore
//......... some code omitted here .........
        }

        if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
            // Propagate this error to caller.
            if (PlanExecutor::FAILURE == state) {
                scoped_ptr<PlanStageStats> stats(exec->getStats());
                error() << "Plan executor error, stats: "
                        << Explain::statsToBSON(*stats);
                uasserted(17406, "getMore executor error: " +
                                 WorkingSetCommon::toStatusString(obj));
            }

            // In the old system tailable capped cursors would be killed off at the
            // cursorid level. If a tailable capped cursor is nuked the cursorid
            // would vanish.
            //
            // In the new system they die and are cleaned up later (or time out).
            // So this is where we get to remove the cursorid.
            if (0 == numResults) {
                resultFlags = ResultFlag_CursorNotFound;
            }
        }

        const bool shouldSaveCursor =
            shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));

        // In order to deregister a cursor, we need to be holding the DB + collection lock and
        // if the cursor is aggregation, we release these locks.
        if (cc->isAggCursor()) {
            invariant(NULL == ctx.get());
            unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
            unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
        }

        // Our two possible ClientCursorPin cleanup paths are:
        // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
        // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
        //    this case, the pin's destructor will be invoked, which will call release() on the
        //    pin. Because our ClientCursorPin is declared after our lock is declared, this
        //    will happen under the lock.
        if (!shouldSaveCursor) {
            ruSwapper.reset();
            ccPin.deleteUnderlying();

            // cc is now invalid, as is the executor
            cursorid = 0;
            cc = NULL;
            curop.debug().cursorExhausted = true;

            LOG(5) << "getMore NOT saving client cursor, ended with state "
                   << PlanExecutor::statestr(state) << endl;
        } else {
            // Continue caching the ClientCursor.
            cc->incPos(numResults);
            exec->saveState();
            LOG(5) << "getMore saving client cursor ended with state "
                   << PlanExecutor::statestr(state) << endl;

            if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
                if (!txn->getClient()->isInDirectClient()) {
                    // Don't stash the RU. Get a new one on the next getMore.
                    ruSwapper->dismiss();
                }

                if ((queryOptions & QueryOption_AwaitData) && (numResults == 0) && (pass < 1000)) {
                    // Bubble up to the AwaitData handling code in receivedGetMore which will
                    // try again.
                    return NULL;
                }
            }

            // Possibly note slave's position in the oplog.
            if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
                cc->slaveReadTill(slaveReadTill);
            }

            exhaust = (queryOptions & QueryOption_Exhaust);

            // If the getmore had a time limit, remaining time is "rolled over" back to the
            // cursor (for use by future getmore ops).
            cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
        }
    }

    QueryResult::View qr = bb.buf();
    qr.msgdata().setLen(bb.len());
    qr.msgdata().setOperation(opReply);
    qr.setResultFlags(resultFlags);
    qr.setCursorId(cursorid);
    qr.setStartingFrom(startingResult);
    qr.setNReturned(numResults);
    bb.decouple();

    LOG(5) << "getMore returned " << numResults << " results\n";
    return qr;
}
Example 7: getMore
//......... some code omitted here .........
            // Must get the version before we call generateBatch in case a write comes in after
            // that call and before we call wait on the notifier.
            notifierVersion = notifier->getVersion();
        }

        PlanExecutor* exec = cc->getExecutor();
        exec->reattachToOperationContext(txn);
        exec->restoreState();

        PlanExecutor::ExecState state;

        generateBatch(ntoreturn, cc, &bb, &numResults, &slaveReadTill, &state);

        // If this is an await data cursor, and we hit EOF without generating any results, then
        // we block waiting for new data to arrive.
        if (isCursorAwaitData(cc) && state == PlanExecutor::IS_EOF && numResults == 0) {
            // Save the PlanExecutor and drop our locks.
            exec->saveState();
            ctx.reset();

            // Block waiting for data for up to 1 second.
            Seconds timeout(1);
            notifier->wait(notifierVersion, timeout);
            notifier.reset();

            // Set expected latency to match wait time. This makes sure the logs aren't spammed
            // by awaitData queries that exceed slowms due to blocking on the CappedInsertNotifier.
            curop.setExpectedLatencyMs(durationCount<Milliseconds>(timeout));

            // Reacquiring locks.
            ctx = make_unique<AutoGetCollectionForRead>(txn, nss);
            exec->restoreState();

            // We woke up because either the timed_wait expired, or there was more data. Either
            // way, attempt to generate another batch of results.
            generateBatch(ntoreturn, cc, &bb, &numResults, &slaveReadTill, &state);
        }

        // We have to do this before re-acquiring locks in the agg case because
        // shouldSaveCursorGetMore() can make a network call for agg cursors.
        //
        // TODO: Getting rid of PlanExecutor::isEOF() in favor of PlanExecutor::IS_EOF would mean
        // that this network operation is no longer necessary.
        const bool shouldSaveCursor = shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));

        // In order to deregister a cursor, we need to be holding the DB + collection lock and
        // if the cursor is aggregation, we release these locks.
        if (cc->isAggCursor()) {
            invariant(NULL == ctx.get());
            unpinDBLock = make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_IS);
            unpinCollLock = make_unique<Lock::CollectionLock>(txn->lockState(), nss.ns(), MODE_IS);
        }

        // Our two possible ClientCursorPin cleanup paths are:
        // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
        // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
        //    this case, the pin's destructor will be invoked, which will call release() on the
        //    pin. Because our ClientCursorPin is declared after our lock is declared, this
        //    will happen under the lock.
        if (!shouldSaveCursor) {
            ccPin.deleteUnderlying();

            // cc is now invalid, as is the executor
            cursorid = 0;
            cc = NULL;
            curop.debug().cursorExhausted = true;

            LOG(5) << "getMore NOT saving client cursor, ended with state "
                   << PlanExecutor::statestr(state) << endl;
        } else {
            // Continue caching the ClientCursor.
            cc->incPos(numResults);
            exec->saveState();
            exec->detachFromOperationContext();
            LOG(5) << "getMore saving client cursor ended with state "
                   << PlanExecutor::statestr(state) << endl;

            // Possibly note slave's position in the oplog.
            if ((cc->queryOptions() & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
                cc->slaveReadTill(slaveReadTill);
            }

            *exhaust = cc->queryOptions() & QueryOption_Exhaust;

            // If the getmore had a time limit, remaining time is "rolled over" back to the
            // cursor (for use by future getmore ops).
            cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
        }
    }

    QueryResult::View qr = bb.buf();
    qr.msgdata().setLen(bb.len());
    qr.msgdata().setOperation(opReply);
    qr.setResultFlags(resultFlags);
    qr.setCursorId(cursorid);
    qr.setStartingFrom(startingResult);
    qr.setNReturned(numResults);
    bb.decouple();

    LOG(5) << "getMore returned " << numResults << " results\n";
    return qr;
}
Example 8: runQuery
//......... some code omitted here .........

        // Count the result.
        ++numResults;

        // Possibly note slave's position in the oplog.
        if (pq.isOplogReplay()) {
            BSONElement e = obj["ts"];
            if (Date == e.type() || bsonTimestamp == e.type()) {
                slaveReadTill = e.timestamp();
            }
        }

        if (FindCommon::enoughForFirstBatch(pq, numResults)) {
            LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
                   << " ntoreturn=" << pq.getNToReturn().value_or(0)
                   << " numResults=" << numResults << endl;
            break;
        }
    }

    // If we cache the executor later, we want to deregister it as it receives notifications
    // anyway by virtue of being cached.
    //
    // If we don't cache the executor later, we are deleting it, so it must be deregistered.
    //
    // So, no matter what, deregister the executor.
    exec->deregisterExec();

    // Caller expects exceptions thrown in certain cases.
    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
        error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
                << ", stats: " << Explain::getWinningPlanStats(exec.get());
        uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
    }

    // Before saving the cursor, ensure that whatever plan we established happened with the
    // expected collection version.
    auto css = CollectionShardingState::get(txn, nss);
    css->checkShardVersionOrThrow(txn);

    // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
    // this cursorid later.
    long long ccId = 0;
    if (shouldSaveCursor(txn, collection, state, exec.get())) {
        // We won't use the executor until it's getMore'd.
        exec->saveState();
        exec->detachFromOperationContext();

        // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
        // inserted into a global map by its ctor.
        ClientCursor* cc =
            new ClientCursor(collection->getCursorManager(),
                             exec.release(),
                             nss.ns(),
                             txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
                             pq.getOptions(),
                             pq.getFilter());
        ccId = cc->cursorid();

        LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
               << " results" << endl;

        // TODO document
        if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
            cc->slaveReadTill(slaveReadTill);
        }

        // TODO document
        if (pq.isExhaust()) {
            curop.debug().exhaust = true;
        }

        cc->setPos(numResults);

        // If the query had a time limit, remaining time is "rolled over" to the cursor (for
        // use by future getmore ops).
        cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());

        endQueryOp(txn, collection, *cc->getExecutor(), dbProfilingLevel, numResults, ccId);
    } else {
        LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
        endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, ccId);
    }

    // Add the results from the query into the output buffer.
    result.appendData(bb.buf(), bb.len());
    bb.decouple();

    // Fill out the output buffer's header.
    QueryResult::View qr = result.header().view2ptr();
    qr.setCursorId(ccId);
    qr.setResultFlagsToOk();
    qr.msgdata().setOperation(opReply);
    qr.setStartingFrom(0);
    qr.setNReturned(numResults);

    // curop.debug().exhaust is set above.
    return curop.debug().exhaust ? nss.ns() : "";
}
Example 9: _applyToEndOfOplog
void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
                                                 const Timestamp& oplogApplicationStartPoint,
                                                 const Timestamp& topOfOplog) {
    invariant(!oplogApplicationStartPoint.isNull());
    invariant(!topOfOplog.isNull());

    // Check if we have any unapplied ops in our oplog. It is important that this is done after
    // deleting the ragged end of the oplog.
    if (oplogApplicationStartPoint == topOfOplog) {
        log() << "No oplog entries to apply for recovery. Start point is at the top of the oplog.";
        return;  // We've applied all the valid oplog we have.
    } else if (oplogApplicationStartPoint > topOfOplog) {
        severe() << "Applied op " << oplogApplicationStartPoint.toBSON()
                 << " not found. Top of oplog is " << topOfOplog.toBSON() << '.';
        fassertFailedNoTrace(40313);
    }

    log() << "Replaying stored operations from " << oplogApplicationStartPoint.toBSON()
          << " (exclusive) to " << topOfOplog.toBSON() << " (inclusive).";

    OplogBufferLocalOplog oplogBuffer(oplogApplicationStartPoint);
    oplogBuffer.startup(opCtx);

    RecoveryOplogApplierStats stats;

    auto writerPool = OplogApplier::makeWriterPool();
    OplogApplier::Options options;
    options.allowNamespaceNotFoundErrorsOnCrudOps = true;
    options.skipWritesToOplog = true;

    // During replication recovery, the stableTimestampForRecovery refers to the stable timestamp
    // from which we replay the oplog.
    // For startup recovery, this will be the recovery timestamp, which is the stable timestamp
    // that the storage engine recovered to on startup. For rollback recovery, this will be the
    // last stable timestamp, returned when we call recoverToStableTimestamp.
    // We keep track of this for prepared transactions so that when we apply a commitTransaction
    // oplog entry, we can check if it occurs before or after the stable timestamp and decide
    // whether the operations would have already been reflected in the data.
    options.stableTimestampForRecovery = oplogApplicationStartPoint;
    OplogApplierImpl oplogApplier(nullptr,
                                  &oplogBuffer,
                                  &stats,
                                  nullptr,
                                  _consistencyMarkers,
                                  _storageInterface,
                                  options,
                                  writerPool.get());

    OplogApplier::BatchLimits batchLimits;
    batchLimits.bytes = OplogApplier::calculateBatchLimitBytes(opCtx, _storageInterface);
    batchLimits.ops = OplogApplier::getBatchLimitOperations();

    OpTime applyThroughOpTime;
    OplogApplier::Operations batch;
    while (
        !(batch = fassert(50763, oplogApplier.getNextApplierBatch(opCtx, batchLimits))).empty()) {
        applyThroughOpTime = uassertStatusOK(oplogApplier.multiApply(opCtx, std::move(batch)));
    }
    stats.complete(applyThroughOpTime);
    invariant(oplogBuffer.isEmpty(),
              str::stream() << "Oplog buffer not empty after applying operations. Last operation "
                               "applied with optime: "
                            << applyThroughOpTime.toBSON());
    invariant(applyThroughOpTime.getTimestamp() == topOfOplog,
              str::stream() << "Did not apply to top of oplog. Applied through: "
                            << applyThroughOpTime.toString()
                            << ". Top of oplog: "
                            << topOfOplog.toString());
    oplogBuffer.shutdown(opCtx);

    // We may crash before setting appliedThrough. If we have a stable checkpoint, we will recover
    // to that checkpoint at a replication consistent point, and applying the oplog is safe.
    // If we don't have a stable checkpoint, then we must be in startup recovery, and not rollback
    // recovery, because we only roll back to a stable timestamp when we have a stable checkpoint.
    // Startup recovery from an unstable checkpoint only ever applies a single batch and it is
    // safe to replay the batch from any point.
    _consistencyMarkers->setAppliedThrough(opCtx, applyThroughOpTime);
}
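Example 9 is a later revision of the function shown in Example 1: the isNull() preconditions and the start/top range checks are unchanged, but the manual DBDirectClient cursor loop has been replaced by an OplogBufferLocalOplog feeding an OplogApplierImpl, which applies entries in batches and sets appliedThrough once at the end instead of after every entry.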