本文整理汇总了C++中Timestamp::toStringLong方法的典型用法代码示例。如果您正苦于以下问题:C++ Timestamp::toStringLong方法的具体用法?C++ Timestamp::toStringLong怎么用?C++ Timestamp::toStringLong使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Timestamp
的用法示例。
在下文中一共展示了Timestamp::toStringLong方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: finder
// Scans the remote oplog (newest entry first) and feeds each remote operation
// to a RollBackLocalOperations finder until a common point between the local
// and remote oplogs is located.
//
// Returns:
//  - the common point on success;
//  - InvalidSyncSource if the remote oplog yields no first entry;
//  - NoMatchingDocument (RS100) if the remote oplog is exhausted without a match;
//  - any other non-NoSuchKey error reported by the finder, passed through.
StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
    const OplogInterface& localOplog,
    const OplogInterface& remoteOplog,
    const RollBackLocalOperations::RollbackOperationFn& rollbackOperation) {
    auto remoteIt = remoteOplog.makeIterator();
    auto nextRemote = remoteIt->next();
    if (!nextRemote.isOK()) {
        // A sync source whose oplog cannot produce even one entry is unusable.
        return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
            ErrorCodes::InvalidSyncSource, "remote oplog empty or unreadable");
    }
    RollBackLocalOperations commonPointFinder(localOplog, rollbackOperation);
    Timestamp lastRemoteTs;  // timestamp of the most recent remote entry examined; logged on failure
    do {
        const BSONObj remoteOp = nextRemote.getValue().first;
        lastRemoteTs = remoteOp["ts"].timestamp();
        auto matchResult = commonPointFinder.onRemoteOperation(remoteOp);
        if (matchResult.isOK()) {
            // The finder located the common point; we are done.
            return matchResult.getValue();
        }
        if (matchResult.getStatus().code() != ErrorCodes::NoSuchKey) {
            // NoSuchKey means "no match yet, keep scanning"; anything else is fatal.
            return matchResult;
        }
        nextRemote = remoteIt->next();
    } while (nextRemote.isOK());
    // Walked the entire remote oplog without finding a common point.
    severe() << "rollback error RS100 reached beginning of remote oplog";
    log() << " them: " << remoteOplog.toString();
    log() << " theirTime: " << lastRemoteTs.toStringLong();
    return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
        ErrorCodes::NoMatchingDocument, "RS100 reached beginning of remote oplog [1]");
}
示例2: syncFixUp
// Applies the fix-up phase of replica-set rollback: re-fetches the "good"
// (current primary) version of every document touched by the rolled-back ops,
// updates the minValid marker, and begins any required full-collection resyncs.
//
// Parameters:
//   txn         - operation context; must already hold the global write (W) lock.
//   fixUpInfo   - accumulated rollback state: docs to refetch, collections to
//                 resync, the source's rollback id (rbid), and the common point.
//   oplogreader - connection to the sync source used for refetching.
//   replCoord   - replication coordinator (unused in the visible portion).
//
// NOTE(review): this block is truncated in the source view ("rest omitted"
// marker at the end); documentation covers only the visible portion.
void syncFixUp(OperationContext* txn,
FixUpInfo& fixUpInfo,
OplogReader* oplogreader,
ReplicationCoordinator* replCoord) {
DBClientConnection* them = oplogreader->conn();
// fetch all first so we needn't handle interruption in a fancy way
unsigned long long totalSize = 0;  // running byte total of refetched docs, for the 300MB cap below
list< pair<DocID, BSONObj> > goodVersions;
BSONObj newMinValid;
// fetch all the goodVersions of each document from current primary
DocID doc;  // declared outside the try so the catch block can log the failing doc
unsigned long long numFetched = 0;
try {
for (set<DocID>::iterator it = fixUpInfo.toRefetch.begin();
it != fixUpInfo.toRefetch.end();
it++) {
doc = *it;
verify(!doc._id.eoo());
{
// TODO : slow. lots of round trips.
numFetched++;
// getOwned() so the BSONObj survives the connection's internal buffer reuse.
BSONObj good = them->findOne(doc.ns, doc._id.wrap(),
NULL, QueryOption_SlaveOk).getOwned();
totalSize += good.objsize();
// Hard cap: refuse to roll back if the refetched data exceeds 300MB.
uassert(13410, "replSet too much data to roll back",
totalSize < 300 * 1024 * 1024);
// note good might be eoo, indicating we should delete it
goodVersions.push_back(pair<DocID, BSONObj>(doc,good));
}
}
// Record the source's latest oplog entry; its ts becomes our new minValid.
newMinValid = oplogreader->getLastOp(rsOplogName);
if (newMinValid.isEmpty()) {
error() << "rollback error newMinValid empty?";
return;
}
}
catch (DBException& e) {
LOG(1) << "rollback re-get objects: " << e.toString();
// Log which document and how far through the refetch we were before rethrowing.
error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' '
<< numFetched << '/' << fixUpInfo.toRefetch.size();
throw e;
}
log() << "rollback 3.5";
// If the source's rollback id changed, the source itself rolled back while we
// were fetching, so the data we just received may be inconsistent -- abort.
if (fixUpInfo.rbid != getRBID(oplogreader->conn())) {
// our source rolled back itself. so the data we received isn't necessarily consistent.
warning() << "rollback rbid on source changed during rollback, cancelling this attempt";
return;
}
// update them
log() << "rollback 4 n:" << goodVersions.size();
bool warn = false;
invariant(!fixUpInfo.commonPointOurDiskloc.isNull());
invariant(txn->lockState()->isW());
// we have items we are writing that aren't from a point-in-time. thus best not to come
// online until we get to that point in freshness.
Timestamp minValid = newMinValid["ts"].timestamp();
log() << "minvalid=" << minValid.toStringLong();
setMinValid(txn, minValid);
// any full collection resyncs required?
if (!fixUpInfo.collectionsToResyncData.empty()
|| !fixUpInfo.collectionsToResyncMetadata.empty()) {
for (const string& ns : fixUpInfo.collectionsToResyncData) {
log() << "rollback 4.1.1 coll resync " << ns;
// A full data resync subsumes any pending metadata resync for this ns.
fixUpInfo.collectionsToResyncMetadata.erase(ns);
const NamespaceString nss(ns);
Database* db = dbHolder().openDb(txn, nss.db().toString());
invariant(db);
{
// Drop the local copy before re-copying it from the sync source.
WriteUnitOfWork wunit(txn);
db->dropCollection(txn, ns);
wunit.commit();
}
{
string errmsg;
// This comes as a GlobalWrite lock, so there is no DB to be acquired after
// resume, so we can skip the DB stability checks. Also
// copyCollectionFromRemote will acquire its own database pointer, under the
// appropriate locks, so just releasing and acquiring the lock is safe.
invariant(txn->lockState()->isW());
//......... rest of this function omitted in this view .........