This article collects typical usage examples of the C++ OplogReader class: what OplogReader is for, how it is used, and what real code that uses it looks like. The 15 code examples shown below are sorted by popularity by default.
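Most of the examples follow the same basic pattern: construct an OplogReader, connect it to a sync source, issue a tailing query starting from the last optime already fetched, and iterate over the returned oplog entries. The minimal sketch below illustrates that pattern using only calls that appear in the examples (connect, setTailingQueryOptions, tailingQueryGTE, more, next); the header path, the wrapper function, and the "local.oplog.rs" namespace string are illustrative assumptions, and exact signatures vary between MongoDB versions (for instance, connect takes a plain host string in some versions and a HostAndPort in others).
#include "mongo/db/repl/oplogreader.h"  // assumed header location; differs across versions

using namespace mongo;

// Hypothetical helper, not part of MongoDB: tail a sync source's oplog starting at lastFetched.
static void tailOplogSketch(const std::string& syncSourceHost, const OpTime& lastFetched) {
    OplogReader reader;
    // request a tailable, slave-ok cursor so the query keeps yielding new oplog entries
    reader.setTailingQueryOptions(QueryOption_SlaveOk | QueryOption_AwaitData);
    if (!reader.connect(syncSourceHost)) {
        return;  // callers in the examples retry after a short sleep or veto the sync source
    }
    // start reading at (or after) the last optime we have already fetched
    reader.tailingQueryGTE("local.oplog.rs", lastFetched);
    while (reader.more()) {
        BSONObj op = reader.next();  // one oplog entry as BSON
        // ... inspect or apply the entry here ...
    }
}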
Example 1: getMissingDoc
BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char* ns = o.getStringField("ns");
// capped collections
Collection* collection = db->getCollection(ns);
if (collection && collection->isCapped()) {
log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
return BSONObj();
}
const int retryMax = 3;
for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
if (retryCount != 1) {
// if we are retrying, sleep a bit to let the network possibly recover
sleepsecs(retryCount * retryCount);
}
try {
bool ok = missingObjReader.connect(HostAndPort(_hostname));
if (!ok) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
} catch (const SocketException&) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
// get _id from oplog entry to create query to fetch document.
const BSONElement opElem = o.getField("op");
const bool isUpdate = !opElem.eoo() && opElem.str() == "u";
const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];
if (idElem.eoo()) {
severe() << "cannot fetch missing document without _id field: " << o.toString();
fassertFailedNoTrace(28742);
}
BSONObj query = BSONObjBuilder().append(idElem).obj();
BSONObj missingObj;
try {
missingObj = missingObjReader.findOne(ns, query);
} catch (const SocketException&) {
warning() << "network problem detected while fetching a missing document from the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
} catch (DBException& e) {
error() << "assertion fetching missing object: " << e.what() << endl;
throw;
}
// success!
return missingObj;
}
// retry count exceeded
msgasserted(15916,
str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
Example 2: getMissingDoc
BSONObj Sync::getMissingDoc(const BSONObj& o) {
OplogReader missingObjReader;
const char *ns = o.getStringField("ns");
// capped collections
NamespaceDetails *nsd = nsdetails(ns);
if ( nsd && nsd->isCapped() ) {
log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
return BSONObj();
}
uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));
// might be more than just _id in the update criteria
BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
BSONObj missingObj;
try {
missingObj = missingObjReader.findOne(ns, query);
} catch(DBException& e) {
log() << "replication assertion fetching missing object: " << e.what() << endl;
throw;
}
return missingObj;
}
Example 3: isRollbackRequired
bool BackgroundSync::isRollbackRequired(OplogReader& r) {
string hn = r.conn()->getServerAddress();
if (!r.more()) {
// In vanilla Mongo, this happened for one of the
// following reasons:
// - we were ahead of what we are syncing from (don't
// think that is possible anymore)
// - remote oplog is empty for some weird reason
// in either case, if it (strangely) happens, we'll just return
// and our caller will simply try again after a short sleep.
log() << "replSet error empty query result from " << hn << " oplog, attempting rollback" << rsLog;
return true;
}
BSONObj o = r.nextSafe();
uint64_t ts = o["ts"]._numberLong();
uint64_t lastHash = o["h"].numberLong();
GTID gtid = getGTIDFromBSON("_id", o);
if( !theReplSet->gtidManager->rollbackNeeded(gtid, ts, lastHash)) {
log() << "Rollback NOT needed! Our GTID" << gtid << endl;
return false;
}
log() << "Rollback needed! Our GTID" <<
theReplSet->gtidManager->getLiveState().toString() <<
" remote GTID: " << gtid.toString() << ". Attempting rollback." << rsLog;
runRollback(r, ts);
return true;
}
Example 4: isRollbackRequired
bool isRollbackRequired(OplogReader& r, uint64_t *lastTS) {
string hn = r.conn()->getServerAddress();
verify(r.more());
BSONObj rollbackStatus;
bool found = getRollbackStatus(rollbackStatus);
if (found) {
// we have a rollback in progress,
// must complete it
log() << "Rollback needed, found rollbackStatus: " << rollbackStatus << rsLog;
return true;
}
BSONObj o = r.nextSafe();
uint64_t ts = o["ts"]._numberLong();
uint64_t lastHash = o["h"].numberLong();
GTID gtid = getGTIDFromBSON("_id", o);
if (!theReplSet->gtidManager->rollbackNeeded(gtid, ts, lastHash)) {
log() << "Rollback NOT needed! " << gtid << endl;
return false;
}
log() << "Rollback needed! Our GTID: " <<
theReplSet->gtidManager->getLiveState().toString() <<
", remote GTID: " << gtid.toString() << ". Attempting rollback." << rsLog;
*lastTS = ts;
return true;
}
Example 5: syncTail
void ReplSetImpl::syncTail() {
// todo : locking vis a vis the mgr...
const Member *primary = box.getPrimary();
if( primary == 0 ) return;
string hn = primary->h().toString();
OplogReader r;
if( !r.connect(primary->h().toString()) ) {
log(2) << "replSet can't connect to " << hn << " to read operations" << rsLog;
return;
}
/* first make sure we are not hopelessly out of sync by being very stale. */
{
BSONObj remoteOldestOp = r.findOne(rsoplog, Query());
OpTime ts = remoteOldestOp["ts"]._opTime();
DEV log() << "remoteOldestOp: " << ts.toStringPretty() << endl;   // DEV expands to an if(...) on the debug flag,
else log(3) << "remoteOldestOp: " << ts.toStringPretty() << endl; // so this else pairs with the DEV branch
if( lastOpTimeWritten < ts ) {
log() << "replSet error too stale to catch up, at least from primary " << hn << rsLog;
log() << "replSet our last optime : " << lastOpTimeWritten.toStringPretty() << rsLog;
log() << "replSet oldest at " << hn << " : " << ts.toStringPretty() << rsLog;
log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
sethbmsg("error too stale to catch up");
sleepsecs(120);
return;
}
}
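// ... (the remainder of ReplSetImpl::syncTail() is not shown in this example)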
Example 6: getOplogReader
void BackgroundSync::getOplogReader(OplogReader& r) {
const Member *target = NULL, *stale = NULL;
BSONObj oldest;
{
boost::unique_lock<boost::mutex> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
_currentSyncTarget = NULL;
return;
}
// Wait until we've applied the ops we have before we choose a sync target
while (!_appliedBuffer) {
_condvar.wait(lock);
}
}
verify(r.conn() == NULL);
while ((target = theReplSet->getMemberToSyncTo()) != NULL) {
string current = target->fullName();
if (!r.connect(current)) {
LOG(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
r.resetConnection();
theReplSet->veto(current);
continue;
}
if (isStale(r, oldest)) {
r.resetConnection();
theReplSet->veto(current, 600);
stale = target;
continue;
}
// if we made it here, the target is up and not stale
{
boost::unique_lock<boost::mutex> lock(_mutex);
_currentSyncTarget = target;
}
return;
}
// the only viable sync target was stale
if (stale) {
theReplSet->goStale(stale, oldest);
sleepsecs(120);
}
{
boost::unique_lock<boost::mutex> lock(_mutex);
_currentSyncTarget = NULL;
}
}
Example 7: getMissingDoc
BSONObj Sync::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char *ns = o.getStringField("ns");
// capped collections
Collection* collection = db->getCollection(ns);
if ( collection && collection->isCapped() ) {
log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
return BSONObj();
}
const int retryMax = 3;
for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
if (retryCount != 1) {
// if we are retrying, sleep a bit to let the network possibly recover
sleepsecs(retryCount * retryCount);
}
try {
bool ok = missingObjReader.connect(HostAndPort(hn));
if (!ok) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of "
<< retryMax << endl;
continue; // try again
}
}
catch (const SocketException&) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of "
<< retryMax << endl;
continue; // try again
}
// might be more than just _id in the update criteria
BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
BSONObj missingObj;
try {
missingObj = missingObjReader.findOne(ns, query);
}
catch (const SocketException&) {
warning() << "network problem detected while fetching a missing document from the "
<< "sync source, attempt " << retryCount << " of "
<< retryMax << endl;
continue; // try again
}
catch (DBException& e) {
log() << "replication assertion fetching missing object: " << e.what() << endl;
throw;
}
// success!
return missingObj;
}
// retry count exceeded
msgasserted(15916,
str::stream() << "Can no longer connect to initial sync source: " << hn);
}
Example 8: isRollbackRequired
bool BackgroundSync::isRollbackRequired(OplogReader& r) {
string hn = r.conn()->getServerAddress();
if (!r.more()) {
try {
BSONObj theirLastOp = r.getLastOp(rsoplog);
if (theirLastOp.isEmpty()) {
log() << "replSet error empty query result from " << hn << " oplog" << rsLog;
sleepsecs(2);
return true;
}
OpTime theirTS = theirLastOp["ts"]._opTime();
if (theirTS < _lastOpTimeFetched) {
log() << "replSet we are ahead of the sync source, will try to roll back"
<< rsLog;
theReplSet->syncRollback(r);
return true;
}
/* we're not ahead? maybe our new query got fresher data. best to come back and try again */
log() << "replSet syncTail condition 1" << rsLog;
sleepsecs(1);
}
catch(DBException& e) {
log() << "replSet error querying " << hn << ' ' << e.toString() << rsLog;
sleepsecs(2);
}
return true;
}
BSONObj o = r.nextSafe();
OpTime ts = o["ts"]._opTime();
long long h = o["h"].numberLong();
if( ts != _lastOpTimeFetched || h != _lastH ) {
log() << "replSet our last op time fetched: " << _lastOpTimeFetched.toStringPretty() << rsLog;
log() << "replset source's GTE: " << ts.toStringPretty() << rsLog;
theReplSet->syncRollback(r);
return true;
}
return false;
}
Example 9: _rollbackIfNeeded
bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
string hn = r.conn()->getServerAddress();
if (!r.more()) {
try {
BSONObj theirLastOp = r.getLastOp(rsOplogName.c_str());
if (theirLastOp.isEmpty()) {
error() << "empty query result from " << hn << " oplog";
sleepsecs(2);
return true;
}
OpTime theirOpTime = extractOpTime(theirLastOp);
if (theirOpTime < _lastOpTimeFetched) {
log() << "we are ahead of the sync source, will try to roll back";
syncRollback(txn, _replCoord->getMyLastOptime(), &r, _replCoord);
return true;
}
/* we're not ahead? maybe our new query got fresher data. best to come back and try again */
log() << "syncTail condition 1";
sleepsecs(1);
}
catch(DBException& e) {
error() << "querying " << hn << ' ' << e.toString();
sleepsecs(2);
}
return true;
}
BSONObj o = r.nextSafe();
OpTime opTime = extractOpTime(o);
long long hash = o["h"].numberLong();
if ( opTime != _lastOpTimeFetched || hash != _lastFetchedHash ) {
log() << "our last op time fetched: " << _lastOpTimeFetched;
log() << "source's GTE: " << opTime;
syncRollback(txn, _replCoord->getMyLastOptime(), &r, _replCoord);
return true;
}
return false;
}
Example 10: getOplogReader
void BackgroundSync::getOplogReader(OplogReader& r) {
const Member *target = NULL, *stale = NULL;
BSONObj oldest;
verify(r.conn() == NULL);
while ((target = theReplSet->getMemberToSyncTo()) != NULL) {
string current = target->fullName();
if (!r.connect(current)) {
LOG(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
r.resetConnection();
theReplSet->veto(current);
continue;
}
// if we made it here, the target is up and not stale
{
boost::unique_lock<boost::mutex> lock(_mutex);
_currentSyncTarget = target;
}
return;
}
// the only viable sync target was stale
if (stale) {
GTID remoteOldestGTID = getGTIDFromBSON("_id", oldest);
theReplSet->goStale(stale, remoteOldestGTID);
// vanilla Mongo used to do a sleep of 120 seconds here
// We removed it. It seems excessive, and if this machine is doing
// nothing anyway, sleeping won't help. It might as well
// return with a null sync target, and produce() will handle
// that fact and sleep one second
}
{
boost::unique_lock<boost::mutex> lock(_mutex);
_currentSyncTarget = NULL;
}
}
Example 11: copyOplogRefsRange
// Copy a range of documents to the local oplog.refs collection
static void copyOplogRefsRange(OplogReader &r, OID oid) {
shared_ptr<DBClientCursor> c = r.getOplogRefsCursor(oid);
Client::ReadContext ctx(rsOplogRefs);
while (c->more()) {
BSONObj b = c->next();
BSONElement eOID = b.getFieldDotted("_id.oid");
if (oid != eOID.OID()) {
break;
}
LOG(6) << "copyOplogRefsRange " << b << endl;
writeEntryToOplogRefs(b);
}
}
Example 12: run
int run() {
Client::initThread( "oplogreplay" );
toolInfoLog() << "going to connect" << std::endl;
OplogReader r;
r.setTailingQueryOptions( QueryOption_SlaveOk | QueryOption_AwaitData );
r.connect(mongoOplogGlobalParams.from);
toolInfoLog() << "connected" << std::endl;
OpTime start(time(0) - mongoOplogGlobalParams.seconds, 0);
toolInfoLog() << "starting from " << start.toStringPretty() << std::endl;
r.tailingQueryGTE(mongoOplogGlobalParams.ns.c_str(), start);
int num = 0;
while ( r.more() ) {
BSONObj o = r.next();
if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2))) {
toolInfoLog() << o << std::endl;
}
if ( o["$err"].type() ) {
toolError() << "error getting oplog" << std::endl;
toolError() << o << std::endl;
return -1;
}
bool print = ++num % 100000 == 0;
if (print) {
toolInfoLog() << num << "\t" << o << std::endl;
}
if ( o["op"].String() == "n" )
continue;
BSONObjBuilder b( o.objsize() + 32 );
BSONArrayBuilder updates( b.subarrayStart( "applyOps" ) );
updates.append( o );
updates.done();
BSONObj c = b.obj();
BSONObj res;
bool ok = conn().runCommand( "admin" , c , res );
if (!ok) {
toolError() << res << std::endl;
} else if (print) {
toolInfoLog() << res << std::endl;
}
}
return 0;
}
Example 13: isStale
bool BackgroundSync::isStale(OpTime lastOpTimeFetched,
OplogReader& r,
BSONObj& remoteOldestOp) {
remoteOldestOp = r.findOne(rsoplog, Query());
OpTime remoteTs = remoteOldestOp["ts"]._opTime();
{
boost::unique_lock<boost::mutex> lock(_mutex);
if (lastOpTimeFetched >= remoteTs) {
return false;
}
log() << "replSet remoteOldestOp: " << remoteTs.toStringLong() << rsLog;
log() << "replSet lastOpTimeFetched: " << lastOpTimeFetched.toStringLong() << rsLog;
}
return true;
}
Example 14: canStartRollback
bool canStartRollback(OplogReader& r, GTID idToRollbackTo) {
shared_ptr<DBClientConnection> conn(r.conn_shared());
// before we start rollback, let's make sure that the minUnapplied on the remote
// server is past the id that we are rolling back to. Otherwise, the snapshot
// we create will not be up to date, and the rollback algorithm will not work
BSONObjBuilder b;
b.append("_id", "minUnapplied");
// Note that another way to get this information is to
// request a heartbeat. That one will technically return
// a more up to date value for minUnapplied
BSONObj res = findOneFromConn(conn.get(), rsReplInfo, Query(b.done()));
GTID minUnapplied = getGTIDFromBSON("GTID", res);
if (GTID::cmp(minUnapplied, idToRollbackTo) < 0) {
log() << "Remote server has minUnapplied " << minUnapplied.toString() << \
" we want to rollback to " << idToRollbackTo.toString() << \
". Therefore, exiting and retrying." << rsLog;
return false;
}
return true;
}
Example 15: runRollback
void BackgroundSync::runRollback(OplogReader& r, uint64_t oplogTS) {
// starting from ourLast, we need to read the remote oplog
// backwards until we find an entry in the remote oplog
// that has the same GTID, timestamp, and hash as
// what we have in our oplog. If we don't find one that is within
// some reasonable timeframe, then we go fatal
GTID ourLast = theReplSet->gtidManager->getLiveState();
GTID idToRollbackTo;
uint64_t rollbackPointTS = 0;
uint64_t rollbackPointHash = 0;
incRBID();
try {
shared_ptr<DBClientCursor> rollbackCursor = r.getRollbackCursor(ourLast);
while (rollbackCursor->more()) {
BSONObj remoteObj = rollbackCursor->next();
GTID remoteGTID = getGTIDFromBSON("_id", remoteObj);
uint64_t remoteTS = remoteObj["ts"]._numberLong();
uint64_t remoteLastHash = remoteObj["h"].numberLong();
if (remoteTS + 1800*1000 < oplogTS) {
log() << "Rollback takes us too far back, throwing exception. remoteTS: " << remoteTS << " oplogTS: " << oplogTS << rsLog;
throw RollbackOplogException("replSet rollback too long a time period for a rollback (at least 30 minutes).");
break;
}
//now try to find an entry in our oplog with that GTID
BSONObjBuilder localQuery;
BSONObj localObj;
addGTIDToBSON("_id", remoteGTID, localQuery);
bool foundLocally = false;
{
LOCK_REASON(lockReason, "repl: looking up oplog entry for rollback");
Client::ReadContext ctx(rsoplog, lockReason);
Client::Transaction transaction(DB_SERIALIZABLE);
foundLocally = Collection::findOne(rsoplog, localQuery.done(), localObj);
transaction.commit();
}
if (foundLocally) {
GTID localGTID = getGTIDFromBSON("_id", localObj);
uint64_t localTS = localObj["ts"]._numberLong();
uint64_t localLastHash = localObj["h"].numberLong();
if (localLastHash == remoteLastHash &&
localTS == remoteTS &&
GTID::cmp(localGTID, remoteGTID) == 0
)
{
idToRollbackTo = localGTID;
rollbackPointTS = localTS;
rollbackPointHash = localLastHash;
log() << "found id to rollback to " << idToRollbackTo << rsLog;
break;
}
}
}
// At this point, either we have found the point to try to rollback to,
// or we have determined that we cannot rollback
if (idToRollbackTo.isInitial()) {
// we cannot rollback
throw RollbackOplogException("could not find ID to rollback to");
}
}
catch (DBException& e) {
log() << "Caught DBException during rollback " << e.toString() << rsLog;
throw RollbackOplogException("DBException while trying to find ID to rollback to: " + e.toString());
}
catch (std::exception& e2) {
log() << "Caught std::exception during rollback " << e2.what() << rsLog;
throw RollbackOplogException(str::stream() << "Exception while trying to find ID to rollback to: " << e2.what());
}
// proceed with the rollback to point idToRollbackTo
// probably ought to grab a global write lock while doing this
// I don't think we want oplog cursors reading from this machine
// while we are rolling back. Or at least do something to protect against this
// first, let's get all the operations that are being applied out of the way,
// we don't want to rollback an item in the oplog while simultaneously,
// the applier thread is applying it to the oplog
{
boost::unique_lock<boost::mutex> lock(_mutex);
while (_deque.size() > 0) {
log() << "waiting for applier to finish work before doing rollback " << rsLog;
_queueDone.wait(lock);
}
verifySettled();
}
// now let's tell the system we are going to rollback, to do so,
// abort live multi statement transactions, invalidate cursors, and
// change the state to RS_ROLLBACK
{
// so we know nothing is simultaneously occurring
RWLockRecursive::Exclusive e(operationLock);
LOCK_REASON(lockReason, "repl: killing all operations for rollback");
Lock::GlobalWrite lk(lockReason);
ClientCursor::invalidateAllCursors();
Client::abortLiveTransactions();
theReplSet->goToRollbackState();
}
try {
// now that we are settled, we have to take care of the GTIDManager
//......... the rest of this function is omitted here .........