本文整理汇总了C++中HostAndPort::empty方法的典型用法代码示例。如果您正苦于以下问题:C++ HostAndPort::empty方法的具体用法?C++ HostAndPort::empty怎么用?C++ HostAndPort::empty使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类HostAndPort
的用法示例。
在下文中一共展示了HostAndPort::empty方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: _shouldChangeSyncSource
/**
 * Decides whether this node should stop using 'syncSource' as its sync source.
 *
 * Returns true immediately when either our current sync target or the
 * candidate itself is empty (i.e. the source is gone). Otherwise defers to the
 * replication coordinator, which checks whether any other member's optime is
 * more than MaxSyncSourceLag seconds ahead of the current sync source.
 */
bool BackgroundSync::_shouldChangeSyncSource(const HostAndPort& syncSource) {
    const bool sourceGone = getSyncTarget().empty() || syncSource.empty();
    return sourceGone || _replCoord->shouldChangeSyncSource(syncSource);
}
示例2: run
// Thread entry point for the sync-source feedback thread.
//
// Loops until _shutdownSignaled is set: waits (up to _keepAliveInterval) for a
// position change, then forwards this node's replication progress to the
// current sync source via updateUpstream(). Any connection or update failure
// is retried after a short sleep, with _positionChanged re-set so the update
// is attempted again on the next iteration.
void SyncSourceFeedback::run() {
Client::initThread("SyncSourceFeedback");
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
while (true) { // breaks once _shutdownSignaled is true
{
stdx::unique_lock<stdx::mutex> lock(_mtx);
// Sleep until a position change arrives, shutdown is requested, or the
// keep-alive interval elapses (a timeout forces a periodic update).
while (!_positionChanged && !_shutdownSignaled) {
if (_cond.wait_for(lock, _keepAliveInterval) == stdx::cv_status::timeout) {
break;
}
}
if (_shutdownSignaled) {
break;
}
// Consume the pending position change under the lock.
_positionChanged = false;
}
auto txn = cc().makeOperationContext();
MemberState state = replCoord->getMemberState();
// Primaries (and nodes still starting up) have no upstream to report to.
if (state.primary() || state.startup()) {
_resetConnection();
continue;
}
const HostAndPort target = BackgroundSync::get()->getSyncTarget();
// Sync source changed: drop the stale connection so we reconnect below.
if (_syncTarget != target) {
_resetConnection();
_syncTarget = target;
}
if (!hasConnection()) {
// fix connection if need be
if (target.empty()) {
// No sync source available yet; retry shortly, keeping the update pending.
sleepmillis(500);
stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
if (!_connect(txn.get(), target)) {
// Connect attempt failed; retry shortly, keeping the update pending.
sleepmillis(500);
stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
}
Status status = updateUpstream(txn.get());
if (!status.isOK()) {
// Update failed; re-flag the position change so the next iteration retries.
sleepmillis(500);
stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
}
}
}
示例3: uassert
/**
 * Constructs a Reporter that forwards replication progress for this node to
 * 'target', scheduling its work on 'executor' and sourcing position updates
 * from 'progressManager'.
 *
 * Throws (via uassert, ErrorCodes::BadValue) when 'executor' or
 * 'progressManager' is null, or when 'target' is empty.
 */
Reporter::Reporter(ReplicationExecutor* executor,
                   ReplicationProgressManager* progressManager,
                   const HostAndPort& target)
    : _executor(executor),
      _updatePositionSource(progressManager),
      _target(target),
      _status(Status::OK()),
      _willRunAgain(false),
      _active(false) {
    // Validate all constructor arguments up front so the Reporter can never
    // exist in a half-initialized, unusable state.
    uassert(ErrorCodes::BadValue, "null replication executor", executor);
    uassert(ErrorCodes::BadValue, "null replication progress manager", progressManager);
    uassert(ErrorCodes::BadValue, "target name cannot be empty", !target.empty());
}
示例4: Status
/**
 * Resolves a host in this replica set that satisfies 'readPref'.
 *
 * Returns ReplicaSetNotFound when no monitor exists for the set. When the
 * monitor yields no matching host, returns NotMaster for primary-only read
 * preferences and FailedToSatisfyReadPreference otherwise.
 */
StatusWith<HostAndPort> RemoteCommandTargeterRS::findHost(const ReadPreferenceSetting& readPref) {
    // Without a monitor we cannot resolve any host for this set.
    if (!_rsMonitor) {
        return Status(ErrorCodes::ReplicaSetNotFound,
                      str::stream() << "unknown replica set " << _rsName);
    }

    const HostAndPort host = _rsMonitor->getHostOrRefresh(readPref);
    if (!host.empty()) {
        return host;
    }

    // No host satisfied the read preference; the error code depends on whether
    // the caller insisted on reading from the primary.
    if (readPref.pref == ReadPreference::PrimaryOnly) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "No master found for set " << _rsName);
    }
    return Status(ErrorCodes::FailedToSatisfyReadPreference,
                  str::stream() << "could not find host matching read preference "
                                << readPref.toString() << " for set " << _rsName);
}
示例5: _summarizeStatus
// Builds the replSetGetStatus response into 'b': one sub-document per member
// (ourselves included) with health, state, optimes, heartbeat info and sync
// source, followed by set-level fields.
// NOTE: this function is truncated in this excerpt (see the final comment).
void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
vector<BSONObj> v;
const Member *_self = this->_self;
verify( _self );
MemberState myState = box.getState();
const HostAndPort syncTarget = BackgroundSync::get()->getSyncTarget();
// add self
{
BSONObjBuilder bb;
bb.append("_id", (int) _self->id());
bb.append("name", _self->fullName());
// We are always "healthy" from our own point of view.
bb.append("health", 1.0);
bb.append("state", (int)myState.s);
bb.append("stateStr", myState.toString());
bb.append("uptime", (unsigned)(time(0) - serverGlobalParams.started));
// Arbiters carry no data, so they have no optime to report.
if (!_self->config().arbiterOnly) {
bb.appendTimestamp("optime", lastOpTimeWritten.asDate());
bb.appendDate("optimeDate", lastOpTimeWritten.getSecs() * 1000LL);
}
int maintenance = _maintenanceMode;
if (maintenance) {
bb.append("maintenanceMode", maintenance);
}
// Only report a sync source when we are actually syncing (not primary,
// not removed from the set).
if ( !syncTarget.empty() &&
(myState != MemberState::RS_PRIMARY) &&
(myState != MemberState::RS_REMOVED) ) {
bb.append("syncingTo", syncTarget.toString());
}
if (theReplSet) {
string s = theReplSet->hbmsg();
if( !s.empty() )
bb.append("infoMessage", s);
if (myState == MemberState::RS_PRIMARY) {
bb.appendTimestamp("electionTime", theReplSet->getElectionTime().asDate());
bb.appendDate("electionDate", theReplSet->getElectionTime().getSecs() * 1000LL);
}
}
bb.append("self", true);
v.push_back(bb.obj());
}
// Now add every other member, using heartbeat-derived info.
Member *m =_members.head();
while( m ) {
BSONObjBuilder bb;
bb.append("_id", (int) m->id());
bb.append("name", m->fullName());
double h = m->hbinfo().health;
bb.append("health", h);
bb.append("state", (int) m->state().s);
if( h == 0 ) {
// if we can't connect the state info is from the past and could be confusing to show
bb.append("stateStr", "(not reachable/healthy)");
}
else {
bb.append("stateStr", m->state().toString());
}
bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
if (!m->config().arbiterOnly) {
bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);
}
bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
bb.appendTimeT("lastHeartbeatRecv", m->hbinfo().lastHeartbeatRecv);
bb.append("pingMs", m->hbinfo().ping);
string s = m->lhb();
if( !s.empty() )
bb.append("lastHeartbeatMessage", s);
if (m->hbinfo().authIssue) {
bb.append("authenticated", false);
}
string syncingTo = m->hbinfo().syncingTo;
if (!syncingTo.empty()) {
bb.append("syncingTo", syncingTo);
}
if (m->state() == MemberState::RS_PRIMARY) {
bb.appendTimestamp("electionTime", m->hbinfo().electionTime.asDate());
bb.appendDate("electionDate", m->hbinfo().electionTime.getSecs() * 1000LL);
}
v.push_back(bb.obj());
m = m->next();
}
// Sort member documents for stable output, then emit set-level fields.
sort(v.begin(), v.end());
b.append("set", name());
b.appendTimeT("date", time(0));
b.append("myState", myState.s);
if ( !syncTarget.empty() &&
(myState != MemberState::RS_PRIMARY) &&
(myState != MemberState::RS_REMOVED) ) {
b.append("syncingTo", syncTarget.toString());
//......... (part of the code omitted in this excerpt) .........
示例6: connectToSyncSource
// Chooses a sync source via the replication coordinator and connects this
// OplogReader to it. Loops until a usable source is connected, no candidate
// is available at all, or every candidate is ahead of our last fetched op
// ("too stale") -- in which case minValid is recorded and the node attempts
// to transition to RECOVERING.
void OplogReader::connectToSyncSource(OperationContext* txn,
const OpTime& lastOpTimeFetched,
ReplicationCoordinator* replCoord) {
// Sentinel optime (current wall-clock second, max term) used to detect
// whether we ever recorded a real "oldest available" optime below.
const Timestamp sentinelTimestamp(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
OpTime oldestOpTimeSeen = sentinel;
invariant(conn() == NULL);
while (true) {
HostAndPort candidate = replCoord->chooseNewSyncSource(lastOpTimeFetched.getTimestamp());
if (candidate.empty()) {
if (oldestOpTimeSeen == sentinel) {
// If, in this invocation of connectToSyncSource(), we did not successfully
// connect to any node ahead of us,
// we apparently have no sync sources to connect to.
// This situation is common; e.g. if there are no writes to the primary at
// the moment.
return;
}
// Connected to at least one member, but in all cases we were too stale to use them
// as a sync source.
error() << "too stale to catch up";
log() << "our last optime : " << lastOpTimeFetched;
log() << "oldest available is " << oldestOpTimeSeen;
log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
setMinValid(txn, oldestOpTimeSeen);
bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!worked) {
warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
<< ". Current state: " << replCoord->getMemberState();
}
return;
}
// Unreachable candidate: blacklist briefly and try the next one.
if (!connect(candidate)) {
LOG(2) << "can't connect to " << candidate.toString() << " to read operations";
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
continue;
}
// Read the first (oldest) op and confirm that it's not newer than our last
// fetched op. Otherwise, we have fallen off the back of that source's oplog.
BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
OpTime remoteOldOpTime = fassertStatusOK(28776, OpTime::parseFromBSON(remoteOldestOp));
// remoteOldOpTime may come from a very old config, so we cannot compare their terms.
if (!lastOpTimeFetched.isNull() &&
lastOpTimeFetched.getTimestamp() < remoteOldOpTime.getTimestamp()) {
// We're too stale to use this sync source.
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(1));
// Track the most lenient (smallest) oldest-op we saw, for the error report.
if (oldestOpTimeSeen.getTimestamp() > remoteOldOpTime.getTimestamp()) {
warning() << "we are too stale to use " << candidate.toString()
<< " as a sync source";
oldestOpTimeSeen = remoteOldOpTime;
}
continue;
}
// Got a valid sync source.
return;
} // while (true)
}
示例7: run
// Legacy sync-source feedback thread (pre-executor variant with an explicit
// handshake step). Waits for position changes or handshake requests, then
// performs the handshake and/or forwards this node's replication position
// upstream. Failed steps re-set the corresponding flag so they are retried.
void SyncSourceFeedback::run() {
Client::initThread("SyncSourceFeedbackThread");
OperationContextImpl txn;
bool positionChanged = false;
bool handshakeNeeded = false;
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
while (!inShutdown()) { // TODO(spencer): Remove once legacy repl coordinator is gone.
{
boost::unique_lock<boost::mutex> lock(_mtx);
// Block until there is work (position change or handshake) or shutdown.
while (!_positionChanged && !_handshakeNeeded && !_shutdownSignaled) {
_cond.wait(lock);
}
if (_shutdownSignaled) {
break;
}
// Snapshot and clear the shared flags while holding the lock.
positionChanged = _positionChanged;
handshakeNeeded = _handshakeNeeded;
_positionChanged = false;
_handshakeNeeded = false;
if (handshakeNeeded) {
positionChanged = true; // Always update position after sending a handshake
}
}
MemberState state = replCoord->getCurrentMemberState();
// Primaries (and nodes starting up) have no upstream to report to.
if (state.primary() || state.startup()) {
_resetConnection();
continue;
}
const HostAndPort target = BackgroundSync::get()->getSyncTarget();
// Sync source changed: drop the stale connection so we reconnect below.
if (_syncTarget != target) {
_resetConnection();
_syncTarget = target;
}
if (!hasConnection()) {
// fix connection if need be
if (target.empty()) {
sleepmillis(500);
continue;
}
if (!_connect(&txn, target)) {
sleepmillis(500);
continue;
}
// A fresh connection always requires a new handshake.
handshakeNeeded = true;
}
if (handshakeNeeded) {
if (!replHandshake(&txn)) {
// Handshake failed; re-flag it so the next iteration retries.
boost::unique_lock<boost::mutex> lock(_mtx);
_handshakeNeeded = true;
continue;
}
}
if (positionChanged) {
Status status = updateUpstream(&txn);
if (!status.isOK()) {
// Update failed; re-flag the position change (and the handshake if
// the upstream node no longer knows us).
boost::unique_lock<boost::mutex> lock(_mtx);
_positionChanged = true;
if (status == ErrorCodes::NodeNotFound) {
_handshakeNeeded = true;
}
}
}
}
cc().shutdown();
}
示例8: forceSyncFrom
// Handles the replSetSyncFrom command: forces this node to sync from 'host'.
// Runs a sequence of sanity checks -- each failing check returns a distinct
// error status, so the check order determines which error the caller sees --
// before recording the member in _forceSyncTarget. On success, the previous
// sync target (if any) is reported in 'result' as "prevSyncTarget".
Status ReplSetImpl::forceSyncFrom(const string& host, BSONObjBuilder* result) {
lock lk(this);
// initial sanity check
if (iAmArbiterOnly()) {
return Status(ErrorCodes::NotSecondary, "arbiters don't sync");
}
if (box.getState().primary()) {
return Status(ErrorCodes::NotSecondary, "primaries don't sync");
}
if (_self != NULL && host == _self->fullName()) {
return Status(ErrorCodes::InvalidOptions, "I cannot sync from myself");
}
// find the member we want to sync from
Member *newTarget = 0;
for (Member *m = _members.head(); m; m = m->next()) {
if (m->fullName() == host) {
newTarget = m;
break;
}
}
// do some more sanity checks
if (!newTarget) {
// this will also catch if someone tries to sync a member from itself, as _self is not
// included in the _members list.
return Status(ErrorCodes::NodeNotFound, "could not find member in replica set");
}
if (newTarget->config().arbiterOnly) {
return Status(ErrorCodes::InvalidOptions, "I cannot sync from an arbiter");
}
if (!newTarget->config().buildIndexes && myConfig().buildIndexes) {
return Status(ErrorCodes::InvalidOptions,
"I cannot sync from a member who does not build indexes");
}
if (newTarget->hbinfo().authIssue) {
return Status(ErrorCodes::Unauthorized,
"not authorized to communicate with " + newTarget->fullName());
}
if (newTarget->hbinfo().health == 0) {
return Status(ErrorCodes::HostUnreachable, "I cannot reach the requested member");
}
// Syncing from a member that is far behind us likely won't work, but this is
// an explicit operator request, so warn rather than fail.
if (newTarget->hbinfo().opTime.getSecs()+10 < lastOpTimeWritten.getSecs()) {
log() << "attempting to sync from " << newTarget->fullName()
<< ", but its latest opTime is " << newTarget->hbinfo().opTime.getSecs()
<< " and ours is " << lastOpTimeWritten.getSecs() << " so this may not work"
<< rsLog;
result->append("warning", "requested member is more than 10 seconds behind us");
// not returning false, just warning
}
// record the previous member we were syncing from
const HostAndPort prev = BackgroundSync::get()->getSyncTarget();
if (!prev.empty()) {
result->append("prevSyncTarget", prev.toString());
}
// finally, set the new target
_forceSyncTarget = newTarget;
return Status::OK();
}
示例9: connectOplogReader
// Connects 'reader' to a freshly-chosen sync source. Loops over candidates
// from the replication coordinator, blacklisting members that are unreachable,
// have a malformed oplog, or are too far ahead of our last fetched op. If
// every candidate was too stale, records minValid and drops into RECOVERING.
void BackgroundSync::connectOplogReader(OperationContext* txn,
ReplicationCoordinatorImpl* replCoordImpl,
OplogReader* reader) {
OpTime lastOpTimeFetched;
{
// Snapshot _lastOpTimeFetched under the lock; it may change concurrently.
boost::unique_lock<boost::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
}
Date_t now(curTimeMillis64());
// Sentinel (current time, 0) used to detect whether we ever recorded a real
// "oldest available" optime from any candidate below.
OpTime oldestOpTimeSeen(now,0);
while (true) {
HostAndPort candidate = replCoordImpl->chooseNewSyncSource();
if (candidate.empty()) {
if (oldestOpTimeSeen == OpTime(now,0)) {
// If, in this invocation of connectOplogReader(), we did not successfully
// connect to any node ahead of us,
// we apparently have no sync sources to connect to.
// This situation is common; e.g. if there are no writes to the primary at
// the moment.
return;
}
// Connected to at least one member, but in all cases we were too stale to use them
// as a sync source.
log() << "replSet error RS102 too stale to catch up" << rsLog;
log() << "replSet our last optime : " << lastOpTimeFetched.toStringLong() << rsLog;
log() << "replSet oldest available is " << oldestOpTimeSeen.toStringLong() <<
rsLog;
log() << "replSet "
"See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember"
<< rsLog;
sethbmsg("error RS102 too stale to catch up");
theReplSet->setMinValid(txn, oldestOpTimeSeen);
replCoordImpl->setFollowerMode(MemberState::RS_RECOVERING);
return;
}
// Unreachable candidate: blacklist for 10 seconds and try the next one.
if (!reader->connect(candidate)) {
LOG(2) << "replSet can't connect to " << candidate.toString() <<
" to read operations" << rsLog;
reader->resetConnection();
replCoordImpl->blacklistSyncSource(candidate, Date_t(curTimeMillis64() + 10*1000));
continue;
}
// Read the first (oldest) op and confirm that it's not newer than our last
// fetched op. Otherwise, we have fallen off the back of that source's oplog.
BSONObj remoteOldestOp(reader->findOne(rsoplog, Query()));
BSONElement tsElem(remoteOldestOp["ts"]);
if (tsElem.type() != Timestamp) {
// This member's got a bad op in its oplog.
warning() << "oplog invalid format on node " << candidate.toString();
reader->resetConnection();
// Blacklist for 10 minutes; a malformed oplog won't fix itself quickly.
replCoordImpl->blacklistSyncSource(candidate,
Date_t(curTimeMillis64() + 600*1000));
continue;
}
OpTime remoteOldOpTime = tsElem._opTime();
if (lastOpTimeFetched < remoteOldOpTime) {
// We're too stale to use this sync source.
reader->resetConnection();
replCoordImpl->blacklistSyncSource(candidate,
Date_t(curTimeMillis64() + 600*1000));
// Track the smallest oldest-op we saw, for the error report above.
if (oldestOpTimeSeen > remoteOldOpTime) {
warning() << "we are too stale to use " << candidate.toString() <<
" as a sync source";
oldestOpTimeSeen = remoteOldOpTime;
}
continue;
}
// Got a valid sync source.
return;
} // while (true)
}
示例10: run
// Executor-based sync-source feedback thread. Waits for position changes (or
// the keep-alive timeout), tracks sync-source changes from 'bgsync', and uses
// a Reporter on 'executor' to push replSetUpdatePosition commands upstream.
// Loops until _shutdownSignaled is set.
void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* bgsync) {
Client::initThread("SyncSourceFeedback");
HostAndPort syncTarget;
// keepAliveInterval indicates how frequently to forward progress in the absence of updates.
Milliseconds keepAliveInterval(0);
while (true) { // breaks once _shutdownSignaled is true
auto txn = cc().makeOperationContext();
// Lazily initialize the keep-alive interval from the replica set config.
if (keepAliveInterval == Milliseconds(0)) {
keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx);
}
{
// Take SyncSourceFeedback lock before calling into ReplicationCoordinator
// to avoid deadlock because ReplicationCoordinator could conceivably calling back into
// this class.
stdx::unique_lock<stdx::mutex> lock(_mtx);
// Wait for a position change; on keep-alive timeout, fall through to send
// an update anyway unless we are primary or still starting up.
while (!_positionChanged && !_shutdownSignaled) {
if (_cond.wait_for(lock, keepAliveInterval.toSystemDuration()) ==
stdx::cv_status::timeout) {
MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState();
if (!(state.primary() || state.startup())) {
break;
}
}
}
if (_shutdownSignaled) {
break;
}
_positionChanged = false;
}
{
// Re-check state under the lock: primaries and starting-up nodes have no
// upstream to report to.
stdx::lock_guard<stdx::mutex> lock(_mtx);
MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState();
if (state.primary() || state.startup()) {
continue;
}
}
const HostAndPort target = bgsync->getSyncTarget();
// Log sync source changes.
if (target.empty()) {
if (syncTarget != target) {
syncTarget = target;
}
// Loop back around again; the keepalive functionality will cause us to retry
continue;
}
if (syncTarget != target) {
LOG(1) << "setting syncSourceFeedback to " << target;
syncTarget = target;
// Update keepalive value from config.
auto oldKeepAliveInterval = keepAliveInterval;
keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx);
if (oldKeepAliveInterval != keepAliveInterval) {
LOG(1) << "new syncSourceFeedback keep alive duration = " << keepAliveInterval
<< " (previously " << oldKeepAliveInterval << ")";
}
}
// A fresh Reporter per iteration; it sends replSetUpdatePosition upstream.
Reporter reporter(
executor,
makePrepareReplSetUpdatePositionCommandFn(txn.get(), _mtx, syncTarget, bgsync),
syncTarget,
keepAliveInterval);
{
// Publish the reporter so other threads (e.g. shutdown) can reach it.
stdx::lock_guard<stdx::mutex> lock(_mtx);
_reporter = &reporter;
}
ON_BLOCK_EXIT([this]() {
stdx::lock_guard<stdx::mutex> lock(_mtx);
_reporter = nullptr;
});
auto status = _updateUpstream(txn.get(), bgsync);
if (!status.isOK()) {
LOG(1) << "The replication progress command (replSetUpdatePosition) failed and will be "
"retried: "
<< status;
}
}
}
示例11: connectToSyncSource
// Legacy variant: chooses a sync source via the replication coordinator and
// connects this OplogReader to it. Loops until a usable source is connected,
// no candidate is available, or every candidate was too stale -- in which case
// minValid is recorded and the node attempts to transition to RECOVERING.
void OplogReader::connectToSyncSource(OperationContext* txn,
OpTime lastOpTimeFetched,
ReplicationCoordinator* replCoord) {
// Sentinel optime (current wall-clock second, 0) used to detect whether we
// ever recorded a real "oldest available" optime from any candidate.
const OpTime sentinel(Milliseconds(curTimeMillis64()).total_seconds(), 0);
OpTime oldestOpTimeSeen = sentinel;
invariant(conn() == NULL);
while (true) {
HostAndPort candidate = replCoord->chooseNewSyncSource(lastOpTimeFetched);
if (candidate.empty()) {
if (oldestOpTimeSeen == sentinel) {
// If, in this invocation of connectToSyncSource(), we did not successfully
// connect to any node ahead of us,
// we apparently have no sync sources to connect to.
// This situation is common; e.g. if there are no writes to the primary at
// the moment.
return;
}
// Connected to at least one member, but in all cases we were too stale to use them
// as a sync source.
log() << "replSet error RS102 too stale to catch up";
log() << "replSet our last optime : " << lastOpTimeFetched.toStringLong();
log() << "replSet oldest available is " << oldestOpTimeSeen.toStringLong();
log() << "replSet "
"See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
setMinValid(txn, oldestOpTimeSeen);
bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!worked) {
warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
<< ". Current state: " << replCoord->getMemberState();
}
return;
}
// Unreachable candidate: blacklist for 10 seconds and try the next one.
if (!connect(candidate)) {
LOG(2) << "replSet can't connect to " << candidate.toString() << " to read operations";
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t(curTimeMillis64() + 10 * 1000));
continue;
}
// Read the first (oldest) op and confirm that it's not newer than our last
// fetched op. Otherwise, we have fallen off the back of that source's oplog.
BSONObj remoteOldestOp(findOne(rsoplog, Query()));
BSONElement tsElem(remoteOldestOp["ts"]);
if (tsElem.type() != Timestamp) {
// This member's got a bad op in its oplog.
warning() << "oplog invalid format on node " << candidate.toString();
resetConnection();
// Blacklist for 10 minutes; a malformed oplog won't fix itself quickly.
replCoord->blacklistSyncSource(candidate, Date_t(curTimeMillis64() + 600 * 1000));
continue;
}
OpTime remoteOldOpTime = tsElem._opTime();
if (!lastOpTimeFetched.isNull() && lastOpTimeFetched < remoteOldOpTime) {
// We're too stale to use this sync source.
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t(curTimeMillis64() + 600 * 1000));
// Track the smallest oldest-op we saw, for the error report above.
if (oldestOpTimeSeen > remoteOldOpTime) {
warning() << "we are too stale to use " << candidate.toString()
<< " as a sync source";
oldestOpTimeSeen = remoteOldOpTime;
}
continue;
}
// Got a valid sync source.
return;
} // while (true)
}
示例12: connectToSyncSource
// Chooses a sync source and connects this OplogReader to it, additionally
// verifying (when 'requiredOpTime' is non-null) that the candidate's oplog
// contains that optime. Candidates that are unreachable, too far ahead of our
// last fetched op, or missing the required optime are blacklisted and the loop
// tries the next one. If every candidate was too stale, the node enters
// maintenance mode and attempts to transition to RECOVERING.
void OplogReader::connectToSyncSource(OperationContext* txn,
const OpTime& lastOpTimeFetched,
const OpTime& requiredOpTime,
ReplicationCoordinator* replCoord) {
// Sentinel optime (current wall-clock second, max term) used to detect
// whether we ever recorded a real "oldest available" optime below.
const Timestamp sentinelTimestamp(duration_cast<Seconds>(Date_t::now().toDurationSinceEpoch()),
0);
const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
OpTime oldestOpTimeSeen = sentinel;
invariant(conn() == NULL);
while (true) {
HostAndPort candidate = replCoord->chooseNewSyncSource(lastOpTimeFetched);
if (candidate.empty()) {
if (oldestOpTimeSeen == sentinel) {
// If, in this invocation of connectToSyncSource(), we did not successfully
// connect to any node ahead of us,
// we apparently have no sync sources to connect to.
// This situation is common; e.g. if there are no writes to the primary at
// the moment.
return;
}
// Connected to at least one member, but in all cases we were too stale to use them
// as a sync source.
error() << "too stale to catch up -- entering maintenance mode";
log() << "our last optime : " << lastOpTimeFetched;
log() << "oldest available is " << oldestOpTimeSeen;
log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
auto status = replCoord->setMaintenanceMode(true);
if (!status.isOK()) {
warning() << "Failed to transition into maintenance mode: " << status;
}
bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!worked) {
warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
<< ". Current state: " << replCoord->getMemberState();
}
return;
}
// Unreachable candidate: blacklist for 10 seconds and try the next one.
if (!connect(candidate)) {
LOG(2) << "can't connect to " << candidate.toString() << " to read operations";
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
continue;
}
// Read the first (oldest) op and confirm that it's not newer than our last
// fetched op. Otherwise, we have fallen off the back of that source's oplog.
BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
OpTime remoteOldOpTime =
fassertStatusOK(28776, OpTime::parseFromOplogEntry(remoteOldestOp));
// remoteOldOpTime may come from a very old config, so we cannot compare their terms.
if (!lastOpTimeFetched.isNull() &&
lastOpTimeFetched.getTimestamp() < remoteOldOpTime.getTimestamp()) {
// We're too stale to use this sync source.
resetConnection();
replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(1));
// Track the smallest oldest-op we saw, for the error report above.
if (oldestOpTimeSeen.getTimestamp() > remoteOldOpTime.getTimestamp()) {
warning() << "we are too stale to use " << candidate.toString()
<< " as a sync source";
oldestOpTimeSeen = remoteOldOpTime;
}
continue;
}
// Check if sync source contains required optime.
if (!requiredOpTime.isNull()) {
// This query is structured so that it is executed on the sync source using the oplog
// start hack (oplogReplay=true and $gt/$gte predicate over "ts").
auto ts = requiredOpTime.getTimestamp();
tailingQuery(rsOplogName.c_str(), BSON("ts" << BSON("$gte" << ts << "$lte" << ts)));
auto status = _compareRequiredOpTimeWithQueryResponse(requiredOpTime);
if (!status.isOK()) {
const auto blacklistDuration = Seconds(60);
const auto until = Date_t::now() + blacklistDuration;
warning() << "We cannot use " << candidate.toString()
<< " as a sync source because it does not contain the necessary "
"operations for us to reach a consistent state: "
<< status << " last fetched optime: " << lastOpTimeFetched
<< ". required optime: " << requiredOpTime
<< ". Blacklisting this sync source for " << blacklistDuration
<< " until: " << until;
resetConnection();
replCoord->blacklistSyncSource(candidate, until);
continue;
}
// Discard the probe cursor before handing the connection to the caller.
resetCursor();
}
// TODO: If we were too stale (recovering with maintenance mode on), then turn it off, to
// allow becoming secondary/etc.
// Got a valid sync source.
return;
} // while (true)
}