

C++ BatchedCommandRequest::getBatchType Method Code Examples

This article collects typical usage examples of the C++ method BatchedCommandRequest::getBatchType. If you are wondering what BatchedCommandRequest::getBatchType does, how to call it, or what real-world code using it looks like, the curated examples below may help. You can also explore further usage examples of the BatchedCommandRequest class, to which this method belongs.


A total of 13 code examples of BatchedCommandRequest::getBatchType are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
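
Before diving into the examples, the following minimal sketch (written for this article, not taken from the MongoDB source; the helper describeBatch and its message strings are purely illustrative) shows the pattern that recurs throughout the snippets below: calling getBatchType() to branch on whether a BatchedCommandRequest carries inserts, updates, or deletes, typically alongside sizeWriteOps() to know how many write operations the batch holds.

#include <string>  // std::string, std::to_string
// Assumes the MongoDB header declaring BatchedCommandRequest is available.

std::string describeBatch( const BatchedCommandRequest& request ) {
    // Branch on the batch type, mirroring the insert/update/delete dispatch
    // used in WriteBatchExecutor::bulkExecute (Example 1) and elsewhere.
    const size_t numOps = request.sizeWriteOps();
    switch ( request.getBatchType() ) {
    case BatchedCommandRequest::BatchType_Insert:
        return "insert batch with " + std::to_string( numOps ) + " document(s)";
    case BatchedCommandRequest::BatchType_Update:
        return "update batch with " + std::to_string( numOps ) + " update op(s)";
    case BatchedCommandRequest::BatchType_Delete:
        return "delete batch with " + std::to_string( numOps ) + " delete op(s)";
    }
    return "unknown batch type";  // defensive fallback; the enum has only the three values above
}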

Example 1: bulkExecute

    void WriteBatchExecutor::bulkExecute( const BatchedCommandRequest& request,
                                          std::vector<BatchedUpsertDetail*>* upsertedIds,
                                          std::vector<WriteErrorDetail*>* errors ) {

        if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert ) {
            execInserts( request, errors );
        }
        else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) {
            for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {

                WriteErrorDetail* error = NULL;
                BSONObj upsertedId;
                execUpdate( BatchItemRef( &request, i ), &upsertedId, &error );

                if ( !upsertedId.isEmpty() ) {
                    BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail;
                    batchUpsertedId->setIndex( i );
                    batchUpsertedId->setUpsertedID( upsertedId );
                    upsertedIds->push_back( batchUpsertedId );
                }

                if ( error ) {
                    errors->push_back( error );
                    if ( request.getOrdered() )
                        break;
                }
            }
        }
        else {
            dassert( request.getBatchType() == BatchedCommandRequest::BatchType_Delete );
            for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {

                WriteErrorDetail* error = NULL;
                execRemove( BatchItemRef( &request, i ), &error );

                if ( error ) {
                    errors->push_back( error );
                    if ( request.getOrdered() )
                        break;
                }
            }
        }

        // Fill in stale version errors for unordered batches (update/delete can't do this on own)
        if ( !errors->empty() && !request.getOrdered() ) {

            const WriteErrorDetail* finalError = errors->back();

            if ( finalError->getErrCode() == ErrorCodes::StaleShardVersion ) {
                for ( size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++ ) {
                    WriteErrorDetail* dupStaleError = new WriteErrorDetail;
                    finalError->cloneTo( dupStaleError );
                    errors->push_back( dupStaleError );
                }
            }
        }
    }
Developer: AndrewCEmil | Project: mongo | Lines: 57 | Source: batch_executor.cpp

Example 2: containsNoIDUpsert

bool BatchedCommandRequest::containsNoIDUpsert(const BatchedCommandRequest& request) {
    if (request.getBatchType() != BatchedCommandRequest::BatchType_Update)
        return false;

    const vector<BatchedUpdateDocument*>& updates = request.getUpdateRequest()->getUpdates();

    for (vector<BatchedUpdateDocument*>::const_iterator it = updates.begin(); it != updates.end();
         ++it) {
        const BatchedUpdateDocument* updateDoc = *it;
        if (updateDoc->getUpsert() && updateDoc->getQuery()["_id"].eoo())
            return true;
    }

    return false;
}
Developer: qihsh | Project: mongo | Lines: 15 | Source: batched_command_request.cpp

Example 3: executeBatch


//......... some of the code has been omitted here .........

        if ( staleBatch ) {

            const BatchedRequestMetadata* requestMetadata = request.getMetadata();
            dassert( requestMetadata );

            // Make sure our shard name is set or is the same as what was set previously
            if ( shardingState.setShardName( requestMetadata->getShardName() ) ) {

                //
                // First, we refresh metadata if we need to based on the requested version.
                //

                ChunkVersion latestShardVersion;
                shardingState.refreshMetadataIfNeeded( request.getTargetingNS(),
                                                       requestMetadata->getShardVersion(),
                                                       &latestShardVersion );

                // Report if we're still changing our metadata
                // TODO: Better reporting per-collection
                if ( shardingState.inCriticalMigrateSection() ) {
                    noteInCriticalSection( writeErrors.back() );
                }

                if ( queueForMigrationCommit ) {

                    //
                    // Queue up for migration to end - this allows us to be sure that clients will
                    // not repeatedly try to refresh metadata that is not yet written to the config
                    // server.  Not necessary for correctness.
                    // Exposed as optional parameter to allow testing of queuing behavior with
                    // different network timings.
                    //

                    const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();

                    //
                    // Only wait if we're an older version (in the current collection epoch) and
                    // we're not write compatible, implying that the current migration is affecting
                    // writes.
                    //

                    if ( requestShardVersion.isOlderThan( latestShardVersion ) &&
                         !requestShardVersion.isWriteCompatibleWith( latestShardVersion ) ) {

                        while ( shardingState.inCriticalMigrateSection() ) {

                            log() << "write request to old shard version "
                                  << requestMetadata->getShardVersion().toString()
                                  << " waiting for migration commit" << endl;

                            shardingState.waitTillNotInCriticalSection( 10 /* secs */);
                        }
                    }
                }
            }
            else {
                // If our shard name is stale, our version must have been stale as well
                dassert( writeErrors.size() == request.sizeWriteOps() );
            }
        }

        //
        // Construct response
        //

        response->setOk( true );

        if ( !silentWC ) {

            if ( upserted.size() ) {
                response->setUpsertDetails( upserted );
            }

            if ( writeErrors.size() ) {
                response->setErrDetails( writeErrors );
            }

            if ( wcError.get() ) {
                response->setWriteConcernError( wcError.release() );
            }

            const repl::ReplicationCoordinator::Mode replMode =
                    repl::getGlobalReplicationCoordinator()->getReplicationMode();
            if (replMode != repl::ReplicationCoordinator::modeNone) {
                response->setLastOp( _client->getLastOp() );
                if (replMode == repl::ReplicationCoordinator::modeReplSet) {
                    response->setElectionId(repl::theReplSet->getElectionId());
                }
            }

            // Set the stats for the response
            response->setN( _stats->numInserted + _stats->numUpserted + _stats->numMatched
                            + _stats->numDeleted );
            if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update )
                response->setNModified( _stats->numModified );
        }

        dassert( response->isValid( NULL ) );
    }
Developer: AndrewCEmil | Project: mongo | Lines: 101 | Source: batch_executor.cpp

Example 4: executeBatch


//......... some of the code has been omitted here .........
                    // If the batch is NULL, we sent it previously, so skip
                    if ( nextBatch == NULL ) continue;

                    // Figure out what host we need to dispatch our targeted batch
                    ConnectionString shardHost;
                    Status resolveStatus = _resolver->chooseWriteHost( nextBatch->getEndpoint()
                                                                           .shardName,
                                                                       &shardHost );
                    if ( !resolveStatus.isOK() ) {

                        ++_stats->numResolveErrors;

                        // Record a resolve failure
                        // TODO: It may be necessary to refresh the cache if stale, or maybe just
                        // cancel and retarget the batch
                        WriteErrorDetail error;
                        buildErrorFrom( resolveStatus, &error );
                        batchOp.noteBatchError( *nextBatch, error );

                        // We're done with this batch
                        *it = NULL;
                        --numToSend;
                        continue;
                    }

                    // If we already have a batch for this host, wait until the next time
                    HostBatchMap::iterator pendingIt = pendingBatches.find( shardHost );
                    if ( pendingIt != pendingBatches.end() ) continue;

                    //
                    // We now have all the info needed to dispatch the batch
                    //

                    BatchedCommandRequest request( clientRequest.getBatchType() );
                    batchOp.buildBatchRequest( *nextBatch, &request );

                    // Internally we use full namespaces for request/response, but we send the
                    // command to a database with the collection name in the request.
                    NamespaceString nss( request.getNS() );
                    request.setNS( nss.coll() );

                    _dispatcher->addCommand( shardHost, nss.db(), request );

                    // Indicate we're done by setting the batch to NULL
                    // We'll only get duplicate hostEndpoints if we have broadcast and non-broadcast
                    // endpoints for the same host, so this should be pretty efficient without
                    // moving stuff around.
                    *it = NULL;

                    // Recv-side is responsible for cleaning up the nextBatch when used
                    pendingBatches.insert( make_pair( shardHost, nextBatch ) );
                }

                // Send them all out
                _dispatcher->sendAll();
                numSent += pendingBatches.size();

                //
                // Recv side
                //

                while ( _dispatcher->numPending() > 0 ) {

                    // Get the response
                    ConnectionString shardHost;
                    BatchedCommandResponse response;
Developer: 504com | Project: mongo | Lines: 67 | Source: batch_write_exec.cpp

Example 5: executeBatch

    void BatchWriteExec::executeBatch( const BatchedCommandRequest& clientRequest,
                                       BatchedCommandResponse* clientResponse ) {

        BatchWriteOp batchOp;
        batchOp.initClientRequest( &clientRequest );

        int numTargetErrors = 0;
        int numStaleBatches = 0;

        for ( int rounds = 0; !batchOp.isFinished(); rounds++ ) {

            //
            // Refresh the targeter if we need to (no-op if nothing stale)
            //

            Status refreshStatus = _targeter->refreshIfNeeded();

            if ( !refreshStatus.isOK() ) {

                // It's okay if we can't refresh, we'll just record errors for the ops if
                // needed.
                warning() << "could not refresh targeter" << causedBy( refreshStatus.reason() )
                          << endl;
            }

            //
            // Get child batches to send
            //

            vector<TargetedWriteBatch*> childBatches;

            //
            // Targeting errors can be caused by remote metadata changing (the collection could have
            // been dropped and recreated, for example with a new shard key).  If a remote metadata
            // change occurs *before* a client sends us a batch, we need to make sure that we don't
            // error out just because we're staler than the client - otherwise mongos will have
            // unpredictable behavior.
            //
            // (If a metadata change happens *during* or *after* a client sends us a batch, however,
            // we make no guarantees about delivery.)
            //
            // For this reason, we don't record targeting errors until we've refreshed our targeting
            // metadata at least once *after* receiving the client batch - at that point, we know:
            //
            // 1) our new metadata is the same as the metadata when the client sent a batch, and so
            //    targeting errors are real.
            // OR
            // 2) our new metadata is a newer version than when the client sent a batch, and so
            //    the metadata must have changed after the client batch was sent.  We don't need to
            //    deliver in this case, since for all the client knows we may have gotten the batch
            //    exactly when the metadata changed.
            //
            // If we've had a targeting error or stale error, we've refreshed the metadata once and
            // can record target errors.
            bool recordTargetErrors = numTargetErrors > 0 || numStaleBatches > 0;

            Status targetStatus = batchOp.targetBatch( *_targeter,
                                                       recordTargetErrors,
                                                       &childBatches );
            if ( !targetStatus.isOK() ) {
                _targeter->noteCouldNotTarget();
                ++numTargetErrors;
                continue;
            }

            //
            // Send all child batches
            //

            size_t numSent = 0;
            while ( numSent != childBatches.size() ) {

                // Collect batches out on the network, mapped by endpoint
                EndpointBatchMap pendingBatches;

                //
                // Send side
                //

                // Get as many batches as we can at once
                for ( vector<TargetedWriteBatch*>::iterator it = childBatches.begin();
                    it != childBatches.end(); ++it ) {

                    TargetedWriteBatch* nextBatch = *it;
                    // If the batch is NULL, we sent it previously, so skip
                    if ( nextBatch == NULL ) continue;
                    const ConnectionString& hostEndpoint = nextBatch->getEndpoint().shardHost;

                    EndpointBatchMap::iterator pendingIt = pendingBatches.find( &hostEndpoint );

                    // If we already have a batch for this endpoint, continue
                    if ( pendingIt != pendingBatches.end() ) continue;

                    // Otherwise send it out to the endpoint via a command to a database

                    BatchedCommandRequest request( clientRequest.getBatchType() );
                    batchOp.buildBatchRequest( *nextBatch, &request );

                    // Internally we use full namespaces for request/response, but we send the
                    // command to a database with the collection name in the request.
//......... some of the code has been omitted here .........
Developer: ChrisKozak | Project: mongo | Lines: 101 | Source: batch_write_exec.cpp

Example 6: batchErrorToLastError

bool batchErrorToLastError(const BatchedCommandRequest& request,
                           const BatchedCommandResponse& response,
                           LastError* error) {
    unique_ptr<WriteErrorDetail> commandError;
    WriteErrorDetail* lastBatchError = NULL;

    if (!response.getOk()) {
        // Command-level error, all writes failed

        commandError.reset(new WriteErrorDetail);
        buildErrorFromResponse(response, commandError.get());
        lastBatchError = commandError.get();
    } else if (response.isErrDetailsSet()) {
        // The last error in the batch is always reported - this matches expected COE
        // semantics for insert batches. For updates and deletes, error is only reported
        // if the error was on the last item.

        const bool lastOpErrored = response.getErrDetails().back()->getIndex() ==
            static_cast<int>(request.sizeWriteOps() - 1);
        if (request.getBatchType() == BatchedCommandRequest::BatchType_Insert || lastOpErrored) {
            lastBatchError = response.getErrDetails().back();
        }
    } else {
        // We don't care about write concern errors, these happen in legacy mode in GLE.
    }

    // Record an error if one exists
    if (lastBatchError) {
        string errMsg = lastBatchError->getErrMessage();
        error->setLastError(lastBatchError->getErrCode(),
                            errMsg.empty() ? "see code for details" : errMsg.c_str());
        return true;
    }

    // Record write stats otherwise
    // NOTE: For multi-write batches, our semantics change a little because we don't have
    // un-aggregated "n" stats.
    if (request.getBatchType() == BatchedCommandRequest::BatchType_Update) {
        BSONObj upsertedId;
        if (response.isUpsertDetailsSet()) {
            // Only report the very last item's upserted id if applicable
            if (response.getUpsertDetails().back()->getIndex() + 1 ==
                static_cast<int>(request.sizeWriteOps())) {
                upsertedId = response.getUpsertDetails().back()->getUpsertedID();
            }
        }

        int numUpserted = 0;
        if (response.isUpsertDetailsSet())
            numUpserted = response.sizeUpsertDetails();

        int numMatched = response.getN() - numUpserted;
        dassert(numMatched >= 0);

        // Wrap upserted id in "upserted" field
        BSONObj leUpsertedId;
        if (!upsertedId.isEmpty())
            leUpsertedId = upsertedId.firstElement().wrap(kUpsertedFieldName);

        error->recordUpdate(numMatched > 0, response.getN(), leUpsertedId);
    } else if (request.getBatchType() == BatchedCommandRequest::BatchType_Delete) {
        error->recordDelete(response.getN());
    }

    return false;
}
Developer: stevelyall | Project: mongol-db | Lines: 66 | Source: batch_upconvert.cpp

Example 7: executeBatch


//......... some of the code has been omitted here .........

        if ( staleBatch ) {

            const BatchedRequestMetadata* requestMetadata = request.getMetadata();
            dassert( requestMetadata );

            // Make sure our shard name is set or is the same as what was set previously
            if ( shardingState.setShardName( requestMetadata->getShardName() ) ) {

                //
                // First, we refresh metadata if we need to based on the requested version.
                //

                ChunkVersion latestShardVersion;
                shardingState.refreshMetadataIfNeeded( request.getTargetingNS(),
                                                       requestMetadata->getShardVersion(),
                                                       &latestShardVersion );

                // Report if we're still changing our metadata
                // TODO: Better reporting per-collection
                if ( shardingState.inCriticalMigrateSection() ) {
                    noteInCriticalSection( writeErrors.back() );
                }

                if ( queueForMigrationCommit ) {

                    //
                    // Queue up for migration to end - this allows us to be sure that clients will
                    // not repeatedly try to refresh metadata that is not yet written to the config
                    // server.  Not necessary for correctness.
                    // Exposed as optional parameter to allow testing of queuing behavior with
                    // different network timings.
                    //

                    const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();

                    //
                    // Only wait if we're an older version (in the current collection epoch) and
                    // we're not write compatible, implying that the current migration is affecting
                    // writes.
                    //

                    if ( requestShardVersion.isOlderThan( latestShardVersion ) &&
                         !requestShardVersion.isWriteCompatibleWith( latestShardVersion ) ) {

                        while ( shardingState.inCriticalMigrateSection() ) {

                            log() << "write request to old shard version "
                                  << requestMetadata->getShardVersion().toString()
                                  << " waiting for migration commit" << endl;

                            shardingState.waitTillNotInCriticalSection( 10 /* secs */);
                        }
                    }
                }
            }
            else {
                // If our shard name is stale, our version must have been stale as well
                dassert( writeErrors.size() == request.sizeWriteOps() );
            }
        }

        //
        // Construct response
        //

        response->setOk( true );

        if ( !silentWC ) {

            if ( upserted.size() ) {
                response->setUpsertDetails( upserted );
                upserted.clear();
            }

            if ( writeErrors.size() ) {
                response->setErrDetails( writeErrors );
                writeErrors.clear();
            }

            if ( wcError.get() ) {
                response->setWriteConcernError( wcError.release() );
            }

            if ( anyReplEnabled() ) {
                response->setLastOp( _client->getLastOp() );
                if (theReplSet) {
                    response->setElectionId( theReplSet->getElectionId() );
                }
            }

            // Set the stats for the response
            response->setN( _stats->numInserted + _stats->numUpserted + _stats->numMatched
                            + _stats->numDeleted );
            if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update )
                response->setNModified( _stats->numModified );
        }

        dassert( response->isValid( NULL ) );
    }
Developer: EddieWu | Project: mongo | Lines: 101 | Source: batch_executor.cpp

Example 8: executeBatch

    /**
     * The core config write functionality.
     *
     * Config writes run in two passes - the first is a quick check to ensure the config servers
     * are all reachable, the second runs the actual write.
     *
     * TODO: Upgrade and move this logic to the config servers, a state machine implementation
     * is probably the next step.
     */
    void ConfigCoordinator::executeBatch( const BatchedCommandRequest& clientRequest,
                                          BatchedCommandResponse* clientResponse,
                                          bool fsyncCheck ) {

        NamespaceString nss( clientRequest.getNS() );
        dassert( nss.db() == "config" || nss.db() == "admin" );
        dassert( clientRequest.sizeWriteOps() == 1u );

        if ( fsyncCheck ) {

            //
            // Sanity check that all configs are still reachable using fsync, preserving legacy
            // behavior
            //

            OwnedPointerVector<ConfigFsyncResponse> fsyncResponsesOwned;
            vector<ConfigFsyncResponse*>& fsyncResponses = fsyncResponsesOwned.mutableVector();

            //
            // Send side
            //

            for ( vector<ConnectionString>::iterator it = _configHosts.begin();
                it != _configHosts.end(); ++it ) {
                ConnectionString& configHost = *it;
                FsyncRequest fsyncRequest;
                _dispatcher->addCommand( configHost, "admin", fsyncRequest );
            }

            _dispatcher->sendAll();

            //
            // Recv side
            //

            bool fsyncError = false;
            while ( _dispatcher->numPending() > 0 ) {

                fsyncResponses.push_back( new ConfigFsyncResponse() );
                ConfigFsyncResponse& fsyncResponse = *fsyncResponses.back();
                Status dispatchStatus = _dispatcher->recvAny( &fsyncResponse.configHost,
                                                              &fsyncResponse.response );

                // We've got to recv everything, no matter what
                if ( !dispatchStatus.isOK() ) {
                    fsyncError = true;
                    buildFsyncErrorFrom( dispatchStatus, &fsyncResponse.response );
                }
                else if ( !fsyncResponse.response.getOk() ) {
                    fsyncError = true;
                }
            }

            if ( fsyncError ) {
                combineFsyncErrors( fsyncResponses, clientResponse );
                return;
            }
            else {
                fsyncResponsesOwned.clear();
            }
        }

        //
        // Do the actual writes
        //

        BatchedCommandRequest configRequest( clientRequest.getBatchType() );
        clientRequest.cloneTo( &configRequest );
        configRequest.setNS( nss.coll() );

        OwnedPointerVector<ConfigResponse> responsesOwned;
        vector<ConfigResponse*>& responses = responsesOwned.mutableVector();

        //
        // Send the actual config writes
        //

        // Get as many batches as we can at once
        for ( vector<ConnectionString>::iterator it = _configHosts.begin();
            it != _configHosts.end(); ++it ) {
            ConnectionString& configHost = *it;
            _dispatcher->addCommand( configHost, nss.db(), configRequest );
        }

        // Send them all out
        _dispatcher->sendAll();

        //
        // Recv side
        //

//......... some of the code has been omitted here .........
Developer: DanilSerd | Project: mongo | Lines: 101 | Source: config_coordinator.cpp

Example 9: safeWriteBatch

    void BatchSafeWriter::safeWriteBatch( DBClientBase* conn,
                                          const BatchedCommandRequest& request,
                                          BatchedCommandResponse* response ) {

        const NamespaceString nss( request.getNS() );

        // N starts at zero, and we add to it for each item
        response->setN( 0 );

        for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {

            // Break on first error if we're ordered
            if ( request.getOrdered() && response->isErrDetailsSet() )
                break;

            BatchItemRef itemRef( &request, static_cast<int>( i ) );
            bool isLastItem = ( i == request.sizeWriteOps() - 1 );

            BSONObj writeConcern;
            if ( isLastItem && request.isWriteConcernSet() ) {
                writeConcern = request.getWriteConcern();
                // Pre-2.4.2 mongods react badly to 'w' being set on config servers
                if ( nss.db() == "config" )
                    writeConcern = fixWCForConfig( writeConcern );
            }

            BSONObj gleResult;
            GLEErrors errors;
            Status status = _safeWriter->safeWrite( conn, itemRef, writeConcern, &gleResult );
            if ( status.isOK() ) {
                status = extractGLEErrors( gleResult, &errors );
            }

            if ( !status.isOK() ) {
                response->clear();
                response->setOk( false );
                response->setErrCode( status.code() );
                response->setErrMessage( status.reason() );
                return;
            }

            //
            // STATS HANDLING
            //

            GLEStats stats;
            extractGLEStats( gleResult, &stats );

            // Special case for making legacy "n" field result for insert match the write
            // command result.
            if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert
                 && !errors.writeError.get() ) {
                // n is always 0 for legacy inserts.
                dassert( stats.n == 0 );
                stats.n = 1;
            }

            response->setN( response->getN() + stats.n );

            if ( !stats.upsertedId.isEmpty() ) {
                BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
                upsertedId->setIndex( i );
                upsertedId->setUpsertedID( stats.upsertedId );
                response->addToUpsertDetails( upsertedId );
            }

            response->setLastOp( stats.lastOp );

            //
            // WRITE ERROR HANDLING
            //

            // If any error occurs (except stale config) the previous GLE was not enforced
            bool enforcedWC = !errors.writeError.get()
                              || errors.writeError->getErrCode() == ErrorCodes::StaleShardVersion;

            // Save write error
            if ( errors.writeError.get() ) {
                errors.writeError->setIndex( i );
                response->addToErrDetails( errors.writeError.release() );
            }

            //
            // WRITE CONCERN ERROR HANDLING
            //

            // The last write is weird, since we enforce write concern and check the error through
            // the same GLE if possible.  If the last GLE was an error, the write concern may not
            // have been enforced in that same GLE, so we need to send another after resetting the
            // error.
            if ( isLastItem ) {

                // Try to enforce the write concern if everything succeeded (unordered or ordered)
                // OR if something succeeded and we're unordered.
                bool needToEnforceWC =
                    !response->isErrDetailsSet()
                    || ( !request.getOrdered()
                         && response->sizeErrDetails() < request.sizeWriteOps() );

                if ( !enforcedWC && needToEnforceWC ) {
//......... some of the code has been omitted here .........
Developer: AlanLiu-AI | Project: mongo | Lines: 101 | Source: batch_downconvert.cpp

Example 10: batchErrorToLastError

    void batchErrorToLastError( const BatchedCommandRequest& request,
                                const BatchedCommandResponse& response,
                                LastError* error ) {

        scoped_ptr<BatchedErrorDetail> topLevelError;
        BatchedErrorDetail* lastBatchError = NULL;

        if ( !response.getOk() ) {

            int code = response.getErrCode();

            // Check for batch error
            // We don't care about write concern errors, these happen in legacy mode in GLE
            if ( code != ErrorCodes::WriteConcernFailed && !response.isErrDetailsSet() ) {
                // Top-level error, all writes failed
                topLevelError.reset( new BatchedErrorDetail );
                buildErrorFromResponse( response, topLevelError.get() );
                lastBatchError = topLevelError.get();
            }
            else if ( response.isErrDetailsSet() ) {
                // The last error in the batch is always reported - this matches expected COE
                // semantics for insert batches and works for single writes
                lastBatchError = response.getErrDetails().back();
            }
        }

        // Record an error if one exists
        if ( lastBatchError ) {
            error->raiseError( lastBatchError->getErrCode(),
                               lastBatchError->getErrMessage().c_str() );
            return;
        }

        // Record write stats otherwise
        // NOTE: For multi-write batches, our semantics change a little because we don't have
        // un-aggregated "n" stats.
        if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) {

            BSONObj upsertedId;
            if ( response.isSingleUpsertedSet() ) upsertedId = response.getSingleUpserted();
            else if( response.isUpsertDetailsSet() ) {
                // Only report the very last item's upserted id if applicable
                if ( response.getUpsertDetails().back()->getIndex() + 1
                     == static_cast<int>( request.sizeWriteOps() ) ) {
                    upsertedId = response.getUpsertDetails().back()->getUpsertedID();
                }
            }

            int numUpserted = 0;
            if ( response.isSingleUpsertedSet() )
                ++numUpserted;
            else if ( response.isUpsertDetailsSet() )
                numUpserted += response.sizeUpsertDetails();

            int numUpdated = response.getN() - numUpserted;
            dassert( numUpdated >= 0 );
            error->recordUpdate( numUpdated > 0, response.getN(), upsertedId );
        }
        else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Delete ) {
            error->recordDelete( response.getN() );
        }
    }
Developer: Convey-Compliance | Project: mongo | Lines: 62 | Source: batch_upconvert.cpp

Example 11: executeBatch

    /**
     * The core config write functionality.
     *
     * Config writes run in two passes - the first is a quick check to ensure the config servers
     * are all reachable, the second runs the actual write.
     *
     * TODO: Upgrade and move this logic to the config servers, a state machine implementation
     * is probably the next step.
     */
    void ConfigCoordinator::executeBatch(const BatchedCommandRequest& clientRequest,
                                         BatchedCommandResponse* clientResponse) {

        const NamespaceString nss(clientRequest.getNS());

        // Should never use it for anything other than DBs residing on the config server
        dassert(nss.db() == "config" || nss.db() == "admin");
        dassert(clientRequest.sizeWriteOps() == 1u);

        // This is an opportunistic check that all config servers look healthy by calling
        // getLastError on each one of them. If there was some form of write/journaling error, get
        // last error would fail.
        {
            for (vector<ConnectionString>::iterator it = _configHosts.begin();
                 it != _configHosts.end();
                 ++it) {

                _dispatcher->addCommand(*it,
                                        "admin",
                                        RawBSONSerializable(BSON("getLastError" << true <<
                                                                 "fsync" << true)));
            }

            _dispatcher->sendAll();

            bool error = false;
            while (_dispatcher->numPending()) {
                ConnectionString host;
                RawBSONSerializable response;

                Status status = _dispatcher->recvAny(&host, &response);
                if (status.isOK()) {
                    BSONObj obj = response.toBSON();

                    LOG(3) << "Response " << obj.toString();

                    // If the ok field is anything other than 1, count it as error
                    if (!obj["ok"].trueValue()) {
                        error = true;
                        log() << "Config server check for host " << host
                              << " returned error: " << response;
                    }
                }
                else {
                    error = true;
                    log() << "Config server check for host " << host
                          << " failed with status: " << status;
                }
            }

            // All responses should have been gathered by this point
            if (error) {
                clientResponse->setOk(false);
                clientResponse->setErrCode(ErrorCodes::RemoteValidationError);
                clientResponse->setErrMessage("Could not verify that config servers were active"
                                              " and reachable before write");
                return;
            }
        }

        if (!_checkConfigString(clientResponse)) {
            return;
        }

        //
        // Do the actual writes
        //

        BatchedCommandRequest configRequest( clientRequest.getBatchType() );
        clientRequest.cloneTo( &configRequest );
        configRequest.setNS( nss.coll() );

        OwnedPointerVector<ConfigResponse> responsesOwned;
        vector<ConfigResponse*>& responses = responsesOwned.mutableVector();

        //
        // Send the actual config writes
        //

        // Get as many batches as we can at once
        for (vector<ConnectionString>::const_iterator it = _configHosts.begin();
             it != _configHosts.end();
             ++it) {

            const ConnectionString& configHost = *it;
            _dispatcher->addCommand(configHost, nss.db(), configRequest);
        }

        // Send them all out
        _dispatcher->sendAll();

//......... some of the code has been omitted here .........
Developer: 7segments | Project: mongo-1 | Lines: 101 | Source: config_coordinator.cpp

Example 12: applyWriteItem

bool WriteBatchExecutor::applyWriteItem( const BatchedCommandRequest& request,
        int index,
        WriteStats* stats,
        BatchedErrorDetail* error ) {
    const string& ns = request.getNS();

    // Clear operation's LastError before starting.
    _le->reset( true );

    //uint64_t itemTimeMicros = 0;
    bool opSuccess = true;

    // Each write operation executes in its own PageFaultRetryableSection.  This means that
    // a single batch can throw multiple PageFaultException's, which is not the case for
    // other operations.
    PageFaultRetryableSection s;
    while ( true ) {
        try {
            // Execute the write item as a child operation of the current operation.
            CurOp childOp( _client, _client->curop() );

            // TODO Modify CurOp "wrapped" constructor to take an opcode, so calling .reset()
            // is unneeded
            childOp.reset( _client->getRemote(), getOpCode( request.getBatchType() ) );

            childOp.ensureStarted();
            OpDebug& opDebug = childOp.debug();
            opDebug.ns = ns;
            {
                Client::WriteContext ctx( ns );

                switch ( request.getBatchType() ) {
                case BatchedCommandRequest::BatchType_Insert:
                    opSuccess =
                        applyInsert( ns,
                                     request.getInsertRequest()->getDocumentsAt( index ),
                                     &childOp,
                                     stats,
                                     error );
                    break;
                case BatchedCommandRequest::BatchType_Update:
                    opSuccess = applyUpdate( ns,
                                             *request.getUpdateRequest()->getUpdatesAt( index ),
                                             &childOp,
                                             stats,
                                             error );
                    break;
                default:
                    dassert( request.getBatchType() ==
                             BatchedCommandRequest::BatchType_Delete );
                    opSuccess = applyDelete( ns,
                                             *request.getDeleteRequest()->getDeletesAt( index ),
                                             &childOp,
                                             stats,
                                             error );
                    break;
                }
            }
            childOp.done();
            //itemTimeMicros = childOp.totalTimeMicros();

            opDebug.executionTime = childOp.totalTimeMillis();
            opDebug.recordStats();

            // Log operation if running with at least "-v", or if exceeds slow threshold.
            if ( logger::globalLogDomain()->shouldLog( logger::LogSeverity::Debug( 1 ) )
                    || opDebug.executionTime > cmdLine.slowMS + childOp.getExpectedLatencyMs() ) {

                MONGO_TLOG(1) << opDebug.report( childOp ) << endl;
            }

            // TODO Log operation if logLevel >= 3 and assertion thrown (as assembleResponse()
            // does).

            // Save operation to system.profile if shouldDBProfile().
            if ( childOp.shouldDBProfile( opDebug.executionTime ) ) {
                profile( *_client, getOpCode( request.getBatchType() ), childOp );
            }
            break;
        }
        catch ( PageFaultException& e ) {
            e.touch();
        }
    }

    return opSuccess;
}
Developer: Jiangew | Project: mongo | Lines: 87 | Source: batch_executor.cpp

Example 13: safeWriteBatch

    void BatchSafeWriter::safeWriteBatch( DBClientBase* conn,
                                          const BatchedCommandRequest& request,
                                          BatchedCommandResponse* response ) {

        const NamespaceString nss( request.getNS() );

        // N starts at zero, and we add to it for each item
        response->setN( 0 );

        // GLE path always sets nModified to -1 (sentinel) to indicate we should omit it later.
        response->setNModified(-1);

        for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {

            // Break on first error if we're ordered
            if ( request.getOrdered() && response->isErrDetailsSet() )
                break;

            BatchItemRef itemRef( &request, static_cast<int>( i ) );

            BSONObj gleResult;
            GLEErrors errors;
            Status status = _safeWriter->safeWrite( conn,
                                                    itemRef,
                                                    WriteConcernOptions::Acknowledged,
                                                    &gleResult );

            if ( status.isOK() ) {
                status = extractGLEErrors( gleResult, &errors );
            }

            if ( !status.isOK() ) {
                response->clear();
                response->setOk( false );
                response->setErrCode( ErrorCodes::RemoteResultsUnavailable );
                
                StringBuilder builder;
                builder << "could not get write error from safe write";
                builder << causedBy( status.toString() );
                response->setErrMessage( builder.str() );
                return;
            }

            if ( errors.wcError.get() ) {
                response->setWriteConcernError( errors.wcError.release() );
            }

            //
            // STATS HANDLING
            //

            GLEStats stats;
            extractGLEStats( gleResult, &stats );

            // Special case for making legacy "n" field result for insert match the write
            // command result.
            if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert
                 && !errors.writeError.get() ) {
                // n is always 0 for legacy inserts.
                dassert( stats.n == 0 );
                stats.n = 1;
            }

            response->setN( response->getN() + stats.n );

            if ( !stats.upsertedId.isEmpty() ) {
                BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
                upsertedId->setIndex( i );
                upsertedId->setUpsertedID( stats.upsertedId );
                response->addToUpsertDetails( upsertedId );
            }

            response->setLastOp( stats.lastOp );

            // Save write error
            if ( errors.writeError.get() ) {
                errors.writeError->setIndex( i );
                response->addToErrDetails( errors.writeError.release() );
            }
        }

        //
        // WRITE CONCERN ERROR HANDLING
        //

        // The last write is weird, since we enforce write concern and check the error through
        // the same GLE if possible.  If the last GLE was an error, the write concern may not
        // have been enforced in that same GLE, so we need to send another after resetting the
        // error.

        BSONObj writeConcern;
        if ( request.isWriteConcernSet() ) {
            writeConcern = request.getWriteConcern();
            // Pre-2.4.2 mongods react badly to 'w' being set on config servers
            if ( nss.db() == "config" )
                writeConcern = fixWCForConfig( writeConcern );
        }

        bool needToEnforceWC = WriteConcernOptions::Acknowledged.woCompare(writeConcern) != 0 &&
                WriteConcernOptions::Unacknowledged.woCompare(writeConcern) != 0;
//......... some of the code has been omitted here .........
Developer: AmitChotaliya | Project: mongo | Lines: 101 | Source: batch_downconvert.cpp


Note: The BatchedCommandRequest::getBatchType examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and the source code copyright remains with the original authors. Please consult each project's License before redistributing or reusing the code; do not reproduce this article without permission.