

C++ OwnedPointerVector::mutableVector Method Code Examples

This article collects typical usage examples of the C++ method OwnedPointerVector::mutableVector. If you have been wondering what OwnedPointerVector::mutableVector does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the containing class, OwnedPointerVector.


The sections below present 15 code examples of OwnedPointerVector::mutableVector, ordered by popularity.
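
Before the examples, here is a minimal usage sketch (not taken from the examples below) of the pattern they all rely on: mutableVector() exposes the underlying std::vector<T*> so raw pointers can be pushed in, while the OwnedPointerVector destructor deletes whatever it still owns. The header path, the mongo namespace qualification, and the Foo type are assumptions made for illustration; in the MongoDB tree the class is an internal utility under mongo/base/.

// Minimal sketch of OwnedPointerVector<T>::mutableVector() usage.
// Assumptions: the header lives at mongo/base/owned_pointer_vector.h and the
// class sits in namespace mongo; Foo is a placeholder element type.
#include <memory>
#include <vector>

#include "mongo/base/owned_pointer_vector.h"

struct Foo {
    explicit Foo(int v) : value(v) {}
    int value;
};

void buildAndConsume() {
    mongo::OwnedPointerVector<Foo> owned;

    // mutableVector() returns std::vector<Foo*>&. Pointers pushed here are
    // owned by 'owned' and deleted in its destructor.
    std::vector<Foo*>& raw = owned.mutableVector();
    raw.push_back(new Foo(1));

    // The release-into-the-vector idiom used in Example 1 below.
    std::unique_ptr<Foo> tmp(new Foo(2));
    owned.mutableVector().push_back(tmp.release());

    // Read-only access without transferring ownership, as in Example 3's
    // call to solutions.vector().
    const std::vector<Foo*>& readOnly = owned.vector();
    (void)readOnly;
}  // 'owned' goes out of scope here and deletes both Foo instances.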

Example 1: cloned

const S2Polygon& BigSimplePolygon::GetPolygonBorder() const {
    if (_borderPoly)
        return *_borderPoly;

    unique_ptr<S2Loop> cloned(_loop->Clone());

    // Any loop in the polygon should be no larger than a hemisphere (2*Pi).
    cloned->Normalize();

    OwnedPointerVector<S2Loop> loops;
    loops.mutableVector().push_back(cloned.release());
    _borderPoly.reset(new S2Polygon(&loops.mutableVector()));
    return *_borderPoly;
}
Developer: AlexOreshkevich, Project: mongo, Lines: 14, Source: big_polygon.cpp

Example 2: Contains

bool BigSimplePolygon::Contains(const S2Polyline& line) const {
    //
    // A line is contained within a loop if the result of subtracting the loop from the line is
    // nothing.
    //
    // Also, a line is contained within a loop if the result of clipping the line to the
    // complement of the loop is nothing.
    //
    // If we can't subtract the loop itself using S2, we clip (intersect) to the inverse.  Every
    // point in S2 is contained in exactly one of these loops.
    //
    // TODO: Polygon borders are actually kind of weird, and this is somewhat inconsistent with
    // Intersects().  A point might Intersect() a boundary exactly, but not be Contain()ed
    // within the Polygon.  Think the right thing to do here is custom intersection functions.
    //
    const S2Polygon& polyBorder = GetPolygonBorder();

    OwnedPointerVector<S2Polyline> clippedOwned;
    vector<S2Polyline*>& clipped = clippedOwned.mutableVector();

    if (_isNormalized) {
        // Polygon border is the same as the loop
        polyBorder.SubtractFromPolyline(&line, &clipped);
        return clipped.size() == 0;
    } else {
        // Polygon border is the complement of the loop
        polyBorder.IntersectWithPolyline(&line, &clipped);
        return clipped.size() == 0;
    }
}
Developer: AlexOreshkevich, Project: mongo, Lines: 30, Source: big_polygon.cpp

Example 3: clone

    PlanCacheEntry* PlanCacheEntry::clone() const {
        OwnedPointerVector<QuerySolution> solutions;
        for (size_t i = 0; i < plannerData.size(); ++i) {
            QuerySolution* qs = new QuerySolution();
            qs->cacheData.reset(plannerData[i]->clone());
            solutions.mutableVector().push_back(qs);
        }
        PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());

        entry->backupSoln = backupSoln;

        // Copy query shape.
        entry->query = query.getOwned();
        entry->sort = sort.getOwned();
        entry->projection = projection.getOwned();

        // Copy performance stats.
        for (size_t i = 0; i < feedback.size(); ++i) {
            PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
            fb->stats.reset(feedback[i]->stats->clone());
            fb->score = feedback[i]->score;
            entry->feedback.push_back(fb);
        }
        entry->averageScore = averageScore;
        entry->stddevScore = stddevScore;
        return entry;
    }
Developer: pharrell84, Project: rtree-mongo-mf, Lines: 27, Source: plan_cache.cpp

Example 4: getIndexBoundsForQuery

IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key,
                                                 const CanonicalQuery& canonicalQuery) {
    // $text is not allowed in planning since we don't have text index on mongos.
    //
    // TODO: Treat $text query as a no-op in planning. So with shard key {a: 1},
    //       the query { a: 2, $text: { ... } } will only target to {a: 2}.
    if (QueryPlannerCommon::hasNode(canonicalQuery.root(), MatchExpression::TEXT)) {
        IndexBounds bounds;
        IndexBoundsBuilder::allValuesBounds(key, &bounds);  // [minKey, maxKey]
        return bounds;
    }

    // Consider shard key as an index
    string accessMethod = IndexNames::findPluginName(key);
    dassert(accessMethod == IndexNames::BTREE || accessMethod == IndexNames::HASHED);

    // Use query framework to generate index bounds
    QueryPlannerParams plannerParams;
    // Must use "shard key" index
    plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
    IndexEntry indexEntry(key,
                          accessMethod,
                          false /* multiKey */,
                          false /* sparse */,
                          false /* unique */,
                          "shardkey",
                          NULL /* filterExpr */,
                          BSONObj());
    plannerParams.indices.push_back(indexEntry);

    OwnedPointerVector<QuerySolution> solutions;
    Status status = QueryPlanner::plan(canonicalQuery, plannerParams, &solutions.mutableVector());
    uassert(status.code(), status.reason(), status.isOK());

    IndexBounds bounds;

    for (vector<QuerySolution*>::const_iterator it = solutions.begin();
         bounds.size() == 0 && it != solutions.end();
         it++) {
        // Try next solution if we failed to generate index bounds, i.e. bounds.size() == 0
        bounds = collapseQuerySolution((*it)->root.get());
    }

    if (bounds.size() == 0) {
        // We cannot plan the query without collection scan, so target to all shards.
        IndexBoundsBuilder::allValuesBounds(key, &bounds);  // [minKey, maxKey]
    }
    return bounds;
}
Developer: Andiry, Project: mongo, Lines: 49, Source: chunk_manager.cpp

Example 5: writeOp

void Strategy::writeOp(OperationContext* txn, int op, Request& request) {
    // make sure we have a last error
    dassert(&LastError::get(cc()));

    OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
    vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();

    msgToBatchRequests(request.m(), &commandRequests);

    for (vector<BatchedCommandRequest*>::iterator it = commandRequests.begin();
         it != commandRequests.end();
         ++it) {
        // Multiple commands registered to last error as multiple requests
        if (it != commandRequests.begin())
            LastError::get(cc()).startRequest();

        BatchedCommandRequest* commandRequest = *it;

        // Adjust namespaces for command
        NamespaceString fullNS(commandRequest->getNS());
        string cmdNS = fullNS.getCommandNS();
        // We only pass in collection name to command
        commandRequest->setNS(fullNS);

        BSONObjBuilder builder;
        BSONObj requestBSON = commandRequest->toBSON();

        {
            // Disable the last error object for the duration of the write cmd
            LastError::Disabled disableLastError(&LastError::get(cc()));
            Command::runAgainstRegistered(txn, cmdNS.c_str(), requestBSON, builder, 0);
        }

        BatchedCommandResponse commandResponse;
        bool parsed = commandResponse.parseBSON(builder.done(), NULL);
        (void)parsed;  // for compile
        dassert(parsed && commandResponse.isValid(NULL));

        // Populate the lastError object based on the write response
        LastError::get(cc()).reset();
        bool hadError =
            batchErrorToLastError(*commandRequest, commandResponse, &LastError::get(cc()));

        // Check if this is an ordered batch and we had an error which should stop processing
        if (commandRequest->getOrdered() && hadError)
            break;
    }
}
Developer: CeperaCPP, Project: mongo, Lines: 48, Source: strategy.cpp

Example 6: generateSection

    BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
        RangeDeleter* deleter = getDeleter();
        if (!deleter) {
            return BSONObj();
        }

        BSONObjBuilder result;

        OwnedPointerVector<DeleteJobStats> statsList;
        deleter->getStatsHistory(&statsList.mutableVector());
        BSONArrayBuilder oldStatsBuilder;
        for (OwnedPointerVector<DeleteJobStats>::const_iterator it = statsList.begin();
             it != statsList.end();
             ++it) {
            BSONObjBuilder entryBuilder;
            entryBuilder.append("deletedDocs", (*it)->deletedDocCount);

            if ((*it)->queueEndTS > Date_t()) {
                entryBuilder.append("queueStart", (*it)->queueStartTS);
                entryBuilder.append("queueEnd", (*it)->queueEndTS);
            }

            if ((*it)->deleteEndTS > Date_t()) {
                entryBuilder.append("deleteStart", (*it)->deleteStartTS);
                entryBuilder.append("deleteEnd", (*it)->deleteEndTS);

                if ((*it)->waitForReplEndTS > Date_t()) {
                    entryBuilder.append("waitForReplStart", (*it)->waitForReplStartTS);
                    entryBuilder.append("waitForReplEnd", (*it)->waitForReplEndTS);
                }
            }

            oldStatsBuilder.append(entryBuilder.obj());
        }
        result.append("lastDeleteStats", oldStatsBuilder.arr());

        return result.obj();
    }
Developer: stevelyall, Project: mongol-db, Lines: 38, Source: range_deleter_server_status.cpp

Example 7:

/*static*/ int MongoFile::_flushAll(bool sync) {
    if (!sync) {
        int num = 0;
        LockMongoFilesShared lk;
        for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
            num++;
            MongoFile* mmf = *i;
            if (!mmf)
                continue;

            mmf->flush(sync);
        }
        return num;
    }

    // want to do it sync

    // get a thread-safe Flushable object for each file first in a single lock
    // so that we can iterate and flush without doing any locking here
    OwnedPointerVector<Flushable> thingsToFlushWrapper;
    vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
    {
        LockMongoFilesShared lk;
        for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
            MongoFile* mmf = *i;
            if (!mmf)
                continue;
            thingsToFlush.push_back(mmf->prepareFlush());
        }
    }

    for (size_t i = 0; i < thingsToFlush.size(); i++) {
        thingsToFlush[i]->flush();
    }

    return thingsToFlush.size();
}
Developer: Andiry, Project: mongo, Lines: 37, Source: mmap.cpp

Example 8: list

// static
Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
    invariant(bob);

    // Format of BSON result:
    //
    // {
    //     hints: [
    //         {
    //             query: <query>,
    //             sort: <sort>,
    //             projection: <projection>,
    //             indexes: [<index1>, <index2>, <index3>, ...]
    //         }
    //  }
    BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
    OwnedPointerVector<AllowedIndexEntry> entries;
    entries.mutableVector() = querySettings.getAllAllowedIndices();
    for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
            i != entries.end(); ++i) {
        AllowedIndexEntry* entry = *i;
        invariant(entry);

        BSONObjBuilder hintBob(hintsBuilder.subobjStart());
        hintBob.append("query", entry->query);
        hintBob.append("sort", entry->sort);
        hintBob.append("projection", entry->projection);
        BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
        for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
                j != entry->indexKeyPatterns.end(); ++j) {
            const BSONObj& index = *j;
            indexesBuilder.append(index);
        }
        indexesBuilder.doneFast();
    }
    hintsBuilder.doneFast();
    return Status::OK();
}
Developer: jewkesy, Project: mongo, Lines: 38, Source: index_filter_commands.cpp

Example 9: executeBatch

void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
                                  BatchedCommandResponse* clientResponse) {
    LOG(4) << "starting execution of write batch of size "
           << static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS()
           << endl;

    BatchWriteOp batchOp;
    batchOp.initClientRequest(&clientRequest);

    // Current batch status
    bool refreshedTargeter = false;
    int rounds = 0;
    int numCompletedOps = 0;
    int numRoundsWithoutProgress = 0;

    while (!batchOp.isFinished()) {
        //
        // Get child batches to send using the targeter
        //
        // Targeting errors can be caused by remote metadata changing (the collection could have
        // been dropped and recreated, for example with a new shard key).  If a remote metadata
        // change occurs *before* a client sends us a batch, we need to make sure that we don't
        // error out just because we're staler than the client - otherwise mongos will have
        // unpredictable behavior.
        //
        // (If a metadata change happens *during* or *after* a client sends us a batch, however,
        // we make no guarantees about delivery.)
        //
        // For this reason, we don't record targeting errors until we've refreshed our targeting
        // metadata at least once *after* receiving the client batch - at that point, we know:
        //
        // 1) our new metadata is the same as the metadata when the client sent a batch, and so
        //    targeting errors are real.
        // OR
        // 2) our new metadata is a newer version than when the client sent a batch, and so
        //    the metadata must have changed after the client batch was sent.  We don't need to
        //    deliver in this case, since for all the client knows we may have gotten the batch
        //    exactly when the metadata changed.
        //

        OwnedPointerVector<TargetedWriteBatch> childBatchesOwned;
        vector<TargetedWriteBatch*>& childBatches = childBatchesOwned.mutableVector();

        // If we've already had a targeting error, we've refreshed the metadata once and can
        // record target errors definitively.
        bool recordTargetErrors = refreshedTargeter;
        Status targetStatus = batchOp.targetBatch(*_targeter, recordTargetErrors, &childBatches);
        if (!targetStatus.isOK()) {
            // Don't do anything until a targeter refresh
            _targeter->noteCouldNotTarget();
            refreshedTargeter = true;
            ++_stats->numTargetErrors;
            dassert(childBatches.size() == 0u);
        }

        //
        // Send all child batches
        //

        size_t numSent = 0;
        size_t numToSend = childBatches.size();
        bool remoteMetadataChanging = false;
        while (numSent != numToSend) {
            // Collect batches out on the network, mapped by endpoint
            OwnedHostBatchMap ownedPendingBatches;
            OwnedHostBatchMap::MapType& pendingBatches = ownedPendingBatches.mutableMap();

            //
            // Send side
            //

            // Get as many batches as we can at once
            for (vector<TargetedWriteBatch*>::iterator it = childBatches.begin();
                 it != childBatches.end();
                 ++it) {
                //
                // Collect the info needed to dispatch our targeted batch
                //

                TargetedWriteBatch* nextBatch = *it;
                // If the batch is NULL, we sent it previously, so skip
                if (nextBatch == NULL)
                    continue;

                // Figure out what host we need to dispatch our targeted batch
                ConnectionString shardHost;
                Status resolveStatus =
                    _resolver->chooseWriteHost(nextBatch->getEndpoint().shardName, &shardHost);
                if (!resolveStatus.isOK()) {
                    ++_stats->numResolveErrors;

                    // Record a resolve failure
                    // TODO: It may be necessary to refresh the cache if stale, or maybe just
                    // cancel and retarget the batch
                    WriteErrorDetail error;
                    buildErrorFrom(resolveStatus, &error);

                    LOG(4) << "unable to send write batch to " << shardHost.toString()
                           << causedBy(resolveStatus.toString()) << endl;

//......... part of the code omitted here .........
Developer: DavidAlphaFox, Project: mongodb, Lines: 101, Source: batch_write_exec.cpp

Example 10: targetWrites

    Status WriteOp::targetWrites( const NSTargeter& targeter,
                                  std::vector<TargetedWrite*>* targetedWrites ) {

        bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
        bool isDelete = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Delete;

        // In case of error, don't leak.
        OwnedPointerVector<ShardEndpoint> endpointsOwned;
        vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();

        if ( isUpdate || isDelete ) {

            // Updates/deletes targeted by query

            BSONObj queryDoc =
                isUpdate ? _itemRef.getUpdate()->getQuery() : _itemRef.getDelete()->getQuery();

            Status targetStatus = targeter.targetQuery( queryDoc, &endpoints );

            if ( targetStatus.isOK() ) {
                targetStatus =
                    isUpdate ?
                        updateTargetsOk( *this, endpoints ) : deleteTargetsOk( *this, endpoints );
            }

            if ( !targetStatus.isOK() ) return targetStatus;
        }
        else {
            dassert( _itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert );

            // Inserts targeted by doc itself

            ShardEndpoint* endpoint = NULL;
            Status targetStatus = targeter.targetDoc( _itemRef.getDocument(), &endpoint );

            if ( !targetStatus.isOK() ) {
                dassert( NULL == endpoint );
                return targetStatus;
            }

            dassert( NULL != endpoint );
            endpoints.push_back( endpoint );
        }

        for ( vector<ShardEndpoint*>::iterator it = endpoints.begin(); it != endpoints.end();
            ++it ) {

            ShardEndpoint* endpoint = *it;

            _childOps.push_back( new ChildWriteOp( this ) );

            WriteOpRef ref( _itemRef.getItemIndex(), _childOps.size() - 1 );

            // For now, multiple endpoints imply no versioning
            if ( endpoints.size() == 1u ) {
                targetedWrites->push_back( new TargetedWrite( *endpoint, ref ) );
            }
            else {
                ShardEndpoint broadcastEndpoint( endpoint->shardName,
                                                 ChunkVersion::IGNORED(),
                                                 endpoint->shardHost );
                targetedWrites->push_back( new TargetedWrite( broadcastEndpoint, ref ) );
            }

            _childOps.back()->pendingWrite = targetedWrites->back();
            _childOps.back()->state = WriteOpState_Pending;
        }

        _state = WriteOpState_Pending;
        return Status::OK();
    }
Developer: ukd1, Project: mongo, Lines: 71, Source: write_op.cpp

Example 11: mergeChunks


//......... part of the code omitted here .........

        dassert( metadata->getShardVersion().equals( shardVersion ) );

        if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {

            *errMsg = stream() << "could not merge chunks, the range "
                               << rangeToString( minKey, maxKey ) << " is not valid"
                               << " for collection " << nss.ns() << " with key pattern "
                               << metadata->getKeyPattern();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get merged chunk information
        //

        ChunkVersion mergeVersion = metadata->getCollVersion();
        mergeVersion.incMinor();

        OwnedPointerVector<ChunkType> chunksToMerge;

        ChunkType itChunk;
        itChunk.setMin( minKey );
        itChunk.setMax( minKey );
        itChunk.setNS( nss.ns() );
        itChunk.setShard( shardingState.getShardName() );

        while ( itChunk.getMax().woCompare( maxKey ) < 0 &&
                metadata->getNextChunk( itChunk.getMax(), &itChunk ) ) {
            auto_ptr<ChunkType> saved( new ChunkType );
            itChunk.cloneTo( saved.get() );
            chunksToMerge.mutableVector().push_back( saved.release() );
        }

        if ( chunksToMerge.empty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " and ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Validate the range starts and ends at chunks and has no holes, error if not valid
        //

        BSONObj firstDocMin = ( *chunksToMerge.begin() )->getMin();
        BSONObj firstDocMax = ( *chunksToMerge.begin() )->getMax();
        // minKey is inclusive
        bool minKeyInRange = rangeContains( firstDocMin, firstDocMax, minKey );

        if ( !minKeyInRange ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }
Developer: leonidbl91, Project: mongo, Lines: 66, Source: d_merge.cpp

Example 12: updateRecord

    /** Note: if the object shrinks a lot, we don't free up space, we leave extra at end of the record.
     */
    const DiskLoc DataFileMgr::updateRecord(
        const char *ns,
        Collection* collection,
        Record *toupdate, const DiskLoc& dl,
        const char *_buf, int _len, OpDebug& debug,  bool god) {

        dassert( toupdate == dl.rec() );

        BSONObj objOld = BSONObj::make(toupdate);
        BSONObj objNew(_buf);
        DEV verify( objNew.objsize() == _len );
        DEV verify( objNew.objdata() == _buf );

        if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
            /* add back the old _id value if the update removes it.  Note this implementation is slow
               (copies entire object multiple times), but this shouldn't happen often, so going for simple
               code, not speed.
            */
            BSONObjBuilder b;
            BSONElement e;
            verify( objOld.getObjectID(e) );
            b.append(e); // put _id first, for best performance
            b.appendElements(objNew);
            objNew = b.obj();
        }

        NamespaceString nsstring(ns);
        if (nsstring.coll() == "system.users") {
            V2UserDocumentParser parser;
            uassertStatusOK(parser.checkValidUserDocument(objNew));
        }

        uassert( 13596 , str::stream() << "cannot change _id of a document old:" << objOld << " new:" << objNew,
                objNew["_id"] == objOld["_id"]);

        /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
           below.  that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
        */
        OwnedPointerVector<UpdateTicket> updateTickets;
        updateTickets.mutableVector().resize(collection->details()->getTotalIndexCount());
        for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
            auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
            auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
            InsertDeleteOptions options;
            options.logIfError = false;
            options.dupsAllowed = !(KeyPattern::isIdKeyPattern(descriptor->keyPattern())
                                    || descriptor->unique())
                                  || ignoreUniqueIndex(descriptor->getOnDisk());
            updateTickets.mutableVector()[i] = new UpdateTicket();
            Status ret = iam->validateUpdate(objOld, objNew, dl, options,
                                             updateTickets.mutableVector()[i]);

            if (Status::OK() != ret) {
                uasserted(ASSERT_ID_DUPKEY, "Update validation failed: " + ret.toString());
            }
        }

        if ( toupdate->netLength() < objNew.objsize() ) {
            // doesn't fit.  reallocate -----------------------------------------------------
            moveCounter.increment();
            uassert( 10003,
                     "failing update: objects in a capped ns cannot grow",
                     !(collection && collection->details()->isCapped()));
            collection->details()->paddingTooSmall();
            deleteRecord(ns, toupdate, dl);
            DiskLoc res = insert(ns, objNew.objdata(), objNew.objsize(), false, god);

            if (debug.nmoved == -1) // default of -1 rather than 0
                debug.nmoved = 1;
            else
                debug.nmoved += 1;

            return res;
        }

        collection->infoCache()->notifyOfWriteOp();
        collection->details()->paddingFits();

        debug.keyUpdates = 0;

        for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
            auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
            auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
            int64_t updatedKeys;
            Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
            if (Status::OK() != ret) {
                // This shouldn't happen unless something disastrous occurred.
                massert(16799, "update failed: " + ret.toString(), false);
            }
            debug.keyUpdates += updatedKeys;
        }

        //  update in place
        int sz = objNew.objsize();
        memcpy(getDur().writingPtr(toupdate->data(), sz), objNew.objdata(), sz);
        return dl;
    }
Developer: ChrisKozak, Project: mongo, Lines: 99, Source: pdfile.cpp

Example 13: executeBatch

    void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
                                           BatchedCommandResponse* response ) {

        // TODO: Lift write concern parsing out of this entirely.
        WriteConcernOptions writeConcern;
        Status status = Status::OK();

        BSONObj wcDoc;
        if ( request.isWriteConcernSet() ) {
            wcDoc = request.getWriteConcern();
        }

        if ( wcDoc.isEmpty() ) {
            status = writeConcern.parse( _defaultWriteConcern );
        }
        else {
            status = writeConcern.parse( wcDoc );
        }

        if ( status.isOK() ) {
            status = validateWriteConcern( writeConcern );
        }

        if ( !status.isOK() ) {
            response->setErrCode( status.code() );
            response->setErrMessage( status.reason() );
            response->setOk( false );
            dassert( response->isValid(NULL) );
            return;
        }

        bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0
                        && writeConcern.syncMode == WriteConcernOptions::NONE;

        Timer commandTimer;

        OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
        vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();

        OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
        vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();

        //
        // Apply each batch item, possibly bulking some items together in the write lock.
        // Stops on error if batch is ordered.
        //

        bulkExecute( request, &upserted, &writeErrors );

        //
        // Try to enforce the write concern if everything succeeded (unordered or ordered)
        // OR if something succeeded and we're unordered.
        //

        auto_ptr<WCErrorDetail> wcError;
        bool needToEnforceWC = writeErrors.empty()
                               || ( !request.getOrdered()
                                    && writeErrors.size() < request.sizeWriteOps() );

        if ( needToEnforceWC ) {

            _client->curop()->setMessage( "waiting for write concern" );

            WriteConcernResult res;
            status = waitForWriteConcern( writeConcern, _client->getLastOp(), &res );

            if ( !status.isOK() ) {
                wcError.reset( toWriteConcernError( status, res ) );
            }
        }

        //
        // Refresh metadata if needed
        //

        bool staleBatch = !writeErrors.empty()
                          && writeErrors.back()->getErrCode() == ErrorCodes::StaleShardVersion;

        if ( staleBatch ) {

            const BatchedRequestMetadata* requestMetadata = request.getMetadata();
            dassert( requestMetadata );

            // Make sure our shard name is set or is the same as what was set previously
            if ( shardingState.setShardName( requestMetadata->getShardName() ) ) {

                //
                // First, we refresh metadata if we need to based on the requested version.
                //

                ChunkVersion latestShardVersion;
                shardingState.refreshMetadataIfNeeded( request.getTargetingNS(),
                                                       requestMetadata->getShardVersion(),
                                                       &latestShardVersion );

                // Report if we're still changing our metadata
                // TODO: Better reporting per-collection
                if ( shardingState.inCriticalMigrateSection() ) {
                    noteInCriticalSection( writeErrors.back() );
                }
//......... part of the code omitted here .........
Developer: EddieWu, Project: mongo, Lines: 101, Source: batch_executor.cpp

Example 14: computeGeoNearDistance

    static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
                                                     WorkingSetMember* member) {

        //
        // Generic GeoNear distance computation
        // Distances are computed by projecting the stored geometry into the query CRS, and
        // computing distance in that CRS.
        //

        // Must have an object in order to get geometry out of it.
        invariant(member->hasObj());

        CRS queryCRS = nearParams.nearQuery.centroid.crs;

        // Extract all the geometries out of this document for the near query
        OwnedPointerVector<StoredGeometry> geometriesOwned;
        vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
        extractGeometries(member->obj, nearParams.nearQuery.field, &geometries);

        // Compute the minimum distance of all the geometries in the document
        double minDistance = -1;
        BSONObj minDistanceObj;
        for (vector<StoredGeometry*>::iterator it = geometries.begin(); it != geometries.end();
            ++it) {

            StoredGeometry& stored = **it;

            // NOTE: A stored document with STRICT_SPHERE CRS is treated as a malformed document
            // and ignored. Since GeoNear requires an index, there's no stored STRICT_SPHERE shape.
            // So we don't check it here.

            // NOTE: For now, we're sure that if we get this far in the query we'll have an
            // appropriate index which validates the type of geometry we're pulling back here.
            // TODO: It may make sense to change our semantics and, by default, only return
            // shapes in the same CRS from $geoNear.
            if (!stored.geometry.supportsProject(queryCRS))
                continue;
            stored.geometry.projectInto(queryCRS);

            double nextDistance = stored.geometry.minDistance(nearParams.nearQuery.centroid);

            if (minDistance < 0 || nextDistance < minDistance) {
                minDistance = nextDistance;
                minDistanceObj = stored.element.Obj();
            }
        }

        if (minDistance < 0) {
            // No distance to report
            return StatusWith<double>(-1);
        }

        if (nearParams.addDistMeta) {
            if (nearParams.nearQuery.unitsAreRadians) {
                // Hack for nearSphere
                // TODO: Remove nearSphere?
                invariant(SPHERE == queryCRS);
                member->addComputed(new GeoDistanceComputedData(minDistance
                                                                / kRadiusOfEarthInMeters));
            }
            else {
                member->addComputed(new GeoDistanceComputedData(minDistance));
            }
        }

        if (nearParams.addPointMeta) {
            member->addComputed(new GeoNearPointComputedData(minDistanceObj));
        }

        return StatusWith<double>(minDistance);
    }
开发者ID:Mickael-van-der-Beek,项目名称:mongo,代码行数:71,代码来源:geo_near.cpp

Example 15: executeBatch

    /**
     * The core config write functionality.
     *
     * Config writes run in two passes - the first is a quick check to ensure the config servers
     * are all reachable, the second runs the actual write.
     *
     * TODO: Upgrade and move this logic to the config servers, a state machine implementation
     * is probably the next step.
     */
    void ConfigCoordinator::executeBatch(const BatchedCommandRequest& clientRequest,
                                         BatchedCommandResponse* clientResponse) {

        const NamespaceString nss(clientRequest.getNS());

        // Should never use it for anything other than DBs residing on the config server
        dassert(nss.db() == "config" || nss.db() == "admin");
        dassert(clientRequest.sizeWriteOps() == 1u);

        // This is an opportunistic check that all config servers look healthy by calling
        // getLastError on each one of them. If there was some form of write/journaling error, get
        // last error would fail.
        {
            for (vector<ConnectionString>::iterator it = _configHosts.begin();
                 it != _configHosts.end();
                 ++it) {

                _dispatcher->addCommand(*it,
                                        "admin",
                                        RawBSONSerializable(BSON("getLastError" << true <<
                                                                 "fsync" << true)));
            }

            _dispatcher->sendAll();

            bool error = false;
            while (_dispatcher->numPending()) {
                ConnectionString host;
                RawBSONSerializable response;

                Status status = _dispatcher->recvAny(&host, &response);
                if (status.isOK()) {
                    BSONObj obj = response.toBSON();

                    LOG(3) << "Response " << obj.toString();

                    // If the ok field is anything other than 1, count it as error
                    if (!obj["ok"].trueValue()) {
                        error = true;
                        log() << "Config server check for host " << host
                              << " returned error: " << response;
                    }
                }
                else {
                    error = true;
                    log() << "Config server check for host " << host
                          << " failed with status: " << status;
                }
            }

            // All responses should have been gathered by this point
            if (error) {
                clientResponse->setOk(false);
                clientResponse->setErrCode(ErrorCodes::RemoteValidationError);
                clientResponse->setErrMessage("Could not verify that config servers were active"
                                              " and reachable before write");
                return;
            }
        }

        if (!_checkConfigString(clientResponse)) {
            return;
        }

        //
        // Do the actual writes
        //

        BatchedCommandRequest configRequest( clientRequest.getBatchType() );
        clientRequest.cloneTo( &configRequest );
        configRequest.setNS( nss.coll() );

        OwnedPointerVector<ConfigResponse> responsesOwned;
        vector<ConfigResponse*>& responses = responsesOwned.mutableVector();

        //
        // Send the actual config writes
        //

        // Get as many batches as we can at once
        for (vector<ConnectionString>::const_iterator it = _configHosts.begin();
             it != _configHosts.end();
             ++it) {

            const ConnectionString& configHost = *it;
            _dispatcher->addCommand(configHost, nss.db(), configRequest);
        }

        // Send them all out
        _dispatcher->sendAll();

//......... part of the code omitted here .........
Developer: 7segments, Project: mongo-1, Lines: 101, Source: config_coordinator.cpp


Note: The OwnedPointerVector::mutableVector examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not republish without permission.