This article collects and summarizes typical usage examples of the C++ method BSONObj::getFieldDottedOrArray. If you are wondering what BSONObj::getFieldDottedOrArray does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of its containing class, BSONObj.
Shown below are 14 code examples of BSONObj::getFieldDottedOrArray, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
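Before the examples, a short orientation may help: getFieldDottedOrArray resolves a dotted field path, but it stops at the first array value it encounters and advances the caller's path pointer to the unconsumed suffix. All of the examples below rely on this contract. The snippet that follows is a minimal sketch of that behavior, not taken from any of the projects above; it assumes the legacy MongoDB C++ headers (the include path varies by version), the BSON()/BSON_ARRAY() builder macros, and a made-up demo function name.

#include "mongo/db/jsobj.h"  // assumption: legacy header providing BSONObj, BSONElement, BSON(), BSON_ARRAY()

using namespace mongo;

void getFieldDottedOrArrayDemo() {
    BSONObj doc = BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) << BSON( "b" << 2 ) ) );

    // The lookup stops at the first array on the path ("a") and leaves the
    // remaining suffix ("b") in 'path' for the caller to apply per element.
    const char* path = "a.b";
    BSONElement e = doc.getFieldDottedOrArray( path );
    // e is the array value of "a"; path now points at "b".

    // A path with no array along it is resolved to the end, and the pointer
    // is advanced to the empty string.
    BSONObj flat = BSON( "x" << BSON( "y" << 42 ) );
    const char* rest = "x.y";
    BSONElement y = flat.getFieldDottedOrArray( rest );  // y holds 42, *rest == '\0'
}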
Example 1: extractNextElement
/**
 * @param arrayNestedArray - set if the returned element is an array nested directly within arr.
 */
BSONElement extractNextElement( const BSONObj &obj, const BSONObj &arr, const char *&field, bool &arrayNestedArray ) const {
    string firstField = mongoutils::str::before( field, '.' );
    bool haveObjField = !obj.getField( firstField ).eoo();
    BSONElement arrField = arr.getField( firstField );
    bool haveArrField = !arrField.eoo();

    // An index component field name cannot exist in both a document array and one of that array's children.
    uassert( 15855,
             mongoutils::str::stream() <<
             "Ambiguous field name found in array (do not use numeric field names in "
             "embedded elements in an array), field: '" << arrField.fieldName() <<
             "' for array: " << arr,
             !haveObjField || !haveArrField );

    arrayNestedArray = false;
    if ( haveObjField ) {
        return obj.getFieldDottedOrArray( field );
    }
    else if ( haveArrField ) {
        if ( arrField.type() == Array ) {
            arrayNestedArray = true;
        }
        return arr.getFieldDottedOrArray( field );
    }
    return BSONElement();
}
Example 2: getHashKeys
// static
void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
                                        const string& hashedField,
                                        HashSeed seed,
                                        int hashVersion,
                                        bool isSparse,
                                        const CollatorInterface* collator,
                                        BSONObjSet* keys) {
    const char* cstr = hashedField.c_str();
    BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);

    // Convert strings to comparison keys.
    BSONObj fieldValObj;
    if (!fieldVal.eoo()) {
        BSONObjBuilder bob;
        CollationIndexKey::collationAwareIndexKeyAppend(fieldVal, collator, &bob);
        fieldValObj = bob.obj();
        fieldVal = fieldValObj.firstElement();
    }

    uassert(16766,
            "Error: hashed indexes do not currently support array values",
            fieldVal.type() != Array);

    if (!fieldVal.eoo()) {
        BSONObj key = BSON("" << makeSingleHashKey(fieldVal, seed, hashVersion));
        keys->insert(key);
    } else if (!isSparse) {
        BSONObj nullObj = BSON("" << BSONNULL);
        keys->insert(BSON("" << makeSingleHashKey(nullObj.firstElement(), seed, hashVersion)));
    }
}
Example 3: phraseMatch
/**
 * Checks if phrase is exactly matched in obj, returns true if so, false otherwise
 * @param phrase, the string to be matched
 * @param obj, document in the collection to match against
 */
bool FTSMatcher::phraseMatch( const string& phrase, const BSONObj& obj ) const {
    if ( _spec.wildcard() ) {
        // case where everything is indexed (all fields)
        return _phraseRecurse( phrase, obj );
    }

    for ( Weights::const_iterator i = _spec.weights().begin();
          i != _spec.weights().end();
          ++i ) {

        // figure out what the indexed field is.. ie. is it "field" or "field.subfield" etc.
        const char * leftOverName = i->first.c_str();
        BSONElement e = obj.getFieldDottedOrArray(leftOverName);

        if ( e.type() == Array ) {
            BSONObjIterator j( e.Obj() );
            while ( j.more() ) {
                BSONElement x = j.next();
                if ( leftOverName[0] && x.isABSONObj() )
                    x = x.Obj().getFieldDotted( leftOverName );
                if ( x.type() == String )
                    if ( _phraseMatches( phrase, x.String() ) )
                        return true;
            }
        }
        else if ( e.type() == String ) {
            if ( _phraseMatches( phrase, e.String() ) )
                return true;
        }
    }
    return false;
}
Example 4: extractNextElement
BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj& obj,
                                                    const PositionalPathInfo& positionalInfo,
                                                    const char** field,
                                                    bool* arrayNestedArray) const {
    std::string firstField = mongoutils::str::before(*field, '.');
    bool haveObjField = !obj.getField(firstField).eoo();
    BSONElement arrField = positionalInfo.positionallyIndexedElt;

    // An index component field name cannot exist in both a document
    // array and one of that array's children.
    uassert(16746,
            mongoutils::str::stream()
                << "Ambiguous field name found in array (do not use numeric field names in "
                   "embedded elements in an array), field: '" << arrField.fieldName()
                << "' for array: " << positionalInfo.arrayObj,
            !haveObjField || !positionalInfo.hasPositionallyIndexedElt());

    *arrayNestedArray = false;
    if (haveObjField) {
        return obj.getFieldDottedOrArray(*field);
    } else if (positionalInfo.hasPositionallyIndexedElt()) {
        if (arrField.type() == Array) {
            *arrayNestedArray = true;
        }
        *field = positionalInfo.remainingPath;
        return positionalInfo.dottedElt;
    }
    return BSONElement();
}
Example 5: getKeys
void HashAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) {
    const char* cstr = _hashedField.c_str();
    BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
    uassert(16766, "Error: hashed indexes do not currently support array values",
            fieldVal.type() != Array );

    if (!fieldVal.eoo()) {
        BSONObj key = BSON( "" << makeSingleKey( fieldVal , _seed , _hashVersion ) );
        keys->insert( key );
    } else if (!_descriptor->isSparse()) {
        keys->insert( _missingKey.copy() );
    }
}
Example 6: getKeys
void HashedIndexType::getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
    string hashedFieldCopy = string( _hashedField );
    const char* hashedFieldCopyPtr = hashedFieldCopy.c_str();
    BSONElement fieldVal = obj.getFieldDottedOrArray( hashedFieldCopyPtr );

    uassert( 16244 , "Error: hashed indexes do not currently support array values" , fieldVal.type() != Array );

    if ( ! fieldVal.eoo() ) {
        BSONObj key = BSON( "" << makeSingleKey( fieldVal , _seed , _hashVersion ) );
        keys.insert( key );
    }
    else if (! _isSparse ) {
        keys.insert( _missingKey.copy() );
    }
}
Example 7: getKeysImpl
// static
void HashAccessMethod::getKeysImpl(const BSONObj& obj, const string& hashedField, HashSeed seed,
                                   int hashVersion, bool isSparse, BSONObjSet* keys) {
    const char* cstr = hashedField.c_str();
    BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
    uassert(16766, "Error: hashed indexes do not currently support array values",
            fieldVal.type() != Array );

    if (!fieldVal.eoo()) {
        BSONObj key = BSON( "" << makeSingleKey(fieldVal, seed, hashVersion));
        keys->insert(key);
    }
    else if (!isSparse) {
        BSONObj nullObj = BSON("" << BSONNULL);
        keys->insert(BSON("" << makeSingleKey(nullObj.firstElement(), seed, hashVersion)));
    }
}
Example 8: scoreDocument
/*
 * Calculates the score for all terms in a document of a collection
 * @param obj, the document in the collection being parsed
 * @param term_freqs, map<string,double> to fill up
 */
void FTSSpec::scoreDocument( const BSONObj& obj, TermFrequencyMap* term_freqs ) const {
    string language = getLanguageToUse( obj );

    Stemmer stemmer(language);
    Tools tools(language);
    tools.stemmer = &stemmer;
    tools.stopwords = StopWords::getStopWords( language );

    if ( wildcard() ) {
        // if * is specified for weight, we can recurse over all fields.
        _scoreRecurse(tools, obj, term_freqs);
        return;
    }

    // otherwise, we need to remember the different weights for each field
    // and act accordingly (in other words, call _score)
    for ( Weights::const_iterator i = _weights.begin(); i != _weights.end(); i++ ) {
        const char * leftOverName = i->first.c_str();
        // name of field
        BSONElement e = obj.getFieldDottedOrArray(leftOverName);
        // weight associated to name of field
        double weight = i->second;

        if ( e.eoo() ) {
            // do nothing
        }
        else if ( e.type() == Array ) {
            BSONObjIterator j( e.Obj() );
            while ( j.more() ) {
                BSONElement x = j.next();
                if ( leftOverName[0] && x.isABSONObj() )
                    x = x.Obj().getFieldDotted( leftOverName );
                if ( x.type() == String )
                    _scoreString( tools, x.valuestr(), term_freqs, weight );
            }
        }
        else if ( e.type() == String ) {
            _scoreString( tools, e.valuestr(), term_freqs, weight );
        }
    }
}
Example 9: hasNegativeTerm
/*
 * Checks if the obj contains any of the negTerms, if so returns true, otherwise false
 * @param obj, object to be checked
 */
bool FTSMatcher::hasNegativeTerm(const BSONObj& obj ) const {
    // called during search. deals with the case in which we have a term
    // flagged for exclusion, i.e. "hello -world" we want to remove all
    // results that include "world"
    if ( _query.getNegatedTerms().size() == 0 )
        return false;

    if ( _spec.wildcard() ) {
        return _hasNegativeTerm_recurse(obj);
    }

    /* otherwise look at fields where weights are defined */
    for ( Weights::const_iterator i = _spec.weights().begin();
          i != _spec.weights().end();
          i++ ) {
        const char * leftOverName = i->first.c_str();
        BSONElement e = obj.getFieldDottedOrArray(leftOverName);

        if ( e.type() == Array ) {
            BSONObjIterator j( e.Obj() );
            while ( j.more() ) {
                BSONElement x = j.next();
                if ( leftOverName[0] && x.isABSONObj() )
                    x = x.Obj().getFieldDotted( leftOverName );
                if ( x.type() == String )
                    if ( _hasNegativeTerm_string( x.String() ) )
                        return true;
            }
        }
        else if ( e.type() == String ) {
            if ( _hasNegativeTerm_string( e.String() ) )
                return true;
        }
    }
    return false;
}
Example 10: _getKeys
void IndexSpec::_getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
    BSONElement arrElt;
    unsigned arrIdx = ~0;
    for( unsigned i = 0; i < fieldNames.size(); ++i ) {
        if ( *fieldNames[ i ] == '\0' )
            continue;
        BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
        if ( e.eoo() )
            e = _nullElt; // no matching field
        if ( e.type() != Array )
            fieldNames[ i ] = ""; // no matching field or non-array match
        if ( *fieldNames[ i ] == '\0' )
            fixed[ i ] = e; // no need for further object expansion (though array expansion still possible)
        if ( e.type() == Array && arrElt.eoo() ) { // we only expand arrays on a single path -- track the path here
            arrIdx = i;
            arrElt = e;
        }
        // enforce single array path here
        uassert( 10088 , "cannot index parallel arrays", e.type() != Array || e.rawdata() == arrElt.rawdata() );
    }

    bool allFound = true; // have we found elements for all field names in the key spec?
    for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ){
        if ( **i != '\0' ){
            allFound = false;
            break;
        }
    }

    bool insertArrayNull = false;
    if ( allFound ) {
        if ( arrElt.eoo() ) {
            // no terminal array element to expand
            BSONObjBuilder b(_sizeTracker);
            for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
                b.appendAs( *i, "" );
            keys.insert( b.obj() );
        }
        else {
            // terminal array element to expand, so generate all keys
            BSONObjIterator i( arrElt.embeddedObject() );
            if ( i.more() ){
                while( i.more() ) {
                    BSONObjBuilder b(_sizeTracker);
                    for( unsigned j = 0; j < fixed.size(); ++j ) {
                        if ( j == arrIdx )
                            b.appendAs( i.next(), "" );
                        else
                            b.appendAs( fixed[ j ], "" );
                    }
                    keys.insert( b.obj() );
                }
            }
            else if ( fixed.size() > 1 ){
                insertArrayNull = true;
            }
        }
    } else {
        // nonterminal array element to expand, so recurse
        assert( !arrElt.eoo() );
        BSONObjIterator i( arrElt.embeddedObject() );
        if ( i.more() ){
            while( i.more() ) {
                BSONElement e = i.next();
                if ( e.type() == Object ){
                    _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
                }
            }
        }
        else {
            insertArrayNull = true;
        }
    }

    if ( insertArrayNull ) {
        // x : [] - need to insert undefined
        BSONObjBuilder b(_sizeTracker);
        for( unsigned j = 0; j < fixed.size(); ++j ) {
            if ( j == arrIdx ){
                b.appendUndefined( "" );
            }
            else {
                BSONElement e = fixed[j];
                if ( e.eoo() )
                    b.appendNull( "" );
                else
                    b.appendAs( e , "" );
            }
        }
        keys.insert( b.obj() );
    }
}
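Examples 10 and 11 perform the same walk for every indexed field: resolve the dotted path until the first array, keep the leftover suffix, then apply that suffix to each array element when building keys. The snippet below reproduces that per-field walk in isolation; it is not part of the original IndexSpec code and, like the earlier sketch, assumes the legacy MongoDB headers and a made-up demo function name.

#include "mongo/db/jsobj.h"  // assumption: legacy header with BSONObj, BSONElement, BSONObjIterator
#include <iostream>

using namespace mongo;

// Expand one dotted, possibly-array path into the values a key generator
// would see -- the per-field step performed by _getKeys()/getKeysImpl().
void expandPathDemo() {
    BSONObj doc = BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) << BSON( "b" << 2 ) ) << "c" << 3 );

    const char* leftOverName = "a.b";
    BSONElement e = doc.getFieldDottedOrArray( leftOverName );  // array "a"; leftOverName now == "b"

    if ( e.type() == Array ) {
        BSONObjIterator j( e.embeddedObject() );
        while ( j.more() ) {
            BSONElement x = j.next();
            // Apply the remaining suffix to each array member, as the generators do.
            if ( *leftOverName && x.isABSONObj() )
                x = x.Obj().getFieldDotted( leftOverName );
            std::cout << x.numberInt() << std::endl;  // prints 1, then 2
        }
    }
}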
Example 11: getKeysImpl
void BtreeKeyGeneratorV0::getKeysImpl(vector<const char*> fieldNames, vector<BSONElement> fixed,
                                      const BSONObj &obj, BSONObjSet *keys) const {
    BSONElement arrElt;
    unsigned arrIdx = ~0;
    unsigned numNotFound = 0;

    for ( unsigned i = 0; i < fieldNames.size(); ++i ) {
        if ( *fieldNames[ i ] == '\0' )
            continue;

        BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );

        if ( e.eoo() ) {
            e = _nullElt; // no matching field
            numNotFound++;
        }

        if ( e.type() != Array )
            fieldNames[ i ] = ""; // no matching field or non-array match

        if ( *fieldNames[ i ] == '\0' )
            // no need for further object expansion (though array expansion still possible)
            fixed[ i ] = e;

        if ( e.type() == Array && arrElt.eoo() ) {
            // we only expand arrays on a single path -- track the path here
            arrIdx = i;
            arrElt = e;
        }

        // enforce single array path here
        if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
            assertParallelArrays( e.fieldName(), arrElt.fieldName() );
        }
    }

    bool allFound = true; // have we found elements for all field names in the key spec?
    for (vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
         ++i ) {
        if ( **i != '\0' ) {
            allFound = false;
            break;
        }
    }

    if ( _isSparse && numNotFound == _fieldNames.size()) {
        // we didn't find any fields
        // so we're not going to index this document
        return;
    }

    bool insertArrayNull = false;

    if ( allFound ) {
        if ( arrElt.eoo() ) {
            // no terminal array element to expand
            BSONObjBuilder b(_sizeTracker);
            for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
                b.appendAs( *i, "" );
            keys->insert( b.obj() );
        }
        else {
            // terminal array element to expand, so generate all keys
            BSONObjIterator i( arrElt.embeddedObject() );
            if ( i.more() ) {
                while( i.more() ) {
                    BSONObjBuilder b(_sizeTracker);
                    for( unsigned j = 0; j < fixed.size(); ++j ) {
                        if ( j == arrIdx )
                            b.appendAs( i.next(), "" );
                        else
                            b.appendAs( fixed[ j ], "" );
                    }
                    keys->insert( b.obj() );
                }
            }
            else if ( fixed.size() > 1 ) {
                insertArrayNull = true;
            }
        }
    }
    else {
        // nonterminal array element to expand, so recurse
        verify( !arrElt.eoo() );
        BSONObjIterator i( arrElt.embeddedObject() );
        if ( i.more() ) {
            while( i.more() ) {
                BSONElement e = i.next();
                if ( e.type() == Object ) {
                    getKeysImpl( fieldNames, fixed, e.embeddedObject(), keys );
                }
            }
        }
        else {
            insertArrayNull = true;
        }
    }

    if ( insertArrayNull ) {
        // x : [] - need to insert undefined
//......... part of the code omitted here .........
Example 12: _getKeys
// PD_TRACE_DECLARE_FUNCTION ( SDB__IXMKEYGEN__GETKEYS, "_ixmKeyGenerator::_getKeys" )
INT32 _getKeys( vector<const CHAR *> &fieldNames,
                const BSONObj &obj,
                BSONObjSet &keys,
                BSONElement *arrEle ) const
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB__IXMKEYGEN__GETKEYS );
#define IXM_DEFAULT_FIELD_NUM 3
   BSONElement eleOnStack[IXM_DEFAULT_FIELD_NUM] ;
   BSONElement *keyEles = NULL ;
   const CHAR *arrEleName = NULL ;
   UINT32 arrElePos = 0 ;
   UINT32 eooNum = 0 ;

   if ( IXM_DEFAULT_FIELD_NUM < fieldNames.size() )
   {
      keyEles = new(std::nothrow) BSONElement[fieldNames.size()] ;
      if ( NULL == keyEles )
      {
         PD_LOG( PDERROR, "failed to allocate mem." ) ;
         rc = SDB_OOM ;
         goto error ;
      }
   }
   else
   {
      keyEles = ( BSONElement* )eleOnStack ;
   }

   for ( UINT32 i = 0; i < fieldNames.size(); i++ )
   {
      const CHAR *name = fieldNames.at( i ) ;
      SDB_ASSERT( '\0' != name[0], "can not be empty" ) ;
      BSONElement &e = keyEles[i] ;
      e = obj.getFieldDottedOrArray( name ) ;
      if ( e.eoo() )
      {
         ++eooNum ;
         continue ;
      }
      else if ( Array == e.type() )
      {
         if ( !arrEle->eoo() )
         {
            PD_LOG( PDERROR, "At most one array can be in the key: %s, %s",
                    arrEle->fieldName(), e.fieldName() ) ;
            rc = SDB_IXM_MULTIPLE_ARRAY ;
            goto error ;
         }
         else
         {
            *arrEle = e ;
            arrEleName = name ;
            arrElePos = i ;
         }
      }
      else
      {
         continue ;
      }
   }

   if ( fieldNames.size() == eooNum )
   {
      rc = SDB_OK ;
      goto done ;
   }
   else if ( !arrEle->eoo() )
   {
      rc = _genKeyWithArrayEle( keyEles, fieldNames.size(),
                                arrEle,
                                arrEleName, arrElePos,
                                keys ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to gen keys with array element:%d", rc ) ;
         goto error ;
      }
   }
   else
   {
      rc = _genKeyWithNormalEle( keyEles, fieldNames.size(), keys ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to gen keys with normal element:%d", rc ) ;
         goto error ;
      }
   }

done:
   if ( IXM_DEFAULT_FIELD_NUM < fieldNames.size() &&
        NULL != keyEles )
   {
      delete []keyEles ;
   }
   PD_TRACE_EXITRC ( SDB__IXMKEYGEN__GETKEYS, rc );
   return rc ;
error:
   goto done ;
}
Example 13: arrComponents
//......... part of the code omitted here .........
                            true,
                            _emptyPositionalInfo,
                            multikeyPaths);
    } else {
        BSONObj arrObj = arrElt.embeddedObject();

        // For positional key patterns, e.g. {'a.1.b': 1}, we lookup the indexed array element
        // and then traverse the remainder of the field path up front. This prevents us from
        // having to look up the indexed element again on each recursive call (i.e. once per
        // array element).
        std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
        for (size_t i = 0; i < fieldNames.size(); ++i) {
            const bool fieldIsArray = arrIdxs.find(i) != arrIdxs.end();

            if (*fieldNames[i] == '\0') {
                // We've reached the end of the path.
                if (multikeyPaths && fieldIsArray && mayExpandArrayUnembedded) {
                    // The 'arrElt' array value isn't expanded into multiple elements when the last
                    // component of the indexed field is positional and 'arrElt' contains nested
                    // array values. In all other cases, the 'arrElt' array value may be expanded
                    // into multiple element and can therefore cause the index to be multikey.
                    arrComponents[i] = _pathLengths[i] - 1;
                }
                continue;
            }

            // The earlier call to BSONObj::getFieldDottedOrArray(fieldNames[i]) modified
            // fieldNames[i] to refer to the suffix of the path immediately following the 'arrElt'
            // array value. If we haven't reached the end of this indexed field yet, then we must
            // have traversed through 'arrElt'.
            invariant(fieldIsArray);

            StringData part = fieldNames[i];
            part = part.substr(0, part.find('.'));
            subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
            if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
                // We aren't indexing a particular element of the 'arrElt' array value, so it may be
                // expanded into multiple elements. It can therefore cause the index to be multikey.
                if (multikeyPaths) {
                    // We need to determine which component of the indexed field causes the index to
                    // be multikey as a result of the 'arrElt' array value. Since
                    //
                    //   NumComponents("<pathPrefix>") + NumComponents("<pathSuffix>")
                    //       = NumComponents("<pathPrefix>.<pathSuffix>"),
                    //
                    // we can compute the number of components in a prefix of the indexed field by
                    // subtracting the number of components in the suffix 'fieldNames[i]' from the
                    // number of components in the indexed field '_fieldNames[i]'.
                    //
                    // For example, consider the indexed field "a.b.c" and the suffix "c". The path
                    // "a.b.c" has 3 components and the suffix "c" has 1 component. Subtracting the
                    // latter from the former yields the number of components in the prefix "a.b",
                    // i.e. 2.
                    size_t fullPathLength = _pathLengths[i];
                    size_t suffixPathLength = FieldRef{fieldNames[i]}.numParts();
                    invariant(suffixPathLength < fullPathLength);
                    arrComponents[i] = fullPathLength - suffixPathLength - 1;
                }
                continue;
            }

            // We're indexing an array element by its position. Traverse the remainder of the
            // field path now.
            //
            // Indexing an array element by its position selects a particular element of the
            // 'arrElt' array value when generating keys. It therefore cannot cause the index to be
            // multikey.
            subPositionalInfo[i].arrayObj = arrObj;
            subPositionalInfo[i].remainingPath = fieldNames[i];
            subPositionalInfo[i].dottedElt =
                arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
        }

        // Generate a key for each element of the indexed array.
        size_t nArrObjFields = 0;
        for (const auto arrObjElem : arrObj) {
            _getKeysArrEltFixed(&fieldNames,
                                &fixed,
                                arrObjElem,
                                keys,
                                numNotFound,
                                arrElt,
                                arrIdxs,
                                mayExpandArrayUnembedded,
                                subPositionalInfo,
                                multikeyPaths);
            ++nArrObjFields;
        }

        if (multikeyPaths && nArrObjFields > 1) {
            // The 'arrElt' array value contains multiple elements, so we say that it causes the
            // index to be multikey.
            for (size_t i = 0; i < arrComponents.size(); ++i) {
                if (auto arrComponent = arrComponents[i]) {
                    (*multikeyPaths)[i].insert(*arrComponent);
                }
            }
        }
    }
}
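The positional case handled by Example 13 (key patterns such as {'a.1.b': 1}) can also be shown in isolation: once getFieldDottedOrArray stops at the array, the leading component of the leftover path names a specific array position rather than a field to expand. The following is a hedged sketch, not taken from the original source, again assuming the legacy MongoDB headers and a made-up demo function name.

#include "mongo/db/jsobj.h"  // assumption: legacy header with BSONObj, BSONElement, StringData

using namespace mongo;

void positionalPathDemo() {
    // Document for an index pattern like {'a.1.b': 1}.
    BSONObj doc = BSON( "a" << BSON_ARRAY( BSON( "b" << 10 ) << BSON( "b" << 20 ) ) );

    const char* path = "a.1.b";
    BSONElement arr = doc.getFieldDottedOrArray( path );   // array "a"; path now == "1.b"

    // Peel off the positional component, mirroring Example 13.
    StringData part = path;
    part = part.substr( 0, part.find( '.' ) );              // "1"
    BSONElement positional = arr.embeddedObject()[part];    // the second array member, {b: 20}

    // The remainder of the path ("b") is then resolved against that one element,
    // so a single key value (20) is generated instead of one key per array member.
}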
Example 14: insert
int dmsFile::insert ( BSONObj &record, BSONObj &outRecord, dmsRecordID &rid )
{
   int rc = EDB_OK ;
   PAGEID pageID = 0 ;
   char *page = NULL ;
   dmsPageHeader *pageHeader = NULL ;
   int recordSize = 0 ;
   SLOTOFF offsetTemp = 0 ;
   const char *pGKeyFieldName = NULL ;
   dmsRecord recordHeader ;

   recordSize = record.objsize() ;
   // when we attempt to insert record, first we have to verify it include _id field
   if ( (unsigned int)recordSize > DMS_MAX_RECORD )
   {
      rc = EDB_INVALIDARG ;
      PD_LOG ( PDERROR, "record cannot bigger than 4MB" ) ;
      goto error ;
   }
   pGKeyFieldName = gKeyFieldName ;
   // make sure _id exists
   if ( record.getFieldDottedOrArray ( pGKeyFieldName ).eoo () )
   {
      rc = EDB_INVALIDARG ;
      PD_LOG ( PDERROR, "record must be with _id" ) ;
      goto error ;
   }

retry :
   // lock the database
   _mutex.get() ;
   // and then we should get the required record size
   pageID = _findPage ( recordSize + sizeof(dmsRecord) ) ;
   // if there's not enough space in any existing pages, let's release db lock
   if ( DMS_INVALID_PAGEID == pageID )
   {
      _mutex.release () ;
      // if there's not enough space in any existing pages, let's release db lock and
      // try to allocate a new segment by calling _extendSegment
      if ( _extendMutex.try_get() )
      {
         // calling _extendSegment
         rc = _extendSegment () ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "Failed to extend segment, rc = %d", rc ) ;
            _extendMutex.release () ;
            goto error ;
         }
      }
      else
      {
         // if we cannot get the extendmutex, that means someone else is trying to extend
         // so let's wait until getting the mutex, and release it and try again
         _extendMutex.get() ;
      }
      _extendMutex.release () ;
      goto retry ;
   }

   // find the in-memory offset for the page
   page = pageToOffset ( pageID ) ;
   // if something wrong, let's return error
   if ( !page )
   {
      rc = EDB_SYS ;
      PD_LOG ( PDERROR, "Failed to find the page" ) ;
      goto error_releasemutex ;
   }
   // set page header
   pageHeader = (dmsPageHeader *)page ;
   if ( memcmp ( pageHeader->_eyeCatcher, DMS_PAGE_EYECATCHER,
                 DMS_PAGE_EYECATCHER_LEN ) != 0 )
   {
      rc = EDB_SYS ;
      PD_LOG ( PDERROR, "Invalid page header" ) ;
      goto error_releasemutex ;
   }

   // slot offset is the last byte of slots
   // free offset is the first byte of data
   // so freeOffset - slotOffset is the actual free space excluding holes
   if (
      // make sure there's still holes to recover
      ( pageHeader->_freeSpace >
        pageHeader->_freeOffset - pageHeader->_slotOffset ) &&
      // if there's no free space excluding holes
      ( pageHeader->_slotOffset + recordSize + sizeof(dmsRecord) + sizeof(SLOTID) >
        pageHeader->_freeOffset )
      )
   {
      // recover empty hole from page
      _recoverSpace ( page ) ;
   }
   if (
      // make sure there is enough free space
      ( pageHeader->_freeSpace < recordSize + sizeof(dmsRecord) + sizeof(SLOTID) ) ||
      ( pageHeader->_freeOffset - pageHeader->_slotOffset <
        recordSize + sizeof(dmsRecord) + sizeof(SLOTID) )
      )
   {
//......... part of the code omitted here .........