This page collects typical usage examples of the AllocSetContextCreate function, drawn from C/C++ open source code (the excerpts below are PostgreSQL and related backend C). If you are wondering what AllocSetContextCreate does in practice and how it is normally called, the selected examples below should help.
The sections that follow show 15 code examples of AllocSetContextCreate, ordered by popularity by default.
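Before the excerpts, here is a minimal, hedged sketch of the lifecycle that all of the examples below follow in some form: create a context as a child of an existing one, switch into it, allocate freely, switch back, and delete the context to release everything at once. The function name and context name here are illustrative only; the calls themselves (AllocSetContextCreate, MemoryContextSwitchTo, palloc, MemoryContextDelete) are the standard PostgreSQL memory-context API from postgres.h and utils/memutils.h.

#include "postgres.h"
#include "utils/memutils.h"

/* Illustrative only: the usual create / switch / allocate / delete cycle. */
static void
example_context_lifecycle(void)
{
    MemoryContext workcxt;
    MemoryContext oldcxt;
    char       *buf;

    workcxt = AllocSetContextCreate(CurrentMemoryContext,
                                    "example work context",
                                    ALLOCSET_DEFAULT_MINSIZE,
                                    ALLOCSET_DEFAULT_INITSIZE,
                                    ALLOCSET_DEFAULT_MAXSIZE);

    oldcxt = MemoryContextSwitchTo(workcxt);
    buf = palloc(1024);         /* allocated inside workcxt */
    memset(buf, 0, 1024);
    MemoryContextSwitchTo(oldcxt);

    /* frees buf and everything else allocated in workcxt, in one call */
    MemoryContextDelete(workcxt);
}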
Example 1: vacuum
/*
* Primary entry point for VACUUM and ANALYZE commands.
*
* relid is normally InvalidOid; if it is not, then it provides the relation
* OID to be processed, and vacstmt->relation is ignored. (The non-invalid
* case is currently only used by autovacuum.)
*
* do_toast is passed as FALSE by autovacuum, because it processes TOAST
* tables separately.
*
* for_wraparound is used by autovacuum to let us know when it's forcing
* a vacuum for wraparound, which should not be auto-cancelled.
*
* bstrategy is normally given as NULL, but in autovacuum it can be passed
* in to use the same buffer strategy object across multiple vacuum() calls.
*
* isTopLevel should be passed down from ProcessUtility.
*
* It is the caller's responsibility that vacstmt and bstrategy
* (if given) be allocated in a memory context that won't disappear
* at transaction commit.
*/
void
vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
BufferAccessStrategy bstrategy, bool for_wraparound, bool isTopLevel)
{
const char *stmttype;
volatile bool all_rels,
in_outer_xact,
use_own_xacts;
List *relations;
/* sanity checks on options */
Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE));
Assert((vacstmt->options & VACOPT_VACUUM) ||
!(vacstmt->options & (VACOPT_FULL | VACOPT_FREEZE)));
Assert((vacstmt->options & VACOPT_ANALYZE) || vacstmt->va_cols == NIL);
stmttype = (vacstmt->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
/*
* We cannot run VACUUM inside a user transaction block; if we were inside
* a transaction, then our commit- and start-transaction-command calls
* would not have the intended effect! There are numerous other subtle
* dependencies on this, too.
*
* ANALYZE (without VACUUM) can run either way.
*/
if (vacstmt->options & VACOPT_VACUUM)
{
PreventTransactionChain(isTopLevel, stmttype);
in_outer_xact = false;
}
else
in_outer_xact = IsInTransactionChain(isTopLevel);
/*
* Send info about dead objects to the statistics collector, unless we are
* in autovacuum --- autovacuum.c does this for itself.
*/
if ((vacstmt->options & VACOPT_VACUUM) && !IsAutoVacuumWorkerProcess())
pgstat_vacuum_stat();
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of PortalContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* If caller didn't give us a buffer strategy object, make one in the
* cross-transaction memory context.
*/
if (bstrategy == NULL)
{
MemoryContext old_context = MemoryContextSwitchTo(vac_context);
bstrategy = GetAccessStrategy(BAS_VACUUM);
MemoryContextSwitchTo(old_context);
}
vac_strategy = bstrategy;
/* Remember whether we are processing everything in the DB */
all_rels = (!OidIsValid(relid) && vacstmt->relation == NULL);
/*
* Build list of relations to process, unless caller gave us one. (If we
* build one, we put it in vac_context for safekeeping.)
*/
relations = get_rel_oids(relid, vacstmt->relation);
/*
* Decide whether we need to start/commit our own transactions.
*
//... rest of the code omitted here ...
Example 2: ExecInitSetOp
/* ----------------------------------------------------------------
* ExecInitSetOp
*
* This initializes the setop node state structures and
* the node's subplan.
* ----------------------------------------------------------------
*/
SetOpState *
ExecInitSetOp(SetOp *node, EState *estate, int eflags)
{
SetOpState *setopstate;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
* create state structure
*/
setopstate = makeNode(SetOpState);
setopstate->ps.plan = (Plan *) node;
setopstate->ps.state = estate;
setopstate->ps.ps_OuterTupleSlot = NULL;
setopstate->subplan_done = false;
setopstate->numOutput = 0;
/*
* Miscellaneous initialization
*
* SetOp nodes have no ExprContext initialization because they never call
* ExecQual or ExecProject. But they do need a per-tuple memory context
* anyway for calling execTuplesMatch.
*/
setopstate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
"SetOp",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
#define SETOP_NSLOTS 1
/*
* Tuple table initialization
*/
ExecInitResultTupleSlot(estate, &setopstate->ps);
/*
* then initialize outer plan
*/
outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
/*
* setop nodes do no projections, so initialize projection info for this
* node appropriately
*/
ExecAssignResultTypeFromTL(&setopstate->ps);
setopstate->ps.ps_ProjInfo = NULL;
/*
* Precompute fmgr lookup data for inner loop
*/
setopstate->eqfunctions =
execTuplesMatchPrepare(ExecGetResultType(&setopstate->ps),
node->numCols,
node->dupColIdx);
return setopstate;
}
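ExecInitSetOp only creates tempContext here; the per-tuple reset happens later in the node's execution routine, which is not part of this excerpt. As a hedged sketch (the function name below is illustrative, not taken from nodeSetOp.c), the short-lived-context pattern it sets up is generally used like this:

/* Illustrative per-tuple use of a short-lived context such as tempContext. */
static void
process_one_tuple(SetOpState *setopstate)
{
    MemoryContext oldcxt;

    /* throw away whatever the previous tuple's comparisons allocated */
    MemoryContextReset(setopstate->tempContext);

    oldcxt = MemoryContextSwitchTo(setopstate->tempContext);
    /* ... per-tuple work (e.g. tuple comparisons) allocates here ... */
    MemoryContextSwitchTo(oldcxt);
}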
Example 3: hash_create
/*
* hash_create -- create a new dynamic hash table
*
* tabname: a name for the table (for debugging purposes)
* nelem: maximum number of elements expected
* *info: additional table parameters, as indicated by flags
* flags: bitmask indicating which parameters to take from *info
*
* Note: for a shared-memory hashtable, nelem needs to be a pretty good
* estimate, since we can't expand the table on the fly. But an unshared
* hashtable can be expanded on-the-fly, so it's better for nelem to be
* on the small side and let the table grow if it's exceeded. An overly
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
/*
* For shared hash tables, we have a local hash header (HTAB struct) that
* we allocate in TopMemoryContext; all else is in shared memory.
*
* For non-shared hash tables, everything including the hash header is in
* a memory context created specially for the hash table --- this makes
* hash_destroy very simple. The memory context is made a child of either
* a context specified by the caller, or TopMemoryContext if nothing is
* specified.
*/
if (flags & HASH_SHARED_MEM)
{
/* Set up to allocate the hash header */
CurrentDynaHashCxt = TopMemoryContext;
}
else
{
/* Create the hash table's private memory context */
if (flags & HASH_CONTEXT)
CurrentDynaHashCxt = info->hcxt;
else
CurrentDynaHashCxt = TopMemoryContext;
CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
tabname,
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
}
/* Initialize the hash header, plus a copy of the table name */
hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
MemSet(hashp, 0, sizeof(HTAB));
hashp->tabname = (char *) (hashp + 1);
strcpy(hashp->tabname, tabname);
if (flags & HASH_FUNCTION)
hashp->hash = info->hash;
else
hashp->hash = string_hash; /* default hash function */
/*
* If you don't specify a match function, it defaults to string_compare if
* you used string_hash (either explicitly or by default) and to memcmp
* otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.)
*/
if (flags & HASH_COMPARE)
hashp->match = info->match;
else if (hashp->hash == string_hash)
hashp->match = (HashCompareFunc) string_compare;
else
hashp->match = memcmp;
/*
* Similarly, the key-copying function defaults to strlcpy or memcpy.
*/
if (flags & HASH_KEYCOPY)
hashp->keycopy = info->keycopy;
else if (hashp->hash == string_hash)
hashp->keycopy = (HashCopyFunc) strlcpy;
else
hashp->keycopy = memcpy;
if (flags & HASH_ALLOC)
hashp->alloc = info->alloc;
else
hashp->alloc = DynaHashAlloc;
if (flags & HASH_SHARED_MEM)
{
/*
* ctl structure and directory are preallocated for shared memory
* tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
hashp->hcxt = NULL;
hashp->isshared = true;
//... rest of the code omitted here ...
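The comment above notes that giving each non-shared table its own memory context "makes hash_destroy very simple", and the HASH_CONTEXT flag lets the caller choose the parent of that context. Below is a hedged caller-side sketch of that flag using the older dynahash API shown in this excerpt; the entry struct, sizes, and names are illustrative, and the usual utils/hsearch.h and utils/memutils.h includes are assumed.

typedef struct ScopedEntry
{
    char        name[NAMEDATALEN];  /* hash key; must be first */
    int         count;
} ScopedEntry;

/* Illustrative: tie a private hash table's memory to a context we control. */
static HTAB *
make_scoped_hash(MemoryContext parent)
{
    HASHCTL     info;

    MemSet(&info, 0, sizeof(info));
    info.keysize = NAMEDATALEN;
    info.entrysize = sizeof(ScopedEntry);
    info.hcxt = AllocSetContextCreate(parent,
                                      "scoped hash table",
                                      ALLOCSET_DEFAULT_MINSIZE,
                                      ALLOCSET_DEFAULT_INITSIZE,
                                      ALLOCSET_DEFAULT_MAXSIZE);

    /* default string_hash / string_compare apply, as described above */
    return hash_create("scoped hash", 128, &info, HASH_ELEM | HASH_CONTEXT);
}

Calling hash_destroy on the result, or deleting the context supplied in info.hcxt, releases everything the table allocated in one step.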
Example 4: ginInsertCleanup
/*
* Move tuples from pending pages into regular GIN structure.
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
* either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
* bad will happen. Likewise, if two backends simultaneously try to post
* a pending entry into the main index, one will succeed and one will do
* nothing. We try to notice when someone else is a little bit ahead of
* us in the process, but that's just to avoid wasting cycles. Only the
* action of removing a page from the pending list really needs exclusive
* lock.
*
* vac_delay indicates that ginInsertCleanup is called from vacuum process,
* so call vacuum_delay_point() periodically.
* If stats isn't null, we count deleted pending pages into the counts.
*/
void
ginInsertCleanup(GinState *ginstate,
bool vac_delay, IndexBulkDeleteResult *stats)
{
Relation index = ginstate->index;
Buffer metabuffer,
buffer;
Page metapage,
page;
GinMetaPageData *metadata;
MemoryContext opCtx,
oldCtx;
BuildAccumulator accum;
KeyArray datums;
BlockNumber blkno;
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
LockBuffer(metabuffer, GIN_SHARE);
metapage = BufferGetPage(metabuffer);
metadata = GinPageGetMeta(metapage);
if (metadata->head == InvalidBlockNumber)
{
/* Nothing to do */
UnlockReleaseBuffer(metabuffer);
return;
}
/*
* Read and lock head of pending list
*/
blkno = metadata->head;
buffer = ReadBuffer(index, blkno);
LockBuffer(buffer, GIN_SHARE);
page = BufferGetPage(buffer);
LockBuffer(metabuffer, GIN_UNLOCK);
/*
* Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
oldCtx = MemoryContextSwitchTo(opCtx);
initKeyArray(&datums, 128);
ginInitBA(&accum);
accum.ginstate = ginstate;
/*
* At the top of this loop, we have pin and lock on the current page of
* the pending list. However, we'll release that before exiting the loop.
* Note we also have pin but not lock on the metapage.
*/
for (;;)
{
if (GinPageIsDeleted(page))
{
/* another cleanup process is running concurrently */
UnlockReleaseBuffer(buffer);
break;
}
/*
* read page's datums into accum
*/
processPendingPage(&accum, &datums, page, FirstOffsetNumber);
vacuum_delay_point();
/*
* Is it time to flush memory to disk? Flush if we are at the end of
* the pending list, or if we have a full row and memory is getting
* full.
*
* XXX using up maintenance_work_mem here is probably unreasonably
//... rest of the code omitted here ...
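The omitted remainder of this function flushes the accumulated entries into the main index and then reuses opCtx for the next batch of pending pages. As a hedged, generic illustration of that reset-and-reuse idiom (this is not the literal ginfast.c code; names and counts are made up):

/* Illustrative only: empty a workspace context between batches instead of
 * repeatedly creating and deleting it. */
static void
batch_loop_sketch(void)
{
    MemoryContext workCtx;
    MemoryContext oldCtx;
    int         i;

    workCtx = AllocSetContextCreate(CurrentMemoryContext,
                                    "batch workspace",
                                    ALLOCSET_DEFAULT_MINSIZE,
                                    ALLOCSET_DEFAULT_INITSIZE,
                                    ALLOCSET_DEFAULT_MAXSIZE);
    oldCtx = MemoryContextSwitchTo(workCtx);

    for (i = 0; i < 8; i++)
    {
        /* accumulate per-batch state in workCtx ... */
        (void) palloc(8192);

        /* ... then "flush" and drop it, keeping the context for reuse */
        MemoryContextSwitchTo(oldCtx);
        MemoryContextReset(workCtx);
        oldCtx = MemoryContextSwitchTo(workCtx);
    }

    MemoryContextSwitchTo(oldCtx);
    MemoryContextDelete(workCtx);
}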
Example 5: ginbuild
Datum
ginbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
GinBuildState buildstate;
Buffer RootBuffer,
MetaBuffer;
ItemPointerData *list;
Datum entry;
uint32 nlist;
MemoryContext oldCtx;
OffsetNumber attnum;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
initGinState(&buildstate.ginstate, index);
/* initialize the meta page */
MetaBuffer = GinNewBuffer(index);
/* initialize the root page */
RootBuffer = GinNewBuffer(index);
START_CRIT_SECTION();
GinInitMetabuffer(MetaBuffer);
MarkBufferDirty(MetaBuffer);
GinInitBuffer(RootBuffer, GIN_LEAF);
MarkBufferDirty(RootBuffer);
if (!index->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData rdata;
Page page;
rdata.buffer = InvalidBuffer;
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.next = NULL;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX, &rdata);
page = BufferGetPage(RootBuffer);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
page = BufferGetPage(MetaBuffer);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
UnlockReleaseBuffer(MetaBuffer);
UnlockReleaseBuffer(RootBuffer);
END_CRIT_SECTION();
/* build the index */
buildstate.indtuples = 0;
/*
* create a temporary memory context that is reset once for each tuple
* inserted into the index
*/
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
buildstate.funcCtx = AllocSetContextCreate(buildstate.tmpCtx,
"Gin build temporary context for user-defined function",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
buildstate.accum.ginstate = &buildstate.ginstate;
ginInitBA(&buildstate.accum);
/*
* Do the heap scan. We disallow sync scan here because dataPlaceToPage
* prefers to receive tuples in TID order.
*/
reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
ginBuildCallback, (void *) &buildstate);
/* dump remaining entries to the index */
oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx);
ginBeginBAScan(&buildstate.accum);
while ((list = ginGetEntry(&buildstate.accum, &attnum, &entry, &nlist)) != NULL)
{
/* there could be many entries, so be willing to abort here */
CHECK_FOR_INTERRUPTS();
ginEntryInsert(index, &buildstate.ginstate, attnum, entry, list, nlist, TRUE);
}
MemoryContextSwitchTo(oldCtx);
//... rest of the code omitted here ...
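Note that funcCtx above is created with tmpCtx as its parent, so tearing down tmpCtx at the end of the build (done explicitly in the spgbuild example later on this page) also releases funcCtx. A hedged, self-contained illustration of that parent/child behaviour, with made-up context names:

/* Illustrative only: deleting a parent context recursively deletes children. */
static void
nested_context_demo(void)
{
    MemoryContext parent;
    MemoryContext child;

    parent = AllocSetContextCreate(CurrentMemoryContext,
                                   "demo parent",
                                   ALLOCSET_DEFAULT_MINSIZE,
                                   ALLOCSET_DEFAULT_INITSIZE,
                                   ALLOCSET_DEFAULT_MAXSIZE);
    child = AllocSetContextCreate(parent,
                                  "demo child",
                                  ALLOCSET_DEFAULT_MINSIZE,
                                  ALLOCSET_DEFAULT_INITSIZE,
                                  ALLOCSET_DEFAULT_MAXSIZE);

    /* allocate without switching, by naming the target context directly */
    (void) MemoryContextAlloc(child, 1024);

    /* one call frees both "demo parent" and "demo child" */
    MemoryContextDelete(parent);
}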
Example 6: Type_invokeSRF
Datum Type_invokeSRF(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS)
{
bool hasRow;
CallContextData* ctxData;
FuncCallContext* context;
MemoryContext currCtx;
/* stuff done only on the first call of the function
*/
if(SRF_IS_FIRSTCALL())
{
jobject tmp;
/* create a function context for cross-call persistence
*/
context = SRF_FIRSTCALL_INIT();
currCtx = MemoryContextSwitchTo(context->multi_call_memory_ctx);
/* Call the declared Java function. It returns an instance that can produce
* the rows.
*/
tmp = Type_getSRFProducer(self, cls, method, args);
if(tmp == 0)
{
Invocation_assertDisconnect();
MemoryContextSwitchTo(currCtx);
fcinfo->isnull = true;
SRF_RETURN_DONE(context);
}
ctxData = (CallContextData*)palloc(sizeof(CallContextData));
context->user_fctx = ctxData;
ctxData->elemType = self;
ctxData->rowProducer = JNI_newGlobalRef(tmp);
JNI_deleteLocalRef(tmp);
/* Some row producers will need a writable result set in order
* to produce the row. If one is needed, it's created here.
*/
tmp = Type_getSRFCollector(self, fcinfo);
if(tmp == 0)
ctxData->rowCollector = 0;
else
{
ctxData->rowCollector = JNI_newGlobalRef(tmp);
JNI_deleteLocalRef(tmp);
}
ctxData->trusted = currentInvocation->trusted;
ctxData->hasConnected = currentInvocation->hasConnected;
ctxData->invocation = currentInvocation->invocation;
if(ctxData->hasConnected)
ctxData->spiContext = CurrentMemoryContext;
else
ctxData->spiContext = 0;
ctxData->rowContext = AllocSetContextCreate(context->multi_call_memory_ctx,
"PL/Java row context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* Register callback to be called when the function ends
*/
RegisterExprContextCallback(((ReturnSetInfo*)fcinfo->resultinfo)->econtext, _endOfSetCB, PointerGetDatum(ctxData));
MemoryContextSwitchTo(currCtx);
}
context = SRF_PERCALL_SETUP();
ctxData = (CallContextData*)context->user_fctx;
MemoryContextReset(ctxData->rowContext);
currCtx = MemoryContextSwitchTo(ctxData->rowContext);
currentInvocation->hasConnected = ctxData->hasConnected;
currentInvocation->invocation = ctxData->invocation;
hasRow = Type_hasNextSRF(self, ctxData->rowProducer, ctxData->rowCollector, (jint)context->call_cntr);
ctxData->hasConnected = currentInvocation->hasConnected;
ctxData->invocation = currentInvocation->invocation;
currentInvocation->hasConnected = false;
currentInvocation->invocation = 0;
if(hasRow)
{
Datum result = Type_nextSRF(self, ctxData->rowProducer, ctxData->rowCollector);
MemoryContextSwitchTo(currCtx);
SRF_RETURN_NEXT(context, result);
}
MemoryContextSwitchTo(currCtx);
/* Unregister this callback and call it manually. We do this because
* otherwise it will be called when the backend is in progress of
* cleaning up Portals. If we close cursors (i.e. drop portals) in
* the close, then that mechanism fails since attempts are made to
* delete portals more then once.
*/
UnregisterExprContextCallback(
((ReturnSetInfo*)fcinfo->resultinfo)->econtext,
//... rest of the code omitted here ...
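The rowContext above, created in the SRF's multi_call_memory_ctx and reset at the start of every per-call invocation, is a standard pattern for set-returning functions. Here is a stripped-down, hedged sketch using the ordinary SRF macros; the function is purely illustrative and has nothing to do with PL/Java's internals.

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"
#include "utils/memutils.h"

PG_FUNCTION_INFO_V1(three_rows);

/* Illustrative SRF: long-lived state in multi_call_memory_ctx, per-row
 * scratch in a child context that is reset on every call. */
Datum
three_rows(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    MemoryContext rowcxt;

    if (SRF_IS_FIRSTCALL())
    {
        funcctx = SRF_FIRSTCALL_INIT();
        funcctx->user_fctx =
            AllocSetContextCreate(funcctx->multi_call_memory_ctx,
                                  "per-row context",
                                  ALLOCSET_DEFAULT_MINSIZE,
                                  ALLOCSET_DEFAULT_INITSIZE,
                                  ALLOCSET_DEFAULT_MAXSIZE);
    }

    funcctx = SRF_PERCALL_SETUP();
    rowcxt = (MemoryContext) funcctx->user_fctx;

    /* discard whatever the previous call left behind */
    MemoryContextReset(rowcxt);

    if (funcctx->call_cntr < 3)
    {
        MemoryContext oldcxt = MemoryContextSwitchTo(rowcxt);
        char       *scratch = palloc(64);   /* per-row scratch space */

        snprintf(scratch, 64, "row %d", (int) funcctx->call_cntr);
        MemoryContextSwitchTo(oldcxt);

        SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr));
    }

    SRF_RETURN_DONE(funcctx);
}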
Example 7: WalWriterMain
/*
* Main entry point for walwriter process
*
* This is invoked from BootstrapMain, which has already created the basic
* execution environment, but not enabled signals yet.
*/
void
WalWriterMain(void)
{
sigjmp_buf local_sigjmp_buf;
MemoryContext walwriter_context;
/*
* If possible, make this process a group leader, so that the postmaster
* can signal any child processes too. (walwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
elog(FATAL, "setsid() failed: %m");
#endif
/*
* Properly accept or ignore signals the postmaster might send us
*
* We have no particular use for SIGINT at the moment, but seems
* reasonable to treat like SIGTERM.
*/
pqsignal(SIGHUP, WalSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, WalShutdownHandler); /* request shutdown */
pqsignal(SIGTERM, WalShutdownHandler); /* request shutdown */
pqsignal(SIGQUIT, wal_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, SIG_IGN); /* reserve for ProcSignal */
pqsignal(SIGUSR2, SIG_IGN); /* not used */
/*
* Reset some signals that are accepted by postmaster but not here
*/
pqsignal(SIGCHLD, SIG_DFL);
pqsignal(SIGTTIN, SIG_DFL);
pqsignal(SIGTTOU, SIG_DFL);
pqsignal(SIGCONT, SIG_DFL);
pqsignal(SIGWINCH, SIG_DFL);
/* We allow SIGQUIT (quickdie) at all times */
sigdelset(&BlockSig, SIGQUIT);
/*
* Create a resource owner to keep track of our resources (not clear that
* we need this, but may as well have one).
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "Wal Writer");
/*
* Create a memory context that we will do all our work in. We do this so
* that we can reset the context during error recovery and thereby avoid
* possible memory leaks. Formerly this code just ran in
* TopMemoryContext, but resetting that would be a really bad idea.
*/
walwriter_context = AllocSetContextCreate(TopMemoryContext,
"Wal Writer",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
MemoryContextSwitchTo(walwriter_context);
/*
* If an exception is encountered, processing resumes here.
*
* This code is heavily based on bgwriter.c, q.v.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
/* Since not using PG_TRY, must reset error stack by hand */
error_context_stack = NULL;
/* Prevent interrupts while cleaning up */
HOLD_INTERRUPTS();
/* Report the error to the server log */
EmitErrorReport();
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
* about in walwriter, but we do have LWLocks, and perhaps buffers?
*/
LWLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
ResourceOwnerRelease(CurrentResourceOwner,
RESOURCE_RELEASE_BEFORE_LOCKS,
false, true);
/* we needn't bother with the other ResourceOwnerRelease phases */
AtEOXact_Buffers(false);
AtEOXact_Files();
//... rest of the code omitted here ...
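The comment above explains the motive: doing all work in walwriter_context means the error-recovery path (continued in the omitted part of this excerpt) can reclaim everything with a single reset rather than leaking into TopMemoryContext. Below is a hedged, generic sketch of that reset-on-error idiom, combined with a per-iteration reset as some other examples on this page do; it is illustrative only and is not the omitted walwriter.c code.

/* Illustrative only: a long-lived process loop that recovers from errors by
 * emptying its private work context. */
static void
worker_loop_sketch(void)
{
    sigjmp_buf  local_sigjmp_buf;
    MemoryContext workcxt;

    workcxt = AllocSetContextCreate(TopMemoryContext,
                                    "Example Worker",
                                    ALLOCSET_DEFAULT_MINSIZE,
                                    ALLOCSET_DEFAULT_INITSIZE,
                                    ALLOCSET_DEFAULT_MAXSIZE);
    MemoryContextSwitchTo(workcxt);

    if (sigsetjmp(local_sigjmp_buf, 1) != 0)
    {
        /* error path: report it, then drop whatever the failed pass allocated */
        error_context_stack = NULL;
        EmitErrorReport();
        MemoryContextSwitchTo(workcxt);
        FlushErrorState();
        MemoryContextResetAndDeleteChildren(workcxt);
    }
    PG_exception_stack = &local_sigjmp_buf;

    for (;;)
    {
        /* start each pass with an empty context, then work freely in it */
        MemoryContextResetAndDeleteChildren(workcxt);
        (void) palloc(1024);
        /* ... real per-iteration work would go here ... */
    }
}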
Example 8: ginInsertCleanup
/*
* Move tuples from pending pages into regular GIN structure.
*
* On first glance it looks completely not crash-safe. But if we crash
* after posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
* bad will happen.
*
* fill_fsm indicates that ginInsertCleanup should add deleted pages
* to FSM otherwise caller is responsible to put deleted pages into
* FSM.
*
* If stats isn't null, we count deleted pending pages into the counts.
*/
void
ginInsertCleanup(GinState *ginstate, bool full_clean,
bool fill_fsm, IndexBulkDeleteResult *stats)
{
Relation index = ginstate->index;
Buffer metabuffer,
buffer;
Page metapage,
page;
GinMetaPageData *metadata;
MemoryContext opCtx,
oldCtx;
BuildAccumulator accum;
KeyArray datums;
BlockNumber blkno,
blknoFinish;
bool cleanupFinish = false;
bool fsm_vac = false;
Size workMemory;
bool inVacuum = (stats == NULL);
/*
* We would like to prevent concurrent cleanup process. For that we will
* lock metapage in exclusive mode using LockPage() call. Nobody other
* will use that lock for metapage, so we keep possibility of concurrent
* insertion into pending list
*/
if (inVacuum)
{
/*
* We are called from [auto]vacuum/analyze or gin_clean_pending_list()
* and we would like to wait concurrent cleanup to finish.
*/
LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
workMemory =
(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
autovacuum_work_mem : maintenance_work_mem;
}
else
{
/*
* We are called from regular insert and if we see concurrent cleanup
* just exit in hope that concurrent process will clean up pending
* list.
*/
if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
return;
workMemory = work_mem;
}
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
LockBuffer(metabuffer, GIN_SHARE);
metapage = BufferGetPage(metabuffer);
metadata = GinPageGetMeta(metapage);
if (metadata->head == InvalidBlockNumber)
{
/* Nothing to do */
UnlockReleaseBuffer(metabuffer);
UnlockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
return;
}
/*
* Remember a tail page to prevent infinite cleanup if other backends add
* new tuples faster than we can cleanup.
*/
blknoFinish = metadata->tail;
/*
* Read and lock head of pending list
*/
blkno = metadata->head;
buffer = ReadBuffer(index, blkno);
LockBuffer(buffer, GIN_SHARE);
page = BufferGetPage(buffer);
LockBuffer(metabuffer, GIN_UNLOCK);
/*
* Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
ALLOCSET_DEFAULT_MINSIZE,
//... rest of the code omitted here ...
Example 9: brininsert
/*
* A tuple in the heap is being inserted. To keep a brin index up to date,
* we need to obtain the relevant index tuple and compare its stored values
* with those of the new tuple. If the tuple values are not consistent with
* the summary tuple, we need to update the index tuple.
*
* If the range is not currently summarized (i.e. the revmap returns NULL for
* it), there's nothing to do.
*/
bool
brininsert(Relation idxRel, Datum *values, bool *nulls,
ItemPointer heaptid, Relation heapRel,
IndexUniqueCheck checkUnique)
{
BlockNumber pagesPerRange;
BrinDesc *bdesc = NULL;
BrinRevmap *revmap;
Buffer buf = InvalidBuffer;
MemoryContext tupcxt = NULL;
MemoryContext oldcxt = NULL;
revmap = brinRevmapInitialize(idxRel, &pagesPerRange, NULL);
for (;;)
{
bool need_insert = false;
OffsetNumber off;
BrinTuple *brtup;
BrinMemTuple *dtup;
BlockNumber heapBlk;
int keyno;
CHECK_FOR_INTERRUPTS();
heapBlk = ItemPointerGetBlockNumber(heaptid);
/* normalize the block number to be the first block in the range */
heapBlk = (heapBlk / pagesPerRange) * pagesPerRange;
brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL,
BUFFER_LOCK_SHARE, NULL);
/* if range is unsummarized, there's nothing to do */
if (!brtup)
break;
/* First time through? */
if (bdesc == NULL)
{
bdesc = brin_build_desc(idxRel);
tupcxt = AllocSetContextCreate(CurrentMemoryContext,
"brininsert cxt",
ALLOCSET_DEFAULT_SIZES);
oldcxt = MemoryContextSwitchTo(tupcxt);
}
dtup = brin_deform_tuple(bdesc, brtup);
/*
* Compare the key values of the new tuple to the stored index values;
* our deformed tuple will get updated if the new tuple doesn't fit
* the original range (note this means we can't break out of the loop
* early). Make a note of whether this happens, so that we know to
* insert the modified tuple later.
*/
for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
{
Datum result;
BrinValues *bval;
FmgrInfo *addValue;
bval = &dtup->bt_columns[keyno];
addValue = index_getprocinfo(idxRel, keyno + 1,
BRIN_PROCNUM_ADDVALUE);
result = FunctionCall4Coll(addValue,
idxRel->rd_indcollation[keyno],
PointerGetDatum(bdesc),
PointerGetDatum(bval),
values[keyno],
nulls[keyno]);
/* if that returned true, we need to insert the updated tuple */
need_insert |= DatumGetBool(result);
}
if (!need_insert)
{
/*
* The tuple is consistent with the new values, so there's nothing
* to do.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
else
{
Page page = BufferGetPage(buf);
ItemId lp = PageGetItemId(page, off);
Size origsz;
BrinTuple *origtup;
Size newsz;
BrinTuple *newtup;
bool samepage;
//... rest of the code omitted here ...
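Unlike the earlier examples, brininsert spells the size arguments with the newer ALLOCSET_DEFAULT_SIZES convenience macro; on releases that provide it, that macro is simply shorthand for the three ALLOCSET_DEFAULT_* values used elsewhere on this page. A small hedged illustration (context names are made up):

/* Illustrative only: the two spellings create equivalently-sized contexts. */
static void
allocset_sizes_demo(void)
{
    MemoryContext a;
    MemoryContext b;

    /* older spelling, used by most examples on this page */
    a = AllocSetContextCreate(CurrentMemoryContext,
                              "demo cxt (long form)",
                              ALLOCSET_DEFAULT_MINSIZE,
                              ALLOCSET_DEFAULT_INITSIZE,
                              ALLOCSET_DEFAULT_MAXSIZE);

    /* newer shorthand, as in brininsert above */
    b = AllocSetContextCreate(CurrentMemoryContext,
                              "demo cxt (short form)",
                              ALLOCSET_DEFAULT_SIZES);

    MemoryContextDelete(a);
    MemoryContextDelete(b);
}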
Example 10: pcp_worker_main
/*
* main entry point of pcp worker child process
*/
void
pcp_worker_main(int port)
{
sigjmp_buf local_sigjmp_buf;
MemoryContext PCPMemoryContext;
int authenticated = 0;
char salt[4];
int random_salt = 0;
struct timeval uptime;
char tos;
int rsize;
char *buf = NULL;
ereport(DEBUG1,
(errmsg("I am PCP worker child with pid:%d",getpid())));
/* Identify myself via ps */
init_ps_display("", "", "", "");
gettimeofday(&uptime, NULL);
srandom((unsigned int) (getpid() ^ uptime.tv_usec));
/* set up signal handlers */
signal(SIGTERM, die);
signal(SIGINT, die);
signal(SIGQUIT, die);
signal(SIGCHLD, SIG_DFL);
signal(SIGUSR2, wakeup_handler_child);
signal(SIGUSR1, SIG_IGN);
signal(SIGHUP, SIG_IGN);
signal(SIGPIPE, SIG_IGN);
signal(SIGALRM, SIG_IGN);
/* Create per loop iteration memory context */
PCPMemoryContext = AllocSetContextCreate(TopMemoryContext,
"PCP_worker_main_loop",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
MemoryContextSwitchTo(TopMemoryContext);
/*
* install the call back for preparation of pcp worker child exit
*/
on_system_exit(pcp_worker_will_go_down, (Datum)NULL);
/* Initialize my backend status */
pool_initialize_private_backend_status();
/* Initialize process context */
pool_init_process_context();
pcp_frontend = pcp_open(port);
unset_nonblock(pcp_frontend->fd);
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
error_context_stack = NULL;
EmitErrorReport();
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
}
/* We can now handle ereport(ERROR) */
PG_exception_stack = &local_sigjmp_buf;
for(;;)
{
MemoryContextSwitchTo(PCPMemoryContext);
MemoryContextResetAndDeleteChildren(PCPMemoryContext);
errno = 0;
/* read a PCP packet */
do_pcp_read(pcp_frontend, &tos, 1);
do_pcp_read(pcp_frontend, &rsize, sizeof(int));
rsize = ntohl(rsize);
if ((rsize - sizeof(int)) > 0)
{
buf = (char *)palloc(rsize - sizeof(int));
do_pcp_read(pcp_frontend, buf, rsize - sizeof(int));
}
ereport(DEBUG1,
(errmsg("received PCP packet"),
errdetail("PCP packet type of service '%c'", tos)));
if (tos == 'R') /* authentication */
{
set_ps_display("PCP: processing authentication", false);
process_authentication(pcp_frontend, buf,salt, &random_salt);
authenticated = 1;
continue;
}
if (tos == 'M') /* md5 salt */
//... rest of the code omitted here ...
Example 11: btvacuumscan
/*
* btvacuumscan --- scan the index for VACUUMing purposes
*
* This combines the functions of looking for leaf tuples that are deletable
* according to the vacuum callback, looking for empty pages that can be
* deleted, and looking for old deleted pages that can be recycled. Both
* btbulkdelete and btvacuumcleanup invoke this (the latter only if no
* btbulkdelete call occurred).
*
* The caller is responsible for initially allocating/zeroing a stats struct
* and for obtaining a vacuum cycle ID if necessary.
*/
static void
btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state,
BTCycleId cycleid)
{
Relation rel = info->index;
BTVacState vstate;
BlockNumber num_pages;
BlockNumber blkno;
bool needLock;
/*
* Reset counts that will be incremented during the scan; needed in case
* of multiple scans during a single VACUUM command
*/
stats->estimated_count = false;
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
/* Set up info to pass down to btvacuumpage */
vstate.info = info;
vstate.stats = stats;
vstate.callback = callback;
vstate.callback_state = callback_state;
vstate.cycleid = cycleid;
vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
vstate.lastBlockLocked = BTREE_METAPAGE;
vstate.totFreePages = 0;
/* Create a temporary memory context to run _bt_pagedel in */
vstate.pagedelcontext = AllocSetContextCreate(CurrentMemoryContext,
"_bt_pagedel",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* The outer loop iterates over all index pages except the metapage, in
* physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. Hence, we must repeatedly check the
* relation length. We must acquire the relation-extension lock while
* doing so to avoid a race condition: if someone else is extending the
relation, there is a window where bufmgr/smgr have created a new
* all-zero page but it hasn't yet been write-locked by _bt_getbuf(). If
* we manage to scan such a page here, we'll improperly assume it can be
* recycled. Taking the lock synchronizes things enough to prevent a
problem: either num_pages won't include the new page, or _bt_getbuf
* already has write lock on the buffer and it will be fully initialized
* before we can examine it. (See also vacuumlazy.c, which has the same
* issue.) Also, we need not worry if a page is added immediately after
* we look; the page splitting code already has write-lock on the left
* page before it adds a right page, so we must already have processed any
* tuples due to be moved into such a page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
blkno = BTREE_METAPAGE + 1;
for (;;)
{
/* Get the current relation length */
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
num_pages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
/* Quit if we've scanned the whole relation */
if (blkno >= num_pages)
break;
/* Iterate over pages, then loop back to recheck length */
for (; blkno < num_pages; blkno++)
{
btvacuumpage(&vstate, blkno, blkno);
}
}
/*
* If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here.
* However, we won't issue any WAL records about pages that have no items
* to be deleted. For pages between pages we've vacuumed, the replay code
* will take locks under the direction of the lastBlockVacuumed fields in
* the XLOG_BTREE_VACUUM WAL records. To cover pages after the last one
//... rest of the code omitted here ...
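The omitted remainder of this routine processes each block with btvacuumpage, which uses vstate.pagedelcontext so that memory allocated by the page-deletion call can be discarded cheaply. A hedged, generic sketch of that throwaway-context usage follows; the names track the excerpt, but the body is illustrative and is not the actual btvacuumpage code.

/* Illustrative only: run a possibly-allocating call inside a throwaway context. */
static void
run_in_throwaway_context(BTVacState *vstate)
{
    MemoryContext oldcontext;

    /* start from an empty context each time */
    MemoryContextReset(vstate->pagedelcontext);
    oldcontext = MemoryContextSwitchTo(vstate->pagedelcontext);

    /* ... e.g. the _bt_pagedel() call whose allocations we want to discard ... */

    MemoryContextSwitchTo(oldcontext);
}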
Example 12: geqo_eval
/*
* geqo_eval
*
* Returns cost of a query tree as an individual of the population.
*/
Cost
geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
{
MemoryContext mycontext;
MemoryContext oldcxt;
RelOptInfo *joinrel;
Path *best_path;
Cost fitness;
int savelength;
struct HTAB *savehash;
/*
* Create a private memory context that will hold all temp storage
* allocated inside gimme_tree().
*
* Since geqo_eval() will be called many times, we can't afford to let all
* that memory go unreclaimed until end of statement. Note we make the
* temp context a child of the planner's normal context, so that it will
* be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
oldcxt = MemoryContextSwitchTo(mycontext);
/*
* gimme_tree will add entries to root->join_rel_list, which may or may
* not already contain some entries. The newly added entries will be
* recycled by the MemoryContextDelete below, so we must ensure that the
* list is restored to its former state before exiting. We can do this by
* truncating the list to its original length. NOTE this assumes that any
* added entries are appended at the end!
*
* We also must take care not to mess up the outer join_rel_hash, if there
* is one. We can do this by just temporarily setting the link to NULL.
* (If we are dealing with enough join rels, which we very likely are, a
* new hash table will get built and used locally.)
*
* join_rel_level[] shouldn't be in use, so just Assert it isn't.
*/
savelength = list_length(root->join_rel_list);
savehash = root->join_rel_hash;
Assert(root->join_rel_level == NULL);
root->join_rel_hash = NULL;
/* construct the best path for the given combination of relations */
joinrel = gimme_tree(root, tour, num_gene);
best_path = joinrel->cheapest_total_path;
/*
* compute fitness
*
* XXX geqo does not currently support optimization for partial result
* retrieval, nor do we take any cognizance of possible use of
* parameterized paths --- how to fix?
*/
fitness = best_path->total_cost;
/*
* Restore join_rel_list to its former state, and put back original
* hashtable if any.
*/
root->join_rel_list = list_truncate(root->join_rel_list,
savelength);
root->join_rel_hash = savehash;
/* release all the memory acquired within gimme_tree */
MemoryContextSwitchTo(oldcxt);
MemoryContextDelete(mycontext);
return fitness;
}
Example 13: ExecHashTableCreate
/* ----------------------------------------------------------------
* ExecHashTableCreate
*
* create an empty hashtable data structure for hashjoin.
* ----------------------------------------------------------------
*/
HashJoinTable
ExecHashTableCreate(HashState *hashState, HashJoinState *hjstate, List *hashOperators, uint64 operatorMemKB)
{
HashJoinTable hashtable;
Plan *outerNode;
int nbuckets;
int nbatch;
int nkeys;
int i;
ListCell *ho;
MemoryContext oldcxt;
START_MEMORY_ACCOUNT(hashState->ps.plan->memoryAccount);
{
Hash *node = (Hash *) hashState->ps.plan;
/*
* Get information about the size of the relation to be hashed (it's the
* "outer" subtree of this node, but the inner relation of the hashjoin).
* Compute the appropriate size of the hash table.
*/
outerNode = outerPlan(node);
/*
* Initialize the hash table control block.
*
* The hashtable control block is just palloc'd from the executor's
* per-query memory context.
*/
hashtable = (HashJoinTable)palloc0(sizeof(HashJoinTableData));
hashtable->buckets = NULL;
hashtable->bloom = NULL;
hashtable->curbatch = 0;
hashtable->growEnabled = true;
hashtable->totalTuples = 0;
hashtable->batches = NULL;
hashtable->work_set = NULL;
hashtable->state_file = NULL;
hashtable->spaceAllowed = operatorMemKB * 1024L;
hashtable->stats = NULL;
hashtable->eagerlyReleased = false;
hashtable->hjstate = hjstate;
/*
* Create temporary memory contexts in which to keep the hashtable working
* storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
"HashBatchContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* CDB */ /* track temp buf file allocations in separate context */
hashtable->bfCxt = AllocSetContextCreate(CurrentMemoryContext,
"hbbfcxt",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width,
&hashtable->nbuckets, &hashtable->nbatch, operatorMemKB);
nbuckets = hashtable->nbuckets;
nbatch = hashtable->nbatch;
hashtable->nbatch_original = nbatch;
hashtable->nbatch_outstart = nbatch;
#ifdef HJDEBUG
elog(LOG, "HJ: nbatch = %d, nbuckets = %d\n", nbatch, nbuckets);
#endif
/*
* Get info about the hash functions to be used for each hash key.
* Also remember whether the join operators are strict.
*/
nkeys = list_length(hashOperators);
hashtable->outer_hashfunctions =
(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
hashtable->inner_hashfunctions =
(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
i = 0;
foreach(ho, hashOperators)
{
//... rest of the code omitted here ...
Example 14: spgbuild
/*
* Build an SP-GiST index.
*/
Datum
spgbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
SpGistBuildState buildstate;
Buffer metabuffer,
rootbuffer;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/*
* Initialize the meta page and root page
*/
metabuffer = SpGistNewBuffer(index);
rootbuffer = SpGistNewBuffer(index);
Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_HEAD_BLKNO);
START_CRIT_SECTION();
SpGistInitMetapage(BufferGetPage(metabuffer));
MarkBufferDirty(metabuffer);
SpGistInitBuffer(rootbuffer, SPGIST_LEAF);
MarkBufferDirty(rootbuffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata;
/* WAL data is just the relfilenode */
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, &rdata);
PageSetLSN(BufferGetPage(metabuffer), recptr);
PageSetTLI(BufferGetPage(metabuffer), ThisTimeLineID);
PageSetLSN(BufferGetPage(rootbuffer), recptr);
PageSetTLI(BufferGetPage(rootbuffer), ThisTimeLineID);
}
END_CRIT_SECTION();
UnlockReleaseBuffer(metabuffer);
UnlockReleaseBuffer(rootbuffer);
/*
* Now insert all the heap data into the index
*/
initSpGistState(&buildstate.spgstate, index);
buildstate.spgstate.isBuild = true;
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"SP-GiST build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
spgistBuildCallback, (void *) &buildstate);
MemoryContextDelete(buildstate.tmpCtx);
SpGistUpdateMetaPage(index);
result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult));
result->heap_tuples = result->index_tuples = reltuples;
PG_RETURN_POINTER(result);
}
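spgistBuildCallback itself is not shown on this page. As a hedged sketch, a build callback of this era typically runs each tuple's work inside buildstate->tmpCtx and resets that context afterwards, which is exactly why spgbuild (and ginbuild above) create a "temporary context that is reset once for each tuple". The callback name and body below are illustrative, not the real SP-GiST code.

/* Illustrative per-tuple build callback using the temporary context. */
static void
example_build_callback(Relation index, HeapTuple htup, Datum *values,
                       bool *isnull, bool tupleIsAlive, void *state)
{
    SpGistBuildState *buildstate = (SpGistBuildState *) state;
    MemoryContext oldCtx;

    oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx);

    /* ... form and insert the index entry for this heap tuple ... */

    MemoryContextSwitchTo(oldCtx);
    MemoryContextReset(buildstate->tmpCtx);
}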
Example 15: CopyIntoCStoreTable
/*
* CopyIntoCStoreTable handles a "COPY cstore_table FROM" statement. This
* function uses the COPY command's functions to read and parse rows from
* the data source specified in the COPY statement. The function then writes
* each row to the file specified in the cstore foreign table options. Finally,
* the function returns the number of copied rows.
*/
static uint64
CopyIntoCStoreTable(const CopyStmt *copyStatement, const char *queryString)
{
uint64 processedRowCount = 0;
Relation relation = NULL;
Oid relationId = InvalidOid;
TupleDesc tupleDescriptor = NULL;
uint32 columnCount = 0;
CopyState copyState = NULL;
bool nextRowFound = true;
Datum *columnValues = NULL;
bool *columnNulls = NULL;
TableWriteState *writeState = NULL;
CStoreFdwOptions *cstoreFdwOptions = NULL;
MemoryContext tupleContext = NULL;
List *columnNameList = copyStatement->attlist;
if (columnNameList != NULL)
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("copy column list is not supported")));
}
/*
* We disallow copy from file or program except to superusers. These checks
* are based on the checks in DoCopy() function of copy.c.
*/
if (copyStatement->filename != NULL && !superuser())
{
if (copyStatement->is_program)
{
ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a program"),
errhint("Anyone can COPY to stdout or from stdin. "
"psql's \\copy command also works for anyone.")));
}
else
{
ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
"psql's \\copy command also works for anyone.")));
}
}
Assert(copyStatement->relation != NULL);
/*
* Open and lock the relation. We acquire ExclusiveLock to allow concurrent
* reads, but block concurrent writes.
*/
relation = heap_openrv(copyStatement->relation, ExclusiveLock);
relationId = RelationGetRelid(relation);
/* allocate column values and nulls arrays */
tupleDescriptor = RelationGetDescr(relation);
columnCount = tupleDescriptor->natts;
columnValues = palloc0(columnCount * sizeof(Datum));
columnNulls = palloc0(columnCount * sizeof(bool));
cstoreFdwOptions = CStoreGetOptions(relationId);
/*
* We create a new memory context called tuple context, and read and write
* each row's values within this memory context. After each read and write,
* we reset the memory context. That way, we immediately release memory
* allocated for each row, and don't bloat memory usage with large input
* files.
*/
tupleContext = AllocSetContextCreate(CurrentMemoryContext,
"CStore COPY Row Memory Context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* init state to read from COPY data source */
copyState = BeginCopyFrom(relation, copyStatement->filename,
copyStatement->is_program, NIL,
copyStatement->options);
/* init state to write to the cstore file */
writeState = CStoreBeginWrite(cstoreFdwOptions->filename,
cstoreFdwOptions->compressionType,
cstoreFdwOptions->stripeRowCount,
cstoreFdwOptions->blockRowCount,
tupleDescriptor);
while (nextRowFound)
{
/* read the next row in tupleContext */
MemoryContext oldContext = MemoryContextSwitchTo(tupleContext);
nextRowFound = NextCopyFrom(copyState, NULL, columnValues, columnNulls, NULL);
MemoryContextSwitchTo(oldContext);
//... rest of the code omitted here ...
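The reset promised by the comment above ("After each read and write, we reset the memory context") falls inside the omitted part of the loop. Here is a hedged sketch of the complete loop shape; the write call is elided, and this is not the literal cstore_fdw code.

/* Illustrative loop shape: read in tupleContext, write, then reset it. */
while (nextRowFound)
{
    /* read the next row in tupleContext */
    MemoryContext oldContext = MemoryContextSwitchTo(tupleContext);

    nextRowFound = NextCopyFrom(copyState, NULL, columnValues, columnNulls, NULL);
    MemoryContextSwitchTo(oldContext);

    if (nextRowFound)
    {
        /* ... write columnValues / columnNulls through writeState ... */
        processedRowCount++;
    }

    /* release everything this row allocated */
    MemoryContextReset(tupleContext);
}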