This article collects typical usage examples of the palloc function (PostgreSQL's memory-context allocator, written in C). If you have been wondering what palloc does, how to call it, or what real code that uses it looks like, the curated examples here may help.
A total of 15 code examples of the palloc function are shown below, all drawn from the PostgreSQL source tree and sorted by popularity by default.
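Before the examples, here is a minimal, self-contained sketch of what a palloc call looks like inside a PostgreSQL C function. This is not taken from the examples below: the function name repeat_byte and its arguments are hypothetical, invented purely for illustration, while the palloc/pfree calls, the fmgr macros, and cstring_to_text_with_len are standard PostgreSQL server APIs.

#include "postgres.h"
#include "fmgr.h"
#include "utils/builtins.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(repeat_byte);

/*
 * Hypothetical example: build a text value containing n copies of a byte.
 * palloc allocates from CurrentMemoryContext, so the buffer would be
 * released automatically when that context is reset; the explicit pfree
 * merely shows how to give memory back early.
 */
Datum
repeat_byte(PG_FUNCTION_ARGS)
{
    char    ch = PG_GETARG_CHAR(0);
    int32   n = PG_GETARG_INT32(1);
    char   *buf;
    text   *result;

    if (n < 0)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("count must not be negative")));

    buf = (char *) palloc(n);          /* uninitialized, like malloc */
    memset(buf, ch, n);

    result = cstring_to_text_with_len(buf, n);

    pfree(buf);                        /* optional; context cleanup would free it too */

    PG_RETURN_TEXT_P(result);
}

Unlike malloc, memory obtained with palloc belongs to the current memory context and disappears when that context is reset or deleted, which is why several of the examples below never pfree what they allocate.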
Example 1: EnumValuesCreate
/*
* EnumValuesCreate
* Create an entry in pg_enum for each of the supplied enum values.
*
* vals is a list of Value strings.
*/
void
EnumValuesCreate(Oid enumTypeOid, List *vals)
{
Relation pg_enum;
NameData enumlabel;
Oid *oids;
int elemno,
num_elems;
Datum values[Natts_pg_enum];
bool nulls[Natts_pg_enum];
ListCell *lc;
HeapTuple tup;
num_elems = list_length(vals);
/*
* We do not bother to check the list of values for duplicates --- if
* you have any, you'll get a less-than-friendly unique-index violation.
* It is probably not worth trying harder.
*/
pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
/*
* Allocate OIDs for the enum's members.
*
* While this method does not absolutely guarantee that we generate no
* duplicate OIDs (since we haven't entered each oid into the table
* before allocating the next), trouble could only occur if the OID
* counter wraps all the way around before we finish. Which seems
* unlikely.
*/
oids = (Oid *) palloc(num_elems * sizeof(Oid));
for (elemno = 0; elemno < num_elems; elemno++)
{
/*
* We assign even-numbered OIDs to all the new enum labels. This
* tells the comparison functions the OIDs are in the correct sort
* order and can be compared directly.
*/
Oid new_oid;
do {
new_oid = GetNewOid(pg_enum);
} while (new_oid & 1);
oids[elemno] = new_oid;
}
/* sort them, just in case OID counter wrapped from high to low */
qsort(oids, num_elems, sizeof(Oid), oid_cmp);
/* and make the entries */
memset(nulls, false, sizeof(nulls));
elemno = 0;
foreach(lc, vals)
{
char *lab = strVal(lfirst(lc));
/*
* labels are stored in a name field, for easier syscache lookup, so
* check the length to make sure it's within range.
*/
if (strlen(lab) > (NAMEDATALEN - 1))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("invalid enum label \"%s\"", lab),
errdetail("Labels must be %d characters or less.",
NAMEDATALEN - 1)));
values[Anum_pg_enum_enumtypid - 1] = ObjectIdGetDatum(enumTypeOid);
values[Anum_pg_enum_enumsortorder - 1] = Float4GetDatum(elemno + 1);
namestrcpy(&enumlabel, lab);
values[Anum_pg_enum_enumlabel - 1] = NameGetDatum(&enumlabel);
tup = heap_form_tuple(RelationGetDescr(pg_enum), values, nulls);
HeapTupleSetOid(tup, oids[elemno]);
simple_heap_insert(pg_enum, tup);
CatalogUpdateIndexes(pg_enum, tup);
heap_freetuple(tup);
elemno++;
}
/* clean up */
pfree(oids);
heap_close(pg_enum, RowExclusiveLock);
}
Example 2: worker_spi_main
void
worker_spi_main(Datum main_arg)
{
int index = DatumGetInt32(main_arg);
worktable *table;
StringInfoData buf;
char name[20];
table = palloc(sizeof(worktable));
sprintf(name, "schema%d", index);
table->schema = pstrdup(name);
table->name = pstrdup("counted");
/* Establish signal handlers before unblocking signals. */
pqsignal(SIGHUP, worker_spi_sighup);
pqsignal(SIGTERM, worker_spi_sigterm);
/* We're now ready to receive signals */
BackgroundWorkerUnblockSignals();
/* Connect to our database */
BackgroundWorkerInitializeConnection("postgres", NULL);
elog(LOG, "%s initialized with %s.%s",
MyBgworkerEntry->bgw_name, table->schema, table->name);
initialize_worker_spi(table);
/*
* Quote identifiers passed to us. Note that this must be done after
* initialize_worker_spi, because that routine assumes the names are not
* quoted.
*
* Note some memory might be leaked here.
*/
table->schema = quote_identifier(table->schema);
table->name = quote_identifier(table->name);
initStringInfo(&buf);
appendStringInfo(&buf,
"WITH deleted AS (DELETE "
"FROM %s.%s "
"WHERE type = 'delta' RETURNING value), "
"total AS (SELECT coalesce(sum(value), 0) as sum "
"FROM deleted) "
"UPDATE %s.%s "
"SET value = %s.value + total.sum "
"FROM total WHERE type = 'total' "
"RETURNING %s.value",
table->schema, table->name,
table->schema, table->name,
table->name,
table->name);
/*
* Main loop: do this until the SIGTERM handler tells us to terminate
*/
while (!got_sigterm)
{
int ret;
int rc;
/*
* Background workers mustn't call usleep() or any direct equivalent:
* instead, they may wait on their process latch, which sleeps as
* necessary, but is awakened if postmaster dies. That way the
* background process goes away immediately in an emergency.
*/
rc = WaitLatch(&MyProc->procLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
worker_spi_naptime * 1000L);
ResetLatch(&MyProc->procLatch);
/* emergency bailout if postmaster has died */
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
/*
* In case of a SIGHUP, just reload the configuration.
*/
if (got_sighup)
{
got_sighup = false;
ProcessConfigFile(PGC_SIGHUP);
}
/*
* Start a transaction on which we can run queries. Note that each
* StartTransactionCommand() call should be preceded by a
* SetCurrentStatementStartTimestamp() call, which sets both the time
* for the statement we're about to run, and also the transaction
* start time. Also, each other query sent to SPI should probably be
* preceded by SetCurrentStatementStartTimestamp(), so that statement
* start time is always up to date.
*
* The SPI_connect() call lets us run queries through the SPI manager,
* and the PushActiveSnapshot() call creates an "active" snapshot
* which is necessary for queries to have MVCC data to work on.
*
* The pgstat_report_activity() call makes our activity visible
* through the pgstat views.
//......... part of the code omitted here .........
Example 3: palloc_bench
Datum
palloc_bench(PG_FUNCTION_ARGS)
{
/* info for anyelement */
int i = 0;
int32 ncontexts = PG_GETARG_INT32(0);
int32 niterations = PG_GETARG_INT32(1);
int32 allocsize = PG_GETARG_INT32(2);
struct timeval start_time, end_time;
/* memory contexts */
MemoryContext initctx = CurrentMemoryContext;
MemoryContext ctx = initctx;
/* switch to the per-group hash-table memory context */
for (i = 0; i < ncontexts; i++) {
char name[256];
sprintf(name, "test context %d", i);
#ifdef TRACKING_FLAG
ctx = AllocSetContextCreateTracked(ctx,
name,
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE,
true);
#else
ctx = AllocSetContextCreate(ctx,
name,
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
#endif
}
MemoryContextSwitchTo(ctx);
gettimeofday(&start_time, NULL);
for (i = 0; i < niterations; i++) {
char * p = palloc(allocsize);
if (p != NULL)
pfree(p);
}
gettimeofday(&end_time, NULL);
MemoryContextSwitchTo(initctx);
elog(WARNING, "duration = %.2f ms", (end_time.tv_sec - start_time.tv_sec) * 1000 + (end_time.tv_usec - start_time.tv_usec) / 1000.0);
PG_RETURN_VOID();
}
Example 4: xpath_table
//......... part of the code omitted here .........
/*
* Create the tuplestore - work_mem is the max in-memory size before a
* file is created on disk to hold it.
*/
tupstore =
tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
false, work_mem);
MemoryContextSwitchTo(oldcontext);
/* get the requested return tuple description */
ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);
/* must have at least one output column (for the pkey) */
if (ret_tupdesc->natts < 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("xpath_table must have at least one output column")));
/*
* At the moment we assume that the returned attributes make sense for the
* XPath specified (i.e. we trust the caller). It's not fatal if they get
* it wrong - the input function for the column type will raise an error
* if the path result can't be converted into the correct binary
* representation.
*/
attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);
/* Set return mode and allocate value space. */
rsinfo->returnMode = SFRM_Materialize;
rsinfo->setDesc = ret_tupdesc;
values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));
/*
* Split XPaths. xpathset is a writable CString.
*
* Note that we stop splitting once we've done all needed for tupdesc
*/
numpaths = 0;
pos = xpathset;
while (numpaths < (ret_tupdesc->natts - 1))
{
xpaths[numpaths++] = (xmlChar *) pos;
pos = strstr(pos, pathsep);
if (pos != NULL)
{
*pos = '\0';
pos++;
}
else
break;
}
/* Now build query */
initStringInfo(&query_buf);
/* Build initial sql statement */
appendStringInfo(&query_buf, "SELECT %s, %s FROM %s WHERE %s",
pkeyfield,
xmlfield,
relname,
condition);
Example 5: enum_range_internal
static ArrayType *
enum_range_internal(Oid enumtypoid, Oid lower, Oid upper)
{
ArrayType *result;
Relation enum_rel;
Relation enum_idx;
SysScanDesc enum_scan;
HeapTuple enum_tuple;
ScanKeyData skey;
Datum *elems;
int max,
cnt;
bool left_found;
/*
* Scan the enum members in order using pg_enum_typid_sortorder_index.
* Note we must not use the syscache, and must use an MVCC snapshot here.
* See comments for RenumberEnumType in catalog/pg_enum.c for more info.
*/
ScanKeyInit(&skey,
Anum_pg_enum_enumtypid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(enumtypoid));
enum_rel = heap_open(EnumRelationId, AccessShareLock);
enum_idx = index_open(EnumTypIdSortOrderIndexId, AccessShareLock);
enum_scan = systable_beginscan_ordered(enum_rel, enum_idx,
GetTransactionSnapshot(),
1, &skey);
max = 64;
elems = (Datum *) palloc(max * sizeof(Datum));
cnt = 0;
left_found = !OidIsValid(lower);
while (HeapTupleIsValid(enum_tuple = systable_getnext_ordered(enum_scan, ForwardScanDirection)))
{
Oid enum_oid = HeapTupleGetOid(enum_tuple);
if (!left_found && lower == enum_oid)
left_found = true;
if (left_found)
{
if (cnt >= max)
{
max *= 2;
elems = (Datum *) repalloc(elems, max * sizeof(Datum));
}
elems[cnt++] = ObjectIdGetDatum(enum_oid);
}
if (OidIsValid(upper) && upper == enum_oid)
break;
}
systable_endscan_ordered(enum_scan);
index_close(enum_idx, AccessShareLock);
heap_close(enum_rel, AccessShareLock);
/* and build the result array */
/* note this hardwires some details about the representation of Oid */
result = construct_array(elems, cnt, enumtypoid, sizeof(Oid), true, 'i');
pfree(elems);
return result;
}
Example 6: compute_array_stats
//......... part of the code omitted here .........
if (slot_idx > STATISTIC_NUM_SLOTS - 2)
elog(ERROR, "insufficient pg_statistic slots for array stats");
/* We can only compute real stats if we found some non-null values. */
if (analyzed_rows > 0)
{
int nonnull_cnt = analyzed_rows;
int count_items_count;
int i;
TrackItem **sort_table;
int track_len;
int64 cutoff_freq;
int64 minfreq,
maxfreq;
/*
* We assume the standard stats code already took care of setting
* stats_valid, stanullfrac, stawidth, stadistinct. We'd have to
* re-compute those values if we wanted to not store the standard
* stats.
*/
/*
* Construct an array of the interesting hashtable items, that is,
* those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
* frequency is 9*N / bucket_width.
*/
cutoff_freq = 9 * element_no / bucket_width;
i = hash_get_num_entries(elements_tab); /* surely enough space */
sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
hash_seq_init(&scan_status, elements_tab);
track_len = 0;
minfreq = element_no;
maxfreq = 0;
while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
{
if (item->frequency > cutoff_freq)
{
sort_table[track_len++] = item;
minfreq = Min(minfreq, item->frequency);
maxfreq = Max(maxfreq, item->frequency);
}
}
Assert(track_len <= i);
/* emit some statistics for debug purposes */
elog(DEBUG3, "compute_array_stats: target # mces = %d, "
"bucket width = %d, "
"# elements = " INT64_FORMAT ", hashtable size = %d, "
"usable entries = %d",
num_mcelem, bucket_width, element_no, i, track_len);
/*
* If we obtained more elements than we really want, get rid of those
* with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
{
qsort(sort_table, track_len, sizeof(TrackItem *),
trackitem_compare_frequencies_desc);
Example 7: CreateConstraintEntry
/*
* CreateConstraintEntry
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
* constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
CreateConstraintEntry(const char *constraintName,
Oid constraintNamespace,
char constraintType,
bool isDeferrable,
bool isDeferred,
Oid relId,
const int16 *constraintKey,
int constraintNKeys,
Oid domainId,
Oid indexRelId,
Oid foreignRelId,
const int16 *foreignKey,
const Oid *pfEqOp,
const Oid *ppEqOp,
const Oid *ffEqOp,
int foreignNKeys,
char foreignUpdateType,
char foreignDeleteType,
char foreignMatchType,
const Oid *exclOp,
Node *conExpr,
const char *conBin,
const char *conSrc,
bool conIsLocal,
int conInhCount)
{
Relation conDesc;
Oid conOid;
HeapTuple tup;
bool nulls[Natts_pg_constraint];
Datum values[Natts_pg_constraint];
ArrayType *conkeyArray;
ArrayType *confkeyArray;
ArrayType *conpfeqopArray;
ArrayType *conppeqopArray;
ArrayType *conffeqopArray;
ArrayType *conexclopArray;
NameData cname;
int i;
ObjectAddress conobject;
conDesc = heap_open(ConstraintRelationId, RowExclusiveLock);
Assert(constraintName);
namestrcpy(&cname, constraintName);
/*
* Convert C arrays into Postgres arrays.
*/
if (constraintNKeys > 0)
{
Datum *conkey;
conkey = (Datum *) palloc(constraintNKeys * sizeof(Datum));
for (i = 0; i < constraintNKeys; i++)
conkey[i] = Int16GetDatum(constraintKey[i]);
conkeyArray = construct_array(conkey, constraintNKeys,
INT2OID, 2, true, 's');
}
else
conkeyArray = NULL;
if (foreignNKeys > 0)
{
Datum *fkdatums;
fkdatums = (Datum *) palloc(foreignNKeys * sizeof(Datum));
for (i = 0; i < foreignNKeys; i++)
fkdatums[i] = Int16GetDatum(foreignKey[i]);
confkeyArray = construct_array(fkdatums, foreignNKeys,
INT2OID, 2, true, 's');
for (i = 0; i < foreignNKeys; i++)
fkdatums[i] = ObjectIdGetDatum(pfEqOp[i]);
conpfeqopArray = construct_array(fkdatums, foreignNKeys,
OIDOID, sizeof(Oid), true, 'i');
for (i = 0; i < foreignNKeys; i++)
fkdatums[i] = ObjectIdGetDatum(ppEqOp[i]);
conppeqopArray = construct_array(fkdatums, foreignNKeys,
OIDOID, sizeof(Oid), true, 'i');
for (i = 0; i < foreignNKeys; i++)
fkdatums[i] = ObjectIdGetDatum(ffEqOp[i]);
conffeqopArray = construct_array(fkdatums, foreignNKeys,
OIDOID, sizeof(Oid), true, 'i');
}
else
{
confkeyArray = NULL;
conpfeqopArray = NULL;
conppeqopArray = NULL;
conffeqopArray = NULL;
}
//......... part of the code omitted here .........
Example 8: nodeRead
//......... part of the code omitted here .........
if (token[0] == ')')
break;
val = (int) strtol(token, &endptr, 10);
if (endptr != token + tok_len)
elog(ERROR, "unrecognized integer: \"%.*s\"",
tok_len, token);
l = lappend_int(l, val);
}
}
else if (tok_len == 1 && token[0] == 'o')
{
/* List of OIDs */
for (;;)
{
Oid val;
char *endptr;
token = pg_strtok(&tok_len);
if (token == NULL)
elog(ERROR, "unterminated List structure");
if (token[0] == ')')
break;
val = (Oid) strtoul(token, &endptr, 10);
if (endptr != token + tok_len)
elog(ERROR, "unrecognized OID: \"%.*s\"",
tok_len, token);
l = lappend_oid(l, val);
}
}
else
{
/* List of other node types */
for (;;)
{
/* We have already scanned next token... */
if (token[0] == ')')
break;
l = lappend(l, nodeRead(token, tok_len));
token = pg_strtok(&tok_len);
if (token == NULL)
elog(ERROR, "unterminated List structure");
}
}
result = (Node *) l;
break;
}
case RIGHT_PAREN:
elog(ERROR, "unexpected right parenthesis");
result = NULL; /* keep compiler happy */
break;
case OTHER_TOKEN:
if (tok_len == 0)
{
/* must be "<>" --- represents a null pointer */
result = NULL;
}
else
{
elog(ERROR, "unrecognized token: \"%.*s\"", tok_len, token);
result = NULL; /* keep compiler happy */
}
break;
case T_Integer:
/*
* we know that the token terminates on a char atol will stop at
*/
result = (Node *) makeInteger(atol(token));
break;
case T_Float:
{
char *fval = (char *) palloc(tok_len + 1);
memcpy(fval, token, tok_len);
fval[tok_len] = '\0';
result = (Node *) makeFloat(fval);
}
break;
case T_String:
/* need to remove leading and trailing quotes, and backslashes */
result = (Node *) makeString(debackslash(token + 1, tok_len - 2));
break;
case T_BitString:
{
char *val = palloc(tok_len);
/* skip leading 'b' */
memcpy(val, token + 1, tok_len - 1);
val[tok_len - 1] = '\0';
result = (Node *) makeBitString(val);
break;
}
default:
elog(ERROR, "unrecognized node type: %d", (int) type);
result = NULL; /* keep compiler happy */
break;
}
return (void *) result;
}
Example 9: lowerstr_with_len
/*
* lowerstr_with_len --- fold string to lower case
*
* Input string need not be null-terminated.
*
* Returned string is palloc'd
*/
char *
lowerstr_with_len(const char *str, int len)
{
char *out;
if (len == 0)
return pstrdup("");
#ifdef USE_WIDE_UPPER_LOWER
/*
* Use wide char code only when max encoding length > 1 and ctype != C.
* Some operating systems fail with multi-byte encodings and a C locale.
* Also, for a C locale there is no need to process as multibyte. From
* backend/utils/adt/oracle_compat.c Teodor
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
wchar_t *wstr,
*wptr;
int wlen;
/*
* alloc number of wchar_t for worst case, len contains number of
* bytes >= number of characters and alloc 1 wchar_t for 0, because
* wchar2char wants zero-terminated string
*/
wptr = wstr = (wchar_t *) palloc(sizeof(wchar_t) * (len + 1));
wlen = char2wchar(wstr, len + 1, str, len);
Assert(wlen <= len);
while (*wptr)
{
*wptr = towlower((wint_t) *wptr);
wptr++;
}
/*
* Alloc result string for worst case + '\0'
*/
len = pg_database_encoding_max_length() * wlen + 1;
out = (char *) palloc(len);
wlen = wchar2char(out, wstr, len);
pfree(wstr);
if (wlen < 0)
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("conversion from wchar_t to server encoding failed: %m")));
Assert(wlen < len);
}
else
#endif /* USE_WIDE_UPPER_LOWER */
{
const char *ptr = str;
char *outptr;
outptr = out = (char *) palloc(sizeof(char) * (len + 1));
while ((ptr - str) < len && *ptr)
{
*outptr++ = tolower(TOUCHAR(ptr));
ptr++;
}
*outptr = '\0';
}
return out;
}
Example 10: get_crosstab_tuplestore
/*
* create and populate the crosstab tuplestore using the provided source query
*/
static Tuplestorestate *
get_crosstab_tuplestore(char *sql,
HTAB *crosstab_hash,
TupleDesc tupdesc,
MemoryContext per_query_ctx,
bool randomAccess)
{
Tuplestorestate *tupstore;
int num_categories = hash_get_num_entries(crosstab_hash);
AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
char **values;
HeapTuple tuple;
int ret;
int proc;
/* initialize our tuplestore (while still in query context!) */
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)
/* internal error */
elog(ERROR, "get_crosstab_tuplestore: SPI_connect returned %d", ret);
/* Now retrieve the crosstab source rows */
ret = SPI_execute(sql, true, 0);
proc = SPI_processed;
/* Check for qualifying tuples */
if ((ret == SPI_OK_SELECT) && (proc > 0))
{
SPITupleTable *spi_tuptable = SPI_tuptable;
TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
int ncols = spi_tupdesc->natts;
char *rowid;
char *lastrowid = NULL;
bool firstpass = true;
int i,
j;
int result_ncols;
if (num_categories == 0)
{
/* no qualifying category tuples */
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("provided \"categories\" SQL must " \
"return 1 column of at least one row")));
}
/*
* The provided SQL query must always return at least three columns:
*
* 1. rowname   the label for each row - column 1 in the final result
* 2. category  the label for each value-column in the final result
* 3. value     the values used to populate the value-columns
*
* If there are more than three columns, the last two are taken as
* "category" and "values". The first column is taken as "rowname".
* Additional columns (2 thru N-2) are assumed the same for the same
* "rowname", and are copied into the result tuple from the first time
* we encounter a particular rowname.
*/
if (ncols < 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid source data SQL statement"),
errdetail("The provided SQL must return 3 " \
" columns; rowid, category, and values.")));
result_ncols = (ncols - 2) + num_categories;
/* Recheck to make sure the tuple descriptor still looks reasonable */
if (tupdesc->natts != result_ncols)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Query-specified return " \
"tuple has %d columns but crosstab " \
"returns %d.", tupdesc->natts, result_ncols)));
/* allocate space */
values = (char **) palloc(result_ncols * sizeof(char *));
/* and make sure it's clear */
memset(values, '\0', result_ncols * sizeof(char *));
for (i = 0; i < proc; i++)
{
HeapTuple spi_tuple;
crosstab_cat_desc *catdesc;
char *catname;
/* get the next sql result tuple */
spi_tuple = spi_tuptable->vals[i];
/* get the rowid from the current sql result tuple */
rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
//......... part of the code omitted here .........
Example 11: gtsvector_compress
Datum
gtsvector_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if (entry->leafkey)
{ /* tsvector */
SignTSVector *res;
TSVector val = DatumGetTSVector(entry->key);
int32 len;
int32 *arr;
WordEntry *ptr = ARRPTR(val);
char *words = STRPTR(val);
len = CALCGTSIZE(ARRKEY, val->size);
res = (SignTSVector *) palloc(len);
SET_VARSIZE(res, len);
res->flag = ARRKEY;
arr = GETARR(res);
len = val->size;
while (len--)
{
pg_crc32 c;
INIT_LEGACY_CRC32(c);
COMP_LEGACY_CRC32(c, words + ptr->pos, ptr->len);
FIN_LEGACY_CRC32(c);
*arr = *(int32 *) &c;
arr++;
ptr++;
}
len = uniqueint(GETARR(res), val->size);
if (len != val->size)
{
/*
* there is a collision of hash-function; len is always less than
* val->size
*/
len = CALCGTSIZE(ARRKEY, len);
res = (SignTSVector *) repalloc((void *) res, len);
SET_VARSIZE(res, len);
}
/* make signature, if array is too long */
if (VARSIZE(res) > TOAST_INDEX_TARGET)
{
SignTSVector *ressign;
len = CALCGTSIZE(SIGNKEY, 0);
ressign = (SignTSVector *) palloc(len);
SET_VARSIZE(ressign, len);
ressign->flag = SIGNKEY;
makesign(GETSIGN(ressign), res);
res = ressign;
}
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
}
else if (ISSIGNKEY(DatumGetPointer(entry->key)) &&
!ISALLTRUE(DatumGetPointer(entry->key)))
{
int32 i,
len;
SignTSVector *res;
BITVECP sign = GETSIGN(DatumGetPointer(entry->key));
LOOPBYTE
{
if ((sign[i] & 0xff) != 0xff)
PG_RETURN_POINTER(retval);
}
len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0);
res = (SignTSVector *) palloc(len);
SET_VARSIZE(res, len);
res->flag = SIGNKEY | ALLISTRUE;
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
}
PG_RETURN_POINTER(retval);
}
Example 12: load_categories_hash
/*
* load up the categories hash table
*/
static HTAB *
load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
{
HTAB *crosstab_hash;
HASHCTL ctl;
int ret;
int proc;
MemoryContext SPIcontext;
/* initialize the category hash table */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = MAX_CATNAME_LEN;
ctl.entrysize = sizeof(crosstab_HashEnt);
ctl.hcxt = per_query_ctx;
/*
* use INIT_CATS, defined above as a guess of how many hash table entries
* to create, initially
*/
crosstab_hash = hash_create("crosstab hash",
INIT_CATS,
&ctl,
HASH_ELEM | HASH_CONTEXT);
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)
/* internal error */
elog(ERROR, "load_categories_hash: SPI_connect returned %d", ret);
/* Retrieve the category name rows */
ret = SPI_execute(cats_sql, true, 0);
proc = SPI_processed;
/* Check for qualifying tuples */
if ((ret == SPI_OK_SELECT) && (proc > 0))
{
SPITupleTable *spi_tuptable = SPI_tuptable;
TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
int i;
/*
* The provided categories SQL query must always return one column:
* category - the label or identifier for each column
*/
if (spi_tupdesc->natts != 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("provided \"categories\" SQL must " \
"return 1 column of at least one row")));
for (i = 0; i < proc; i++)
{
crosstab_cat_desc *catdesc;
char *catname;
HeapTuple spi_tuple;
/* get the next sql result tuple */
spi_tuple = spi_tuptable->vals[i];
/* get the category from the current sql result tuple */
catname = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
SPIcontext = MemoryContextSwitchTo(per_query_ctx);
catdesc = (crosstab_cat_desc *) palloc(sizeof(crosstab_cat_desc));
catdesc->catname = catname;
catdesc->attidx = i;
/* Add the proc description block to the hashtable */
crosstab_HashTableInsert(crosstab_hash, catdesc);
MemoryContextSwitchTo(SPIcontext);
}
}
if (SPI_finish() != SPI_OK_FINISH)
/* internal error */
elog(ERROR, "load_categories_hash: SPI_finish() failed");
return crosstab_hash;
}
Example 13: normal_rand
Datum
normal_rand(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
int call_cntr;
int max_calls;
normal_rand_fctx *fctx;
float8 mean;
float8 stddev;
float8 carry_val;
bool use_carry;
MemoryContext oldcontext;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
* switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* total number of tuples to be returned */
funcctx->max_calls = PG_GETARG_UINT32(0);
/* allocate memory for user context */
fctx = (normal_rand_fctx *) palloc(sizeof(normal_rand_fctx));
/*
* Use fctx to keep track of upper and lower bounds from call to call.
* It will also be used to carry over the spare value we get from the
* Box-Muller algorithm so that we only actually calculate a new value
* every other call.
*/
fctx->mean = PG_GETARG_FLOAT8(1);
fctx->stddev = PG_GETARG_FLOAT8(2);
fctx->carry_val = 0;
fctx->use_carry = false;
funcctx->user_fctx = fctx;
MemoryContextSwitchTo(oldcontext);
}
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
call_cntr = funcctx->call_cntr;
max_calls = funcctx->max_calls;
fctx = funcctx->user_fctx;
mean = fctx->mean;
stddev = fctx->stddev;
carry_val = fctx->carry_val;
use_carry = fctx->use_carry;
if (call_cntr < max_calls) /* do when there is more left to send */
{
float8 result;
if (use_carry)
{
/*
* reset use_carry and use second value obtained on last pass
*/
fctx->use_carry = false;
result = carry_val;
}
else
{
float8 normval_1;
float8 normval_2;
/* Get the next two normal values */
get_normal_pair(&normval_1, &normval_2);
/* use the first */
result = mean + (stddev * normval_1);
/* and save the second */
fctx->carry_val = mean + (stddev * normval_2);
fctx->use_carry = true;
}
/* send the result */
SRF_RETURN_NEXT(funcctx, Float8GetDatum(result));
}
else
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
Example 14: build_tuplestore_recursively
static Tuplestorestate *
build_tuplestore_recursively(char *key_fld,
char *parent_key_fld,
char *relname,
char *orderby_fld,
char *branch_delim,
char *start_with,
char *branch,
int level,
int *serial,
int max_depth,
bool show_branch,
bool show_serial,
MemoryContext per_query_ctx,
AttInMetadata *attinmeta,
Tuplestorestate *tupstore)
{
TupleDesc tupdesc = attinmeta->tupdesc;
int ret;
int proc;
int serial_column;
StringInfoData sql;
char **values;
char *current_key;
char *current_key_parent;
char current_level[INT32_STRLEN];
char serial_str[INT32_STRLEN];
char *current_branch;
HeapTuple tuple;
if (max_depth > 0 && level > max_depth)
return tupstore;
initStringInfo(&sql);
/* Build initial sql statement */
if (!show_serial)
{
appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s",
key_fld,
parent_key_fld,
relname,
parent_key_fld,
quote_literal_cstr(start_with),
key_fld, key_fld, parent_key_fld);
serial_column = 0;
}
else
{
appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s ORDER BY %s",
key_fld,
parent_key_fld,
relname,
parent_key_fld,
quote_literal_cstr(start_with),
key_fld, key_fld, parent_key_fld,
orderby_fld);
serial_column = 1;
}
if (show_branch)
values = (char **) palloc((CONNECTBY_NCOLS + serial_column) * sizeof(char *));
else
values = (char **) palloc((CONNECTBY_NCOLS_NOBRANCH + serial_column) * sizeof(char *));
/* First time through, do a little setup */
if (level == 0)
{
/* root value is the one we initially start with */
values[0] = start_with;
/* root value has no parent */
values[1] = NULL;
/* root level is 0 */
sprintf(current_level, "%d", level);
values[2] = current_level;
/* root branch is just starting root value */
if (show_branch)
values[3] = start_with;
/* root starts the serial with 1 */
if (show_serial)
{
sprintf(serial_str, "%d", (*serial)++);
if (show_branch)
values[4] = serial_str;
else
values[3] = serial_str;
}
/* construct the tuple */
tuple = BuildTupleFromCStrings(attinmeta, values);
/* now store it */
tuplestore_puttuple(tupstore, tuple);
/* increment level */
level++;
//......... part of the code omitted here .........
Example 15: g_int_picksplit
/*
** The GiST PickSplit method for _intments
** We use Guttman's poly time split algorithm
*/
Datum
g_int_picksplit(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber i,
j;
ArrayType *datum_alpha,
*datum_beta;
ArrayType *datum_l,
*datum_r;
ArrayType *union_d,
*union_dl,
*union_dr;
ArrayType *inter_d;
bool firsttime;
float size_alpha,
size_beta,
size_union,
size_inter;
float size_waste,
waste;
float size_l,
size_r;
int nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
OffsetNumber maxoff;
SPLITCOST *costvector;
#ifdef GIST_DEBUG
elog(DEBUG3, "--------picksplit %d", (VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY));
#endif
maxoff = ((VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY)) - 2;
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
v->spl_left = (OffsetNumber *) palloc(nbytes);
v->spl_right = (OffsetNumber *) palloc(nbytes);
firsttime = true;
waste = 0.0;
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i))
{
datum_alpha = (ArrayType *) DatumGetPointer(((GISTENTRY *) VARDATA(entryvec))[i].key);
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j))
{
datum_beta = (ArrayType *) DatumGetPointer(((GISTENTRY *) VARDATA(entryvec))[j].key);
/* compute the wasted space by unioning these guys */
/* size_waste = size_union - size_inter; */
union_d = inner_int_union(datum_alpha, datum_beta);
rt__int_size(union_d, &size_union);
inter_d = inner_int_inter(datum_alpha, datum_beta);
rt__int_size(inter_d, &size_inter);
size_waste = size_union - size_inter;
pfree(union_d);
if (inter_d != (ArrayType *) NULL)
pfree(inter_d);
/*
* are these a more promising split than what we've already
* seen?
*/
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = i;
seed_2 = j;
firsttime = false;
}
}
}
left = v->spl_left;
v->spl_nleft = 0;
right = v->spl_right;
v->spl_nright = 0;
if (seed_1 == 0 || seed_2 == 0)
{
seed_1 = 1;
seed_2 = 2;
}
datum_alpha = (ArrayType *) DatumGetPointer(((GISTENTRY *) VARDATA(entryvec))[seed_1].key);
datum_l = copy_intArrayType(datum_alpha);
rt__int_size(datum_l, &size_l);
datum_beta = (ArrayType *) DatumGetPointer(((GISTENTRY *) VARDATA(entryvec))[seed_2].key);
datum_r = copy_intArrayType(datum_beta);
rt__int_size(datum_r, &size_r);
maxoff = OffsetNumberNext(maxoff);
//......... part of the code omitted here .........