本文整理汇总了C++中ReleaseBuffer函数的典型用法代码示例。如果您正苦于以下问题:C++ ReleaseBuffer函数的具体用法?C++ ReleaseBuffer怎么用?C++ ReleaseBuffer使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ReleaseBuffer函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: XLogReadBufferExtended
/*
* XLogReadBufferExtended
* Read a page during XLOG replay
*
* This is functionally comparable to ReadBufferExtended. There's some
* differences in the behavior wrt. the "mode" argument:
*
* In RBM_NORMAL mode, if the page doesn't exist, or contains all-zeroes, we
* return InvalidBuffer. In this case the caller should silently skip the
* update on this page. (In this situation, we expect that the page was later
* dropped or truncated. If we don't see evidence of that later in the WAL
* sequence, we'll complain at the end of WAL replay.)
*
* In RBM_ZERO and RBM_ZERO_ON_ERROR modes, if the page doesn't exist, the
* relation is extended with all-zeroes pages up to the given block number.
*
* In RBM_NORMAL_NO_LOG mode, we return InvalidBuffer if the page doesn't
* exist, and we don't check for all-zeroes. Thus, no log entry is made
* to imply that the page should be dropped or truncated later.
*/
Buffer
XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
BlockNumber blkno, ReadBufferMode mode)
{
BlockNumber lastblock;
Buffer buffer;
SMgrRelation smgr;
Assert(blkno != P_NEW);
/* Open the relation at smgr level */
smgr = smgropen(rnode, InvalidBackendId);
/*
* Create the target file if it doesn't already exist. This lets us cope
* if the replay sequence contains writes to a relation that is later
* deleted. (The original coding of this routine would instead suppress
* the writes, but that seems like it risks losing valuable data if the
* filesystem loses an inode during a crash. Better to write the data
* until we are actually told to delete the file.)
*/
smgrcreate(smgr, forknum, true);
lastblock = smgrnblocks(smgr, forknum);
if (blkno < lastblock)
{
/* page exists in file */
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
mode, NULL);
}
else
{
/* hm, page doesn't exist in file */
if (mode == RBM_NORMAL)
{
log_invalid_page(rnode, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
return InvalidBuffer;
/* OK to extend the file */
/* we do this in recovery only - no rel-extension lock needed */
Assert(InRecovery);
buffer = InvalidBuffer;
do
{
if (buffer != InvalidBuffer)
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, forknum,
P_NEW, mode, NULL);
}
while (BufferGetBlockNumber(buffer) < blkno);
/* Handle the corner case that P_NEW returns non-consecutive pages */
if (BufferGetBlockNumber(buffer) != blkno)
{
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
mode, NULL);
}
}
if (mode == RBM_NORMAL)
{
/* check that page has been initialized */
Page page = (Page) BufferGetPage(buffer);
/*
* We assume that PageIsNew is safe without a lock. During recovery,
* there should be no other backends that could modify the buffer at
* the same time.
*/
if (PageIsNew(page))
{
ReleaseBuffer(buffer);
log_invalid_page(rnode, forknum, blkno, true);
return InvalidBuffer;
}
}
//.........这里部分代码省略.........
示例2: bt_metap
/* ------------------------------------------------
* bt_metap()
*
* Get a btree meta-page information
*
* Usage: SELECT * FROM bt_metap('t1_pkey')
* ------------------------------------------------
*/
Datum
bt_metap(PG_FUNCTION_ARGS)
{
text *relname = PG_GETARG_TEXT_P(0);
Buffer buffer;
Relation rel;
RangeVar *relrv;
Datum result;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to use pgstattuple functions"))));
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
if (!IS_INDEX(rel) || !IS_BTREE(rel))
elog(ERROR, "bt_metap() can be used only on b-tree index.");
/*
* Reject attempts to read non-local temporary relations; we would
* be likely to get wrong data since we have no visibility into the
* owning session's local buffers.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
buffer = ReadBuffer(rel, 0);
{
BTMetaPageData *metad;
TupleDesc tupleDesc;
int j;
char *values[BTMETAP_NCOLUMNS];
HeapTuple tuple;
Page page = BufferGetPage(buffer);
metad = BTPageGetMeta(page);
tupleDesc = RelationNameGetTupleDesc(BTMETAP_TYPE);
j = 0;
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_magic);
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_version);
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_root);
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_level);
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_fastroot);
values[j] = palloc(32);
snprintf(values[j++], 32, "%d", metad->btm_fastlevel);
tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
values);
result = TupleGetDatum(TupleDescGetSlot(tupleDesc), tuple);
}
ReleaseBuffer(buffer);
relation_close(rel, AccessShareLock);
PG_RETURN_DATUM(result);
}
示例3: SpGistGetBuffer
/*
* Get a buffer of the type and parity specified by flags, having at least
* as much free space as indicated by needSpace. We use the lastUsedPages
* cache to assign the same buffer previously requested when possible.
* The returned buffer is already pinned and exclusive-locked.
*
* *isNew is set true if the page was initialized here, false if it was
* already valid.
*/
Buffer
SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
{
SpGistCache *cache = spgGetCache(index);
SpGistLastUsedPage *lup;
/* Bail out if even an empty page wouldn't meet the demand */
if (needSpace > SPGIST_PAGE_CAPACITY)
elog(ERROR, "desired SPGiST tuple size is too big");
/*
* If possible, increase the space request to include relation's
* fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
*/
needSpace += RelationGetTargetPageFreeSpace(index,
SPGIST_DEFAULT_FILLFACTOR);
needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY);
/* Get the cache entry for this flags setting */
lup = GET_LUP(cache, flags);
/* If we have nothing cached, just turn it over to allocNewBuffer */
if (lup->blkno == InvalidBlockNumber)
{
*isNew = true;
return allocNewBuffer(index, flags);
}
/* fixed pages should never be in cache */
Assert(!SpGistBlockIsFixed(lup->blkno));
/* If cached freeSpace isn't enough, don't bother looking at the page */
if (lup->freeSpace >= needSpace)
{
Buffer buffer;
Page page;
buffer = ReadBuffer(index, lup->blkno);
if (!ConditionalLockBuffer(buffer))
{
/*
* buffer is locked by another process, so return a new buffer
*/
ReleaseBuffer(buffer);
*isNew = true;
return allocNewBuffer(index, flags);
}
page = BufferGetPage(buffer);
if (PageIsNew(page) || SpGistPageIsDeleted(page) || PageIsEmpty(page))
{
/* OK to initialize the page */
uint16 pageflags = 0;
if (GBUF_REQ_LEAF(flags))
pageflags |= SPGIST_LEAF;
if (GBUF_REQ_NULLS(flags))
pageflags |= SPGIST_NULLS;
SpGistInitBuffer(buffer, pageflags);
lup->freeSpace = PageGetExactFreeSpace(page) - needSpace;
*isNew = true;
return buffer;
}
/*
* Check that page is of right type and has enough space. We must
* recheck this since our cache isn't necessarily up to date.
*/
if ((GBUF_REQ_LEAF(flags) ? SpGistPageIsLeaf(page) : !SpGistPageIsLeaf(page)) &&
(GBUF_REQ_NULLS(flags) ? SpGistPageStoresNulls(page) : !SpGistPageStoresNulls(page)))
{
int freeSpace = PageGetExactFreeSpace(page);
if (freeSpace >= needSpace)
{
/* Success, update freespace info and return the buffer */
lup->freeSpace = freeSpace - needSpace;
*isNew = false;
return buffer;
}
}
/*
* fallback to allocation of new buffer
*/
UnlockReleaseBuffer(buffer);
//.........这里部分代码省略.........
示例4: PersistentStore_DiagnoseDumpTable
/*
 * PersistentStore_DiagnoseDumpTable
 *
 * Write a diagnostic dump of a persistent table to the server log:
 * first a summary line of the shared-memory state (serial numbers and
 * free/max TIDs), then every tuple in scan order, logging the LSN of
 * each heap page the scan moves onto.
 */
static void PersistentStore_DiagnoseDumpTable(
	PersistentStoreData *storeData,
	PersistentStoreSharedData *storeSharedData)
{
	/* GUC escape hatch: administrators can suppress this potentially huge dump. */
	if (disable_persistent_diagnostic_dump)
	{
		return;
	}

	MIRROREDLOCK_BUFMGR_DECLARE;

	PersistentStoreScan storeScan;
	ItemPointerData persistentTid;
	int64 persistentSerialNum;
	Datum *values;
	BlockNumber lastDisplayedBlockNum;
	bool displayedOne;
	BlockNumber currentBlockNum;

	/* Summary of the shared-memory bookkeeping for this table. */
	elog(LOG,
		 "Diagnostic dump of persistent table ('%s'): maximum in-use serial number " INT64_FORMAT ", maximum free order number " INT64_FORMAT ", free TID %s, maximum known TID %s",
		 storeData->tableName,
		 storeSharedData->maxInUseSerialNum,
		 storeSharedData->maxFreeOrderNum,
		 ItemPointerToString(&storeSharedData->freeTid),
		 ItemPointerToString2(&storeSharedData->maxTid));

	/* One Datum slot per attribute, reused for every fetched tuple. */
	values = (Datum*)palloc(storeData->numAttributes * sizeof(Datum));

	PersistentStore_BeginScan(
						storeData,
						storeSharedData,
						&storeScan);

	lastDisplayedBlockNum = 0;
	displayedOne = false;
	while (PersistentStore_GetNext(
							&storeScan,
							values,
							&persistentTid,
							&persistentSerialNum))
	{
		/*
		 * Use the BlockIdGetBlockNumber routine because ItemPointerGetBlockNumber
		 * asserts for valid TID.
		 */
		currentBlockNum = BlockIdGetBlockNumber(&persistentTid.ip_blkid);

		/* Log the page LSN only once per distinct block the scan visits. */
		if (!displayedOne || currentBlockNum != lastDisplayedBlockNum)
		{
			Buffer buffer;
			PageHeader page;
			XLogRecPtr lsn;

			/*
			 * Fetch the block and display the LSN.
			 *
			 * NOTE(review): the LSN is read while holding only a pin (no
			 * buffer content lock), under the MirroredLock; presumably
			 * acceptable for a diagnostic snapshot — confirm.
			 */
			// -------- MirroredLock ----------
			MIRROREDLOCK_BUFMGR_LOCK;

			buffer = ReadBuffer(
						storeScan.persistentRel,
						currentBlockNum);
			page = (PageHeader) BufferGetPage(buffer);
			lsn = PageGetLSN(page);
			ReleaseBuffer(buffer);

			MIRROREDLOCK_BUFMGR_UNLOCK;
			// -------- MirroredLock ----------

			elog(LOG, "Diagnostic LSN %s of page %u",
				 XLogLocationToString(&lsn),
				 currentBlockNum);

			lastDisplayedBlockNum = currentBlockNum;
			displayedOne = true;
		}

		/*
		 * Display the persistent tuple.
		 */
		(*storeData->printTupleCallback)(
									LOG,
									"DIAGNOSE",
									&persistentTid,
									values);
	}

	PersistentStore_EndScan(&storeScan);

	pfree(values);
}
示例5: bt_page_stats
/* -----------------------------------------------
 * bt_page_stats()
 *
 * Get summary statistics for one btree page.
 *
 * Usage: SELECT * FROM bt_page_stats('t1_pkey', 1);
 * (Block 0 is the meta page and is rejected below, so the smallest
 * valid block number is 1.)
 * -----------------------------------------------
 */
Datum
bt_page_stats(PG_FUNCTION_ARGS)
{
	text	   *relname = PG_GETARG_TEXT_P(0);
	uint32		blkno = PG_GETARG_UINT32(1);
	Buffer		buffer;
	Relation	rel;
	RangeVar   *relrv;
	Datum		result;

	/* Raw page inspection is restricted to superusers. */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use pgstattuple functions"))));

	relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
	rel = relation_openrv(relrv, AccessShareLock);

	if (!IS_INDEX(rel) || !IS_BTREE(rel))
		elog(ERROR, "bt_page_stats() can be used only on b-tree index.");

	/*
	 * Reject attempts to read non-local temporary relations; we would
	 * be likely to get wrong data since we have no visibility into the
	 * owning session's local buffers.
	 */
	if (isOtherTempNamespace(RelationGetNamespace(rel)))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	/* The meta page has no per-page statistics; refuse it. */
	if (blkno == 0)
		elog(ERROR, "Block 0 is a meta page.");

	CHECK_RELATION_BLOCK_RANGE(rel, blkno);

	/*
	 * Pin the target page.  NOTE(review): the page is inspected while
	 * holding only the pin, without a buffer content lock — presumably
	 * acceptable for a diagnostic snapshot; confirm.
	 */
	buffer = ReadBuffer(rel, blkno);

	{
		HeapTuple	tuple;
		TupleDesc	tupleDesc;
		int			j;
		char	   *values[BTPAGESTATS_NCOLUMNS];
		BTPageStat	stat;

		/* Collect the statistics for this page. */
		GetBTPageStatistics(blkno, buffer, &stat);

		tupleDesc = RelationNameGetTupleDesc(BTPAGESTATS_TYPE);

		/* Render each column as a C string for BuildTupleFromCStrings. */
		j = 0;
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.blkno);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%c", stat.type);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.live_items);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.dead_items);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.avg_item_size);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.page_size);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.free_size);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.btpo_prev);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.btpo_next);
		values[j] = palloc(32);

		/*
		 * For a deleted page (type 'd') the btpo union holds the deleting
		 * transaction id; otherwise it holds the tree level.
		 */
		if (stat.type == 'd')
			snprintf(values[j++], 32, "%d", stat.btpo.xact);
		else
			snprintf(values[j++], 32, "%d", stat.btpo.level);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", stat.btpo_flags);

		tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
									   values);
		result = TupleGetDatum(TupleDescGetSlot(tupleDesc), tuple);
	}

	/* Unpin the page and close the index. */
	ReleaseBuffer(buffer);
	relation_close(rel, AccessShareLock);

	PG_RETURN_DATUM(result);
}
示例6: RelationGetBufferForTuple
//.........这里部分代码省略.........
* done a bit of extra work for no gain, but there's no real harm
* done.
*/
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer,
targetBlock, otherBlock, vmbuffer,
vmbuffer_other);
else
GetVisibilityMapPins(relation, otherBuffer, buffer,
otherBlock, targetBlock, vmbuffer_other,
vmbuffer);
/*
* Now we can check to see if there's enough free space here. If so,
* we're done.
*/
page = BufferGetPage(buffer);
pageFreeSpace = PageGetHeapFreeSpace(page);
if (len + saveFreeSpace <= pageFreeSpace)
{
/* use this page as future insert target, too */
RelationSetTargetBlock(relation, targetBlock);
return buffer;
}
/*
* Not enough space, so we must give up our page locks and pin (if
* any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (otherBuffer == InvalidBuffer)
ReleaseBuffer(buffer);
else if (otherBlock != targetBlock)
{
LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
/* Without FSM, always fall out of the loop and extend */
if (!use_fsm)
break;
/*
* Update FSM as to condition of this page, and ask for another page
* to try.
*/
targetBlock = RecordAndGetPageWithFreeSpace(relation,
targetBlock,
pageFreeSpace,
len + saveFreeSpace);
}
/*
* Have to extend the relation.
*
* We have to use a lock to ensure no one else is extending the rel at the
* same time, else we will both try to initialize the same new page. We
* can skip locking for new or temp relations, however, since no one else
* could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(relation);
if (needLock)
LockRelationForExtension(relation, ExclusiveLock);
示例7: PersistentStore_ReplaceTuple
void PersistentStore_ReplaceTuple(
PersistentStoreData *storeData,
PersistentStoreSharedData *storeSharedData,
ItemPointer persistentTid,
/* TID of the stored tuple. */
HeapTuple tuple,
Datum *newValues,
bool *replaces,
bool flushToXLog)
/* When true, the XLOG record for this change will be flushed to disk. */
{
Relation persistentRel;
bool *nulls;
HeapTuple replacementTuple = NULL;
XLogRecPtr xlogUpdateEndLoc;
#ifdef USE_ASSERT_CHECKING
if (storeSharedData == NULL ||
!PersistentStoreSharedData_EyecatcherIsValid(storeSharedData))
elog(ERROR, "Persistent store shared-memory not valid");
#endif
if (Debug_persistent_store_print)
elog(PersistentStore_DebugPrintLevel(),
"PersistentStore_ReplaceTuple: Going to replace set of columns in tuple at TID %s ('%s', shared data %p)",
ItemPointerToString(persistentTid),
storeData->tableName,
storeSharedData);
persistentRel = (*storeData->openRel)();
/*
* In order to keep the tuples the exact same size to enable direct reuse of
* free tuples, we do not use NULLs.
*/
nulls = (bool*)palloc0(storeData->numAttributes * sizeof(bool));
/*
* Modify the tuple.
*/
replacementTuple = heap_modify_tuple(tuple, persistentRel->rd_att,
newValues, nulls, replaces);
replacementTuple->t_self = *persistentTid;
frozen_heap_inplace_update(persistentRel, replacementTuple);
/*
* Return the XLOG location of the UPDATE tuple's XLOG record.
*/
xlogUpdateEndLoc = XLogLastInsertEndLoc();
heap_freetuple(replacementTuple);
pfree(nulls);
if (Debug_persistent_store_print)
{
Datum *readValues;
bool *readNulls;
HeapTupleData readTuple;
Buffer buffer;
HeapTuple readTupleCopy;
elog(PersistentStore_DebugPrintLevel(),
"PersistentStore_ReplaceTuple: Replaced set of columns in tuple at TID %s ('%s')",
ItemPointerToString(persistentTid),
storeData->tableName);
readValues = (Datum*)palloc(storeData->numAttributes * sizeof(Datum));
readNulls = (bool*)palloc(storeData->numAttributes * sizeof(bool));
readTuple.t_self = *persistentTid;
if (!heap_fetch(persistentRel, SnapshotAny,
&readTuple, &buffer, false, NULL))
{
elog(ERROR, "Failed to fetch persistent tuple at %s ('%s')",
ItemPointerToString(&readTuple.t_self),
storeData->tableName);
}
readTupleCopy = heaptuple_copy_to(&readTuple, NULL, NULL);
ReleaseBuffer(buffer);
heap_deform_tuple(readTupleCopy, persistentRel->rd_att, readValues, readNulls);
(*storeData->printTupleCallback)(
PersistentStore_DebugPrintLevel(),
"STORE REPLACED TUPLE",
persistentTid,
readValues);
heap_freetuple(readTupleCopy);
pfree(readValues);
pfree(readNulls);
}
(*storeData->closeRel)(persistentRel);
//.........这里部分代码省略.........
示例8: _hash_dropbuf
/*
 * _hash_dropbuf() -- release an unlocked buffer.
 *
 * Drops the pin on a buffer we hold no content lock on.  The rel
 * argument is not used by this routine.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
	ReleaseBuffer(buf);
}
示例9: GetBuffer
STDMETHODIMP CDecWMV9::ProcessOutput()
{
HRESULT hr = S_OK;
DWORD dwStatus = 0;
BYTE *pBuffer = GetBuffer(m_pRawBufferSize);
CMediaBuffer *pOutBuffer = new CMediaBuffer(pBuffer, m_pRawBufferSize, true);
pOutBuffer->SetLength(0);
DMO_OUTPUT_DATA_BUFFER OutputBufferStructs[1];
memset(&OutputBufferStructs[0], 0, sizeof(DMO_OUTPUT_DATA_BUFFER));
OutputBufferStructs[0].pBuffer = pOutBuffer;
hr = m_pDMO->ProcessOutput(0, 1, OutputBufferStructs, &dwStatus);
if (FAILED(hr)) {
ReleaseBuffer(pBuffer);
DbgLog((LOG_TRACE, 10, L"-> ProcessOutput failed with hr: %x", hr));
return S_FALSE;
}
if (hr == S_FALSE) {
ReleaseBuffer(pBuffer);
return S_FALSE;
}
LAVFrame *pFrame = nullptr;
AllocateFrame(&pFrame);
BITMAPINFOHEADER *pBMI = nullptr;
videoFormatTypeHandler(mtOut, &pBMI);
pFrame->width = pBMI->biWidth;
pFrame->height = pBMI->biHeight;
pFrame->format = m_OutPixFmt;
pFrame->key_frame = (OutputBufferStructs[0].dwStatus & DMO_OUTPUT_DATA_BUFFERF_SYNCPOINT);
AVRational display_aspect_ratio;
int64_t num = (int64_t)m_StreamAR.num * pBMI->biWidth;
int64_t den = (int64_t)m_StreamAR.den * pBMI->biHeight;
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, INT_MAX);
BYTE contentType = 0;
DWORD dwPropSize = 1;
pOutBuffer->GetProperty(WM_SampleExtensionGUID_ContentType, &contentType, &dwPropSize);
pFrame->interlaced = !!(contentType & WM_CT_INTERLACED);
pFrame->repeat = !!(contentType & WM_CT_REPEAT_FIRST_FIELD);
LAVDeintFieldOrder fo = m_pSettings->GetDeintFieldOrder();
pFrame->tff = (fo == DeintFieldOrder_Auto) ? !!(contentType & WM_CT_TOP_FIELD_FIRST) : (fo == DeintFieldOrder_TopFieldFirst);
if (pFrame->interlaced && !m_bInterlaced)
m_bInterlaced = TRUE;
pFrame->interlaced = (pFrame->interlaced || (m_bInterlaced && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);
if (m_bManualReorder) {
if (!m_timestampQueue.empty()) {
pFrame->rtStart = m_timestampQueue.front();
m_timestampQueue.pop();
if (OutputBufferStructs[0].dwStatus & DMO_OUTPUT_DATA_BUFFERF_TIMELENGTH) {
pFrame->rtStop = pFrame->rtStart + OutputBufferStructs[0].rtTimelength;
}
}
} else {
if (OutputBufferStructs[0].dwStatus & DMO_OUTPUT_DATA_BUFFERF_TIME) {
pFrame->rtStart = OutputBufferStructs[0].rtTimestamp;
if (OutputBufferStructs[0].dwStatus & DMO_OUTPUT_DATA_BUFFERF_TIMELENGTH) {
pFrame->rtStop = pFrame->rtStart + OutputBufferStructs[0].rtTimelength;
}
}
}
// Check alignment
// If not properly aligned, we need to make the data aligned.
int alignment = (m_OutPixFmt == LAVPixFmt_NV12) ? 16 : 32;
if ((pFrame->width % alignment) != 0) {
AllocLAVFrameBuffers(pFrame);
size_t ySize = pFrame->width * pFrame->height;
memcpy_plane(pFrame->data[0], pBuffer, pFrame->width, pFrame->stride[0], pFrame->height);
if (m_OutPixFmt == LAVPixFmt_NV12) {
memcpy_plane(pFrame->data[1], pBuffer+ySize, pFrame->width, pFrame->stride[1], pFrame->height / 2);
} else if (m_OutPixFmt == LAVPixFmt_YUV420) {
size_t uvSize = ySize / 4;
memcpy_plane(pFrame->data[2], pBuffer+ySize, pFrame->width / 2, pFrame->stride[2], pFrame->height / 2);
memcpy_plane(pFrame->data[1], pBuffer+ySize+uvSize, pFrame->width / 2, pFrame->stride[1], pFrame->height / 2);
}
ReleaseBuffer(pBuffer);
} else {
if (m_OutPixFmt == LAVPixFmt_NV12) {
pFrame->data[0] = pBuffer;
pFrame->data[1] = pBuffer + pFrame->width * pFrame->height;
pFrame->stride[0] = pFrame->stride[1] = pFrame->width;
} else if (m_OutPixFmt == LAVPixFmt_YUV420) {
pFrame->data[0] = pBuffer;
pFrame->data[2] = pBuffer + pFrame->width * pFrame->height;
pFrame->data[1] = pFrame->data[2] + (pFrame->width / 2) * (pFrame->height / 2);
pFrame->stride[0] = pFrame->width;
pFrame->stride[1] = pFrame->stride[2] = pFrame->width / 2;
}
pFrame->destruct = wmv9_buffer_destruct;
pFrame->priv_data = this;
}
//.........这里部分代码省略.........
示例10: RelationFindReplTupleByIndex
/*
* Search the relation 'rel' for tuple using the index.
*
* If a matching tuple is found, lock it with lockmode, fill the slot with its
* contents, and return true. Return false otherwise.
*/
bool
RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
LockTupleMode lockmode,
TupleTableSlot *searchslot,
TupleTableSlot *outslot)
{
HeapTuple scantuple;
ScanKeyData skey[INDEX_MAX_KEYS];
IndexScanDesc scan;
SnapshotData snap;
TransactionId xwait;
Relation idxrel;
bool found;
/* Open the index. */
idxrel = index_open(idxoid, RowExclusiveLock);
/* Start an index scan. */
InitDirtySnapshot(snap);
scan = index_beginscan(rel, idxrel, &snap,
IndexRelationGetNumberOfKeyAttributes(idxrel),
0);
/* Build scan key. */
build_replindex_scan_key(skey, rel, idxrel, searchslot);
retry:
found = false;
index_rescan(scan, skey, IndexRelationGetNumberOfKeyAttributes(idxrel), NULL, 0);
/* Try to find the tuple */
if ((scantuple = index_getnext(scan, ForwardScanDirection)) != NULL)
{
found = true;
ExecStoreTuple(scantuple, outslot, InvalidBuffer, false);
ExecMaterializeSlot(outslot);
xwait = TransactionIdIsValid(snap.xmin) ?
snap.xmin : snap.xmax;
/*
* If the tuple is locked, wait for locking transaction to finish and
* retry.
*/
if (TransactionIdIsValid(xwait))
{
XactLockTableWait(xwait, NULL, NULL, XLTW_None);
goto retry;
}
}
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
ItemPointerCopy(&outslot->tts_tuple->t_self, &locktup.t_self);
PushActiveSnapshot(GetLatestSnapshot());
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
PopActiveSnapshot();
switch (res)
{
case HeapTupleMayBeUpdated:
break;
case HeapTupleUpdated:
/* XXX: Improve handling here */
if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
else
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
case HeapTupleInvisible:
elog(ERROR, "attempted to lock invisible tuple");
default:
elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
break;
//.........这里部分代码省略.........
示例11: RelationFindReplTupleSeq
/*
* Search the relation 'rel' for tuple using the sequential scan.
*
* If a matching tuple is found, lock it with lockmode, fill the slot with its
* contents, and return true. Return false otherwise.
*
* Note that this stops on the first matching tuple.
*
* This can obviously be quite slow on tables that have more than few rows.
*/
bool
RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
TupleTableSlot *searchslot, TupleTableSlot *outslot)
{
HeapTuple scantuple;
HeapScanDesc scan;
SnapshotData snap;
TransactionId xwait;
bool found;
TupleDesc desc = RelationGetDescr(rel);
Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));
/* Start a heap scan. */
InitDirtySnapshot(snap);
scan = heap_beginscan(rel, &snap, 0, NULL);
retry:
found = false;
heap_rescan(scan, NULL);
/* Try to find the tuple */
while ((scantuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
if (!tuple_equals_slot(desc, scantuple, searchslot))
continue;
found = true;
ExecStoreTuple(scantuple, outslot, InvalidBuffer, false);
ExecMaterializeSlot(outslot);
xwait = TransactionIdIsValid(snap.xmin) ?
snap.xmin : snap.xmax;
/*
* If the tuple is locked, wait for locking transaction to finish and
* retry.
*/
if (TransactionIdIsValid(xwait))
{
XactLockTableWait(xwait, NULL, NULL, XLTW_None);
goto retry;
}
}
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
ItemPointerCopy(&outslot->tts_tuple->t_self, &locktup.t_self);
PushActiveSnapshot(GetLatestSnapshot());
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
PopActiveSnapshot();
switch (res)
{
case HeapTupleMayBeUpdated:
break;
case HeapTupleUpdated:
/* XXX: Improve handling here */
if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
else
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
case HeapTupleInvisible:
elog(ERROR, "attempted to lock invisible tuple");
default:
elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
break;
}
}
//.........这里部分代码省略.........
示例12: assert
/*
 * Service loop for one TCP client connection.
 *
 * Repeatedly receives a command buffer, decomposes it, and dispatches the
 * resulting command object.  "log on" / "log off" / "fetch channels" /
 * "fetch content" are intercepted until the client has logged on; a
 * zero-length receive means the peer closed the connection.
 *
 * BUGFIX: the original leaked the command object and/or the receive
 * buffer on two early-continue paths (failed dynamic_cast and failed
 * logon).  Both paths now release what they own before continuing.
 */
void C_TCPServerConnection::run()
{
	assert(m_pMsgConsolePrompter);
	m_pMsgConsolePrompter->PromptMessageLineBy(_T("TCP_CONNECTION_0"));

	// Give up on clients that stay silent for 90 seconds.
	socket().setReceiveTimeout(Poco::Timespan(90, 0));

	try
	{
		char* pszCmdBuffer = NULL;
		while (true)
		{
			long nCmdLen = 0;
			recvData(socket(), &pszCmdBuffer, nCmdLen);

			if (nCmdLen == 0)
			{
				// Peer closed the connection: log the session off if one exists.
				if (!m_szClientID.empty())
				{
					C_NetCmdLogOff logOff;
					logOff.HandleRequest(socket(), NULL, m_DBOperate, m_szClientID.c_str());
				}
				m_pMsgConsolePrompter->PromptMessageLineBy(_T("TCP_CONNECTION_1"));
				break;
			}

			if (nCmdLen > 0)
			{
				C_NetCommandDecomposer netCmdDecomposer;
				if (!netCmdDecomposer.Decompose(pszCmdBuffer))
				{
					ReleaseBuffer(&pszCmdBuffer);
					continue;
				}

				tstring szCmdName = netCmdDecomposer.GetCmdName().c_str();
				C_NetCommand* pRequestCmd = m_NetCmder.CreateCommand(szCmdName.c_str(), netCmdDecomposer.GetCmdType().c_str());
				if (pRequestCmd == NULL)
				{
					ReleaseBuffer(&pszCmdBuffer);
					continue;
				}

				// Session-management commands are intercepted until logon succeeds.
				if (szCmdName == _T("log on") || szCmdName == _T("log off") || szCmdName == _T("fetch channels") || szCmdName == _T("fetch content"))
				{
					if (!m_bLogOn)
					{
						C_NetCmdLogOn *pLogOn = dynamic_cast<C_NetCmdLogOn*>(pRequestCmd);
						if (pLogOn == NULL)
						{
							// BUGFIX: previously leaked both the command
							// object and the receive buffer.
							pRequestCmd->Release();
							ReleaseBuffer(&pszCmdBuffer);
							continue;
						}

						tstring szMac = netCmdDecomposer.GetCmdPara(_T("id"));
						if (pLogOn->HandleRequest(socket(), pszCmdBuffer, m_DBOperate, szMac.c_str()))
						{
							m_szClientID = szMac;
							m_bLogOn = true;
							ReleaseBuffer(&pszCmdBuffer);
							pLogOn->Release();
							C_AreaEvent::GetInstance()->PushEvent(socket(), szMac.c_str());
						}
						else
						{
							// BUGFIX: a failed logon previously leaked the
							// command object and the receive buffer.
							pLogOn->Release();
							ReleaseBuffer(&pszCmdBuffer);
						}
						continue;
					}
				}

				// Normal dispatch once logged on.
				pRequestCmd->HandleRequest(socket(), pszCmdBuffer, m_DBOperate, m_szClientID.c_str());
				pRequestCmd->Release();
				ReleaseBuffer(&pszCmdBuffer);
			}
		}
	}
	catch (Poco::Exception& exc)
	{
		// On any Poco error, tear the session down cleanly before returning.
		if (!m_szClientID.empty())
		{
			C_NetCmdLogOff logOff;
			logOff.HandleRequest(socket(), NULL, m_DBOperate, m_szClientID.c_str());
		}
		m_pMsgConsolePrompter->PromptMessageLineBy(_T("TCP_CONNECTION_2"), exc.what());
		return;
	}
}
示例13: get_raw_page_internal
/*
 * get_raw_page_internal
 *
 * Returns a verbatim copy of one block of the given relation fork as a
 * bytea, after validating the caller's privileges and the target.
 */
static bytea *
get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
{
	RangeVar   *rangevar;
	Relation	rel;
	bytea	   *page_copy;
	Buffer		buffer;

	/* Raw page access bypasses all ACLs, so restrict it to superusers. */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use raw functions"))));

	rangevar = makeRangeVarFromNameList(textToQualifiedNameList(relname));
	rel = relation_openrv(rangevar, AccessShareLock);

	/* Check that this relation has storage */
	if (rel->rd_rel->relkind == RELKIND_VIEW)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("cannot get raw page from view \"%s\"",
						RelationGetRelationName(rel))));
	if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("cannot get raw page from composite type \"%s\"",
						RelationGetRelationName(rel))));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(rel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary tables of other sessions")));

	if (blkno >= RelationGetNumberOfBlocks(rel))
		elog(ERROR, "block number %u is out of range for relation \"%s\"",
			 blkno, RelationGetRelationName(rel));

	/* Allocate the result bytea up front. */
	page_copy = (bytea *) palloc(BLCKSZ + VARHDRSZ);
	SET_VARSIZE(page_copy, BLCKSZ + VARHDRSZ);

	/* Take a verbatim copy of the page under a share content lock. */
	buffer = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL);
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	memcpy(VARDATA(page_copy), BufferGetPage(buffer), BLCKSZ);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buffer);

	relation_close(rel, AccessShareLock);

	return page_copy;
}
示例14: pg_prewarm
//.........这里部分代码省略.........
rel = relation_open(relOid, AccessShareLock);
aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_SELECT);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_CLASS, get_rel_name(relOid));
/* Check that the fork exists. */
RelationOpenSmgr(rel);
if (!smgrexists(rel->rd_smgr, forkNumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("fork \"%s\" does not exist for this relation",
forkString)));
/* Validate block numbers, or handle nulls. */
nblocks = RelationGetNumberOfBlocksInFork(rel, forkNumber);
if (PG_ARGISNULL(3))
first_block = 0;
else
{
first_block = PG_GETARG_INT64(3);
if (first_block < 0 || first_block >= nblocks)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("starting block number must be between 0 and " INT64_FORMAT,
nblocks - 1)));
}
if (PG_ARGISNULL(4))
last_block = nblocks - 1;
else
{
last_block = PG_GETARG_INT64(4);
if (last_block < 0 || last_block >= nblocks)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ending block number must be between 0 and " INT64_FORMAT,
nblocks - 1)));
}
/* Now we're ready to do the real work. */
if (ptype == PREWARM_PREFETCH)
{
#ifdef USE_PREFETCH
/*
* In prefetch mode, we just hint the OS to read the blocks, but we
* don't know whether it really does it, and we don't wait for it to
* finish.
*
* It would probably be better to pass our prefetch requests in chunks
* of a megabyte or maybe even a whole segment at a time, but there's
* no practical way to do that at present without a gross modularity
* violation, so we just do this.
*/
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
PrefetchBuffer(rel, forkNumber, block);
++blocks_done;
}
#else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("prefetch is not supported by this build")));
#endif
}
else if (ptype == PREWARM_READ)
{
/*
* In read mode, we actually read the blocks, but not into shared
* buffers. This is more portable than prefetch mode (it works
* everywhere) and is synchronous.
*/
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
smgrread(rel->rd_smgr, forkNumber, block, blockbuffer.data);
++blocks_done;
}
}
else if (ptype == PREWARM_BUFFER)
{
/*
* In buffer mode, we actually pull the data into shared_buffers.
*/
for (block = first_block; block <= last_block; ++block)
{
Buffer buf;
CHECK_FOR_INTERRUPTS();
buf = ReadBufferExtended(rel, forkNumber, block, RBM_NORMAL, NULL);
ReleaseBuffer(buf);
++blocks_done;
}
}
/* Close relation, release lock. */
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(blocks_done);
}
示例15: ReleaseBuffer
// Strip trailing whitespace from this string in place and return *this
// so calls can be chained.
FarString& FarString::TrimRight()
{
	FarSF::RTrim(GetBuffer());
	ReleaseBuffer();
	return *this;
}