本文整理汇总了C++中TableTuple::serializeToExport方法的典型用法代码示例。如果您正苦于以下问题:C++ TableTuple::serializeToExport方法的具体用法?C++ TableTuple::serializeToExport怎么用?C++ TableTuple::serializeToExport使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 TableTuple 的用法示例。
在下文中一共展示了TableTuple::serializeToExport方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: appendTuple
/*
* If txnId represents a new transaction, commit previous data.
* Always serialize the supplied tuple in to the stream.
* Return m_uso before this invocation - this marks the point
* in the stream the caller can rollback to if this append
* should be rolled back.
*/
size_t TupleStreamWrapper::appendTuple(int64_t lastCommittedTxnId,
int64_t txnId,
int64_t seqNo,
int64_t timestamp,
TableTuple &tuple,
TupleStreamWrapper::Type type)
{
size_t rowHeaderSz = 0;
size_t tupleMaxLength = 0;
assert(txnId >= m_openTransactionId);
commit(lastCommittedTxnId, txnId);
// Compute the upper bound on bytes required to serialize tuple.
// exportxxx: can memoize this calculation.
tupleMaxLength = computeOffsets(tuple, &rowHeaderSz);
if (!m_currBlock) {
extendBufferChain(m_defaultCapacity);
}
if ((m_currBlock->offset() + tupleMaxLength) > m_defaultCapacity) {
extendBufferChain(tupleMaxLength);
}
// initialize the full row header to 0. This also
// has the effect of setting each column non-null.
::memset(m_currBlock->mutableDataPtr(), 0, rowHeaderSz);
// the nullarray lives in rowheader after the 4 byte header length prefix
uint8_t *nullArray =
reinterpret_cast<uint8_t*>(m_currBlock->mutableDataPtr() + sizeof (int32_t));
// position the serializer after the full rowheader
ExportSerializeOutput io(m_currBlock->mutableDataPtr() + rowHeaderSz,
m_currBlock->remaining() - rowHeaderSz);
// write metadata columns
io.writeLong(txnId);
io.writeLong(timestamp);
io.writeLong(seqNo);
io.writeLong(m_partitionId);
io.writeLong(m_siteId);
// use 1 for INSERT EXPORT op, 0 for DELETE EXPORT op
io.writeLong((type == INSERT) ? 1L : 0L);
// write the tuple's data
tuple.serializeToExport(io, METADATA_COL_CNT, nullArray);
// write the row size in to the row header
// rowlength does not include the 4 byte row header
// but does include the null array.
ExportSerializeOutput hdr(m_currBlock->mutableDataPtr(), 4);
hdr.writeInt((int32_t)(io.position()) + (int32_t)rowHeaderSz - 4);
// update m_offset
m_currBlock->consumed(rowHeaderSz + io.position());
// update uso.
const size_t startingUso = m_uso;
m_uso += (rowHeaderSz + io.position());
return startingUso;
}
示例2: appendTuple
/*
 * Serialize one tuple into the DR stream as a transaction record of the
 * given type.
 *
 * If SpHandle represents a new transaction, commit previous data.
 * Always serialize the supplied tuple in to the stream.
 * Return m_uso before this invocation - this marks the point
 * in the stream the caller can rollback to if this append
 * should be rolled back.
 *
 * Record layout written here:
 *   [1B DR_VERSION][1B record type][8B table handle]
 *   [4B row length][null array][row data][4B CRC32C]
 * The CRC covers everything from the version byte through the row data.
 *
 * tableHandle is read as a raw int64_t, so callers must pass a pointer
 * to at least 8 valid bytes.
 */
size_t DRTupleStream::appendTuple(int64_t lastCommittedSpHandle,
                                  char *tableHandle,
                                  int64_t txnId,
                                  int64_t spHandle,
                                  int64_t uniqueId,
                                  TableTuple &tuple,
                                  DRRecordType type)
{
    //Drop the row, don't move the USO
    if (!m_enabled) return m_uso;
    size_t rowHeaderSz = 0;
    size_t tupleMaxLength = 0;
    // Transaction IDs for transactions applied to this tuple stream
    // should always be moving forward in time.
    if (spHandle < m_openSpHandle)
    {
        throwFatalException(
                "Active transactions moving backwards: openSpHandle is %jd, while the truncate spHandle is %jd",
                (intmax_t)m_openSpHandle, (intmax_t)spHandle
                );
    }
    // Commit any previously open transaction. Returns SIZE_MAX when no
    // BEGIN TXN entry was written for this transaction (handled below).
    size_t startingUso = commit(lastCommittedSpHandle, spHandle, txnId, uniqueId, false, false);
    // Compute the upper bound on bytes required to serialize tuple.
    // exportxxx: can memoize this calculation.
    tupleMaxLength = computeOffsets(tuple, &rowHeaderSz) + TXN_RECORD_HEADER_SIZE;
    if (!m_currBlock) {
        extendBufferChain(m_defaultCapacity);
    }
    if (m_currBlock->remaining() < tupleMaxLength) {
        extendBufferChain(tupleMaxLength);
    }
    ExportSerializeOutput io(m_currBlock->mutableDataPtr(),
                             m_currBlock->remaining());
    // Record header: version, record type, and the owning table's handle.
    io.writeByte(DR_VERSION);
    io.writeByte(static_cast<int8_t>(type));
    io.writeLong(*reinterpret_cast<int64_t*>(tableHandle));
    // initialize the full row header to 0. This also
    // has the effect of setting each column non-null.
    ::memset(m_currBlock->mutableDataPtr() + io.position(), 0, rowHeaderSz);
    // the nullarray lives in rowheader after the 4 byte header length prefix
    uint8_t *nullArray =
        reinterpret_cast<uint8_t*>(m_currBlock->mutableDataPtr() + io.position() + sizeof(int32_t));
    // Reserve the row header by moving the position beyond the row header.
    // The row header includes the 4 byte length prefix and the null array.
    const size_t lengthPrefixPosition = io.reserveBytes(rowHeaderSz);
    // write the tuple's data (no metadata columns in the DR stream,
    // hence the 0 column offset)
    tuple.serializeToExport(io, 0, nullArray);
    // write the row size in to the row header
    // rowlength does not include the 4 byte length prefix or record header
    // but does include the null array.
    ExportSerializeOutput hdr(m_currBlock->mutableDataPtr() + lengthPrefixPosition, 4);
    //The TXN_RECORD_HEADER_SIZE is 4 bytes longer because it includes the checksum at the end
    //so there is no need to subtract and additional 4 bytes to make the length prefix not inclusive
    hdr.writeInt((int32_t)(io.position() - TXN_RECORD_HEADER_SIZE));
    // CRC32C over everything written so far (version byte through row data),
    // appended as the final 4 bytes of the record.
    uint32_t crc = vdbcrc::crc32cInit();
    crc = vdbcrc::crc32c( crc, m_currBlock->mutableDataPtr(), io.position());
    crc = vdbcrc::crc32cFinish(crc);
    io.writeInt(crc);
    // update m_offset
    m_currBlock->consumed(io.position());
    // No BEGIN TXN entry was written, use the current USO
    if (startingUso == SIZE_MAX) {
        startingUso = m_uso;
    }
    // update uso.
    m_uso += io.position();
    // std::cout << "Appending row " << io.position() << " at " << m_currBlock->offset() << std::endl;
    return startingUso;
}
示例3: appendTuple
/*
* If SpHandle represents a new transaction, commit previous data.
* Always serialize the supplied tuple in to the stream.
* Return m_uso before this invocation - this marks the point
* in the stream the caller can rollback to if this append
* should be rolled back.
*/
size_t ExportTupleStream::appendTuple(int64_t lastCommittedSpHandle,
int64_t spHandle,
int64_t seqNo,
int64_t uniqueId,
int64_t timestamp,
TableTuple &tuple,
ExportTupleStream::Type type)
{
size_t rowHeaderSz = 0;
size_t tupleMaxLength = 0;
// Transaction IDs for transactions applied to this tuple stream
// should always be moving forward in time.
if (spHandle < m_openSpHandle)
{
throwFatalException(
"Active transactions moving backwards: openSpHandle is %jd, while the append spHandle is %jd",
(intmax_t)m_openSpHandle, (intmax_t)spHandle
);
}
//Most of the transaction id info and unique id info supplied to commit
//is nonsense since it isn't currently supplied with a transaction id
//but it is fine since export isn't currently using the info
commit(lastCommittedSpHandle, spHandle, spHandle, uniqueId, false, false);
// Compute the upper bound on bytes required to serialize tuple.
// exportxxx: can memoize this calculation.
tupleMaxLength = computeOffsets(tuple, &rowHeaderSz);
if (!m_currBlock) {
extendBufferChain(m_defaultCapacity);
}
if (m_currBlock->remaining() < tupleMaxLength) {
extendBufferChain(tupleMaxLength);
}
// initialize the full row header to 0. This also
// has the effect of setting each column non-null.
::memset(m_currBlock->mutableDataPtr(), 0, rowHeaderSz);
// the nullarray lives in rowheader after the 4 byte header length prefix
uint8_t *nullArray =
reinterpret_cast<uint8_t*>(m_currBlock->mutableDataPtr() + sizeof (int32_t));
// position the serializer after the full rowheader
ExportSerializeOutput io(m_currBlock->mutableDataPtr() + rowHeaderSz,
m_currBlock->remaining() - rowHeaderSz);
// write metadata columns
io.writeLong(spHandle);
io.writeLong(timestamp);
io.writeLong(seqNo);
io.writeLong(m_partitionId);
io.writeLong(m_siteId);
// use 1 for INSERT EXPORT op, 0 for DELETE EXPORT op
io.writeByte(static_cast<int8_t>((type == INSERT) ? 1L : 0L));
// write the tuple's data
tuple.serializeToExport(io, METADATA_COL_CNT, nullArray);
// write the row size in to the row header
// rowlength does not include the 4 byte row header
// but does include the null array.
ExportSerializeOutput hdr(m_currBlock->mutableDataPtr(), 4);
hdr.writeInt((int32_t)(io.position()) + (int32_t)rowHeaderSz - 4);
// update m_offset
m_currBlock->consumed(rowHeaderSz + io.position());
// update uso.
const size_t startingUso = m_uso;
m_uso += (rowHeaderSz + io.position());
// std::cout << "Appending row " << rowHeaderSz + io.position() << " to uso " << m_currBlock->uso() << " offset " << m_currBlock->offset() << std::endl;
return startingUso;
}
示例4: io
// helper to make a schema, a tuple and serialize to a buffer
//
// Builds a schema restricted to the columns in keep_offsets, fills a tuple
// with fixed test values (or all nulls when `nulls` is true), serializes it
// for export into dataPtr/nullArray, and returns the number of bytes written.
// NOTE(review): dataPtr is assumed to point at a buffer of >= 2048 bytes
// (the capacity passed to ExportSerializeOutput below) - confirm at callers.
size_t
TableTupleExportTest::serElSize(std::vector<uint16_t> &keep_offsets,
                                uint8_t *nullArray, char *dataPtr, bool nulls)
{
    TableTuple *tt;
    TupleSchema *ts;
    char buf[1024]; // tuple data
    ts = TupleSchema::createTupleSchema(m_schema, keep_offsets);
    tt = new TableTuple(buf, ts);
    // assuming all Export tuples were allocated for persistent
    // storage and choosing set* api accordingly here.
    switch (ts->columnCount()) {
        // note my sophisticated and clever use of fall through:
        // a schema of N columns sets columns N-1 down through 0.
    case 8:
        {
            NValue nv = ValueFactory::getStringValue("abcdeabcdeabcdeabcde"); // 20 char
            // NOTE(review): when nulls is set, nv.free() runs again after
            // setNull(); this assumes freeing a null NValue is a no-op - confirm.
            if (nulls) { nv.free(); nv.setNull(); }
            tt->setNValueAllocateForObjectCopies(7, nv, NULL);
            nv.free();
        }
    case 7:
        {
            NValue nv = ValueFactory::getStringValue("ABCDEabcde"); // 10 char
            if (nulls) { nv.free(); nv.setNull(); }
            tt->setNValueAllocateForObjectCopies(6, nv, NULL);
            nv.free();
        }
    case 6:
        {
            NValue nv = ValueFactory::getDecimalValueFromString("-12.34");
            if (nulls) { nv.free(); nv.setNull(); }
            tt->setNValueAllocateForObjectCopies(5, nv, NULL);
            nv.free();
        }
    case 5:
        {
            NValue nv = ValueFactory::getTimestampValue(9999);
            if (nulls) nv.setNull();
            tt->setNValueAllocateForObjectCopies(4, nv, NULL);
            nv.free();
        }
    case 4:
        {
            NValue nv = ValueFactory::getBigIntValue(1024);
            if (nulls) nv.setNull();
            tt->setNValueAllocateForObjectCopies(3, nv, NULL);
            nv.free();
        }
    case 3:
        {
            NValue nv = ValueFactory::getIntegerValue(512);
            if (nulls) nv.setNull();
            tt->setNValueAllocateForObjectCopies(2, nv, NULL);
            nv.free();
        }
    case 2:
        {
            NValue nv = ValueFactory::getSmallIntValue(256);
            if (nulls) nv.setNull();
            tt->setNValueAllocateForObjectCopies(1, nv, NULL);
            nv.free();
        }
    case 1:
        {
            NValue nv = ValueFactory::getTinyIntValue(120);
            if (nulls) nv.setNull();
            tt->setNValueAllocateForObjectCopies(0, nv, NULL);
            nv.free();
        }
        break;
    default:
        // this is an error in the test fixture.
        EXPECT_EQ(0,1);
        break;
    }
    // The function under test!
    // 0 metadata columns: serialize user columns only.
    ExportSerializeOutput io(dataPtr, 2048);
    tt->serializeToExport(io, 0, nullArray);
    // and cleanup
    tt->freeObjectColumns();
    delete tt;
    TupleSchema::freeTupleSchema(ts);
    return io.position();
}