Java RandomAccessReader.seek Method Code Examples

This article collects typical usage examples of the Java method org.apache.cassandra.io.util.RandomAccessReader.seek. If you are wondering what RandomAccessReader.seek does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.io.util.RandomAccessReader.


The following presents 11 code examples of RandomAccessReader.seek, ordered by popularity.
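Before the numbered examples, a minimal sketch of the pattern they all share may help: open a reader, seek to an absolute byte offset, read from that position, and close the reader. This is a sketch assuming the Cassandra 2.x-era RandomAccessReader API that the examples below use (RandomAccessReader.open(File), getPosition(), length()); the file path is hypothetical.

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.util.RandomAccessReader;

public class SeekSketch
{
    public static void main(String[] args) throws IOException
    {
        // Hypothetical data file; substitute a real path.
        RandomAccessReader reader = RandomAccessReader.open(new File("/tmp/example-Data.db"));
        try
        {
            // Position the reader at an absolute byte offset; the next read starts there.
            reader.seek(16);
            int value = reader.readInt();
            System.out.printf("position=%d of %d, value=%d%n",
                              reader.getPosition(), reader.length(), value);
        }
        finally
        {
            reader.close();
        }
    }
}

Note that seek takes an absolute offset from the start of the file, not a relative skip, which is why the examples below compute the target position first (a commit-log sync-marker offset, a RowIndexEntry.position, a chunk-aligned section start) and then seek directly to it.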

Example 1: readSyncMarker

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private int readSyncMarker(CommitLogDescriptor descriptor, int offset, RandomAccessReader reader) throws IOException
{
    if (offset > reader.length() - CommitLogSegment.SYNC_MARKER_SIZE)
    {
        if (offset != reader.length() && offset != Integer.MAX_VALUE)
            logger.warn("Encountered bad header at position {} of Commit log {}; not enough room for a header", offset, reader.getPath());
        // cannot possibly be a header here. if we're == length(), assume it's a correctly written final segment
        return -1;
    }
    reader.seek(offset);
    PureJavaCrc32 crc = new PureJavaCrc32();
    crc.updateInt((int) (descriptor.id & 0xFFFFFFFFL));
    crc.updateInt((int) (descriptor.id >>> 32));
    crc.updateInt((int) reader.getPosition());
    int end = reader.readInt();
    long filecrc;
    if (descriptor.version < CommitLogDescriptor.VERSION_21)
        filecrc = reader.readLong();
    else
        filecrc = reader.readInt() & 0xffffffffL;
    if (crc.getValue() != filecrc)
    {
        if (end != 0 || filecrc != 0)
        {
            logger.warn("Encountered bad header at position {} of commit log {}, with invalid CRC. The end of segment marker should be zero.", offset, reader.getPath());
        }
        return -1;
    }
    else if (end < offset || end > reader.length())
    {
        logger.warn("Encountered bad header at position {} of commit log {}, with bad position but valid CRC", offset, reader.getPath());
        return -1;
    }
    return end;
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 36, Source: CommitLogReplayer.java

Example 2: readSyncMarker

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private int readSyncMarker(CommitLogDescriptor descriptor, int offset, RandomAccessReader reader, boolean tolerateTruncation) throws IOException
{
    if (offset > reader.length() - CommitLogSegment.SYNC_MARKER_SIZE)
    {
        // There was no room in the segment to write a final header. No data could be present here.
        return -1;
    }
    reader.seek(offset);
    CRC32 crc = new CRC32();
    updateChecksumInt(crc, (int) (descriptor.id & 0xFFFFFFFFL));
    updateChecksumInt(crc, (int) (descriptor.id >>> 32));
    updateChecksumInt(crc, (int) reader.getPosition());
    int end = reader.readInt();
    long filecrc = reader.readInt() & 0xffffffffL;
    if (crc.getValue() != filecrc)
    {
        if (end != 0 || filecrc != 0)
        {
            handleReplayError(false,
                              "Encountered bad header at position %d of commit log %s, with invalid CRC. " +
                              "The end of segment marker should be zero.",
                              offset, reader.getPath());
        }
        return -1;
    }
    else if (end < offset || end > reader.length())
    {
        handleReplayError(tolerateTruncation, "Encountered bad header at position %d of commit log %s, with bad position but valid CRC",
                          offset, reader.getPath());
        return -1;
    }
    return end;
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 34, Source: CommitLogReplayer.java

Example 3: reset

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public void reset()
{
    if (!(input instanceof RandomAccessReader))
        throw new UnsupportedOperationException();

    RandomAccessReader file = (RandomAccessReader) input;
    file.seek(columnPosition);
    inputWithTracker.reset(headerSize());
}
 
Author: dprguiuc, Project: Cassandra-Wasef, Lines: 10, Source: SSTableIdentityIterator.java

Example 4: readHeader

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private int readHeader(long segmentId, int offset, RandomAccessReader reader) throws IOException
{
    if (offset > reader.length() - CommitLogSegment.SYNC_MARKER_SIZE)
    {
        if (offset != reader.length() && offset != Integer.MAX_VALUE)
            logger.warn("Encountered bad header at position {} of Commit log {}; not enough room for a header");
        // cannot possibly be a header here. if we're == length(), assume it's a correctly written final segment
        return -1;
    }
    reader.seek(offset);
    PureJavaCrc32 crc = new PureJavaCrc32();
    crc.update((int) (segmentId & 0xFFFFFFFFL));
    crc.update((int) (segmentId >>> 32));
    crc.update((int) reader.getPosition());
    int end = reader.readInt();
    long filecrc = reader.readLong();
    if (crc.getValue() != filecrc)
    {
        if (end != 0 || filecrc != 0)
        {
            logger.warn("Encountered bad header at position {} of commit log {}, with invalid CRC. The end of segment marker should be zero.", offset, reader.getPath());
        }
        return -1;
    }
    else if (end < offset || end > reader.length())
    {
        logger.warn("Encountered bad header at position {} of commit log {}, with bad position but valid CRC", offset, reader.getPath());
        return -1;
    }
    return end;
}
 
Author: mafernandez-stratio, Project: cassandra-cqlMod, Lines: 32, Source: CommitLogReplayer.java

Example 5: export

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @param metadata Metadata to print keys in a proper format
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes, CFMetaData metadata) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();
    try
    {
        IPartitioner partitioner = sstable.partitioner;

        if (excludes != null)
            toExport.removeAll(Arrays.asList(excludes));

        outs.println("[");

        int i = 0;

        // last key to compare order
        DecoratedKey lastKey = null;

        for (String key : toExport)
        {
            DecoratedKey decoratedKey = partitioner.decorateKey(metadata.getKeyValidator().fromString(key));

            if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

            lastKey = decoratedKey;

            RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
            if (entry == null)
                continue;

            dfile.seek(entry.position);
            ByteBufferUtil.readWithShortLength(dfile); // row key
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));

            Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);
            checkStream(outs);

            if (i != 0)
                outs.println(",");
            i++;
            serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
        }

        outs.println("\n]");
        outs.flush();
    }
    finally
    {
        dfile.close();
    }
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 63, Source: SSTableExport.java

Example 6: write

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Stream file of specified sections to given channel.
 *
 * StreamWriter uses LZF compression on wire to decrease size to transfer.
 *
 * @param channel where this writes data to
 * @throws IOException on any I/O error
 */
public void write(WritableByteChannel channel) throws IOException
{
    long totalSize = totalSize();
    RandomAccessReader file = sstable.openDataReader();
    ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                : null;
    transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

    // setting up data compression stream
    compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
    long progress = 0L;

    try
    {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int readOffset = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length)
            {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
                readOffset = 0;
            }

            // make sure that the current section is sent
            compressedOutput.flush();
        }
    }
    finally
    {
        // no matter what happens close file
        FileUtils.closeQuietly(file);
        FileUtils.closeQuietly(validator);
    }
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 58, Source: StreamWriter.java

Example 7: export

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();

    IPartitioner<?> partitioner = sstable.partitioner;

    if (excludes != null)
        toExport.removeAll(Arrays.asList(excludes));

    outs.println("[");

    int i = 0;

    // last key to compare order
    DecoratedKey lastKey = null;

    for (String key : toExport)
    {
        DecoratedKey decoratedKey = partitioner.decorateKey(hexToBytes(key));

        if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
            throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

        lastKey = decoratedKey;

        RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
        if (entry == null)
            continue;

        dfile.seek(entry.position);
        ByteBufferUtil.readWithShortLength(dfile); // row key
        if (sstable.descriptor.version.hasRowSizeAndColumnCount)
            dfile.readLong(); // row size
        DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
        int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt() : Integer.MAX_VALUE;

        Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, columnCount, sstable.descriptor.version);

        checkStream(outs);

        if (i != 0)
            outs.println(",");
        i++;
        serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
    }

    outs.println("\n]");
    outs.flush();
}
 
Author: pgaref, Project: ACaZoo, Lines: 60, Source: SSTableExport.java

Example 8: write

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Stream file of specified sections to given channel.
 *
 * StreamWriter uses LZF compression on wire to decrease size to transfer.
 *
 * @param channel where this writes data to
 * @throws IOException on any I/O error
 */
public void write(WritableByteChannel channel) throws IOException
{
    long totalSize = totalSize();
    RandomAccessReader file = sstable.openDataReader();
    ChecksumValidator validator = null;
    if (new File(sstable.descriptor.filenameFor(Component.CRC)).exists())
        validator = DataIntegrityMetadata.checksumValidator(sstable.descriptor);

    transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

    // setting up data compression stream
    compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
    long progress = 0L;

    try
    {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int skipBytes = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesTransferred = 0;
            while (bytesTransferred < length)
            {
                long lastWrite = write(file, validator, skipBytes, length, bytesTransferred);
                bytesTransferred += lastWrite;
                progress += lastWrite;
                session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
                skipBytes = 0;
            }

            // make sure that the current section is sent
            compressedOutput.flush();
        }
    }
    finally
    {
        // no matter what happens close file
        FileUtils.closeQuietly(file);
    }

    // release reference only when completed successfully
    sstable.releaseReference();
}
 
Author: pgaref, Project: ACaZoo, Lines: 61, Source: StreamWriter.java

Example 9: export

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @param metadata Metadata to print keys in a proper format
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes, CFMetaData metadata) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();
    try
    {
        IPartitioner<?> partitioner = sstable.partitioner;

        if (excludes != null)
            toExport.removeAll(Arrays.asList(excludes));

        outs.println("[");

        int i = 0;

        // last key to compare order
        DecoratedKey lastKey = null;

        for (String key : toExport)
        {
            DecoratedKey decoratedKey = partitioner.decorateKey(metadata.getKeyValidator().fromString(key));

            if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

            lastKey = decoratedKey;

            RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
            if (entry == null)
                continue;

            dfile.seek(entry.position);
            ByteBufferUtil.readWithShortLength(dfile); // row key
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));

            Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);
            checkStream(outs);

            if (i != 0)
                outs.println(",");
            i++;
            serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
        }

        outs.println("\n]");
        outs.flush();
    }
    finally
    {
        dfile.close();
    }
}
 
Author: daidong, Project: GraphTrek, Lines: 63, Source: SSTableExport.java

Example 10: write

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Stream file of specified sections to given channel.
 *
 * StreamWriter uses LZF compression on wire to decrease size to transfer.
 *
 * @param channel where this writes data to
 * @throws IOException on any I/O error
 */
public void write(WritableByteChannel channel) throws IOException
{
    long totalSize = totalSize();
    RandomAccessReader file = sstable.openDataReader();
    ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                : null;
    transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

    // setting up data compression stream
    compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
    long progress = 0L;

    try
    {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int skipBytes = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesTransferred = 0;
            while (bytesTransferred < length)
            {
                long lastWrite = write(file, validator, skipBytes, length, bytesTransferred);
                bytesTransferred += lastWrite;
                progress += lastWrite;
                session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
                skipBytes = 0;
            }

            // make sure that the current section is sent
            compressedOutput.flush();
        }
    }
    finally
    {
        // no matter what happens close file
        FileUtils.closeQuietly(file);
        FileUtils.closeQuietly(validator);
    }
}
 
Author: daidong, Project: GraphTrek, Lines: 58, Source: StreamWriter.java

Example 11: export

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();

    IPartitioner<?> partitioner = sstable.partitioner;

    if (excludes != null)
        toExport.removeAll(Arrays.asList(excludes));

    outs.println("[");

    int i = 0;

    // last key to compare order
    DecoratedKey lastKey = null;

    for (String key : toExport)
    {
        DecoratedKey decoratedKey = partitioner.decorateKey(hexToBytes(key));

        if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
            throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

        lastKey = decoratedKey;

        RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
        if (entry == null)
            continue;

        dfile.seek(entry.position);
        ByteBufferUtil.readWithShortLength(dfile); // row key
        DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
        Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);

        checkStream(outs);

        if (i != 0)
            outs.println(",");
        i++;
        serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
    }

    outs.println("\n]");
    outs.flush();
}
 
Author: rajath26, Project: cassandra-trunk, Lines: 56, Source: SSTableExport.java


Note: The org.apache.cassandra.io.util.RandomAccessReader.seek examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.