

Java SSTableWriter.abort Method Code Examples

This article collects typical usage examples of the Java method org.apache.cassandra.io.sstable.SSTableWriter.abort. If you are unsure what SSTableWriter.abort does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.io.sstable.SSTableWriter.


The sections below present 15 code examples of the SSTableWriter.abort method, sorted by popularity by default.
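Every example follows the same contract: if anything goes wrong after an SSTableWriter has been created, call abort() so the partially written sstable and its temporary files are discarded instead of being closed and opened as a reader. The minimal sketch below is not taken from this page; createWriter, writeRow, in, cfs, totalSize, and repairedAt stand in for the helpers and fields used in the real examples, and Throwables.propagate is Guava's rethrow helper:

SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
try
{
    while (in.getBytesRead() < totalSize)
        writeRow(writer, in, cfs);       // append streamed rows to the new sstable
    return writer.closeAndOpenReader();  // success: finalize and open for reads
}
catch (Throwable t)
{
    writer.abort();                      // failure: delete the partial sstable and temp files
    throw Throwables.propagate(t);       // rethrow, preserving IOException semantics as the examples do
}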

Example 1: abort

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * Abort this task.
 * If the task already received all files and
 * {@link org.apache.cassandra.streaming.StreamReceiveTask.OnCompletionRunnable} task is submitted,
 * then task cannot be aborted.
 */
public synchronized void abort()
{
    if (done)
        return;

    done = true;
    for (SSTableWriter writer : sstables)
        writer.abort();
    sstables.clear();
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 17, Source: StreamReceiveTask.java

Example 2: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @param channel where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
public SSTableWriter read(ReadableByteChannel channel) throws IOException
{
    logger.debug("reading file from {}, repairedAt = {}", session.peer, repairedAt);
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    if (kscf == null)
    {
        // schema was dropped during streaming
        throw new IOException("CF " + cfId + " was dropped during streaming");
    }
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
    DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
    BytesReadTracker in = new BytesReadTracker(dis);
    try
    {
        while (in.getBytesRead() < totalSize)
        {
            writeRow(writer, in, cfs);
            // TODO move this to BytesReadTracker
            session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
        }
        return writer;
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(dis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 42, Source: StreamReader.java

Example 3: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @return SSTable transferred
 * @throws java.io.IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
@Override
public SSTableReader read(ReadableByteChannel channel) throws IOException
{
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize);

    CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums);
    BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
    try
    {
        for (Pair<Long, Long> section : sections)
        {
            long length = section.right - section.left;
            // skip to beginning of section inside chunk
            cis.position(section.left);
            in.reset(0);
            while (in.getBytesRead() < length)
            {
                writeRow(writer, in, cfs);
                // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
                session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
            }
        }
        return writer.closeAndOpenReader();
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(cis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: pgaref, Project: ACaZoo, Lines: 44, Source: CompressedStreamReader.java

Example 4: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @param channel where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
public SSTableReader read(ReadableByteChannel channel) throws IOException
{
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize);
    DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
    BytesReadTracker in = new BytesReadTracker(dis);
    try
    {
        while (in.getBytesRead() < totalSize)
        {
            writeRow(writer, in, cfs);
            // TODO move this to BytesReadTracker
            session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
        }
        return writer.closeAndOpenReader();
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(dis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: pgaref, Project: ACaZoo, Lines: 36, Source: StreamReader.java

Example 5: abort

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
public void abort()
{
    aborted = true;
    Runnable r = new Runnable()
    {
        public void run()
        {
            for (SSTableWriter writer : sstables)
                writer.abort();
        }
    };
    StorageService.tasks.submit(r);
}
 
Developer: rajath26, Project: cassandra-trunk, Lines: 14, Source: StreamReceiveTask.java

Example 6: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @param channel where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
public SSTableWriter read(ReadableByteChannel channel) throws IOException
{
    logger.info("reading file from {}, repairedAt = {}", session.peer, repairedAt);
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
    DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
    BytesReadTracker in = new BytesReadTracker(dis);
    try
    {
        while (in.getBytesRead() < totalSize)
        {
            writeRow(writer, in, cfs);
            // TODO move this to BytesReadTracker
            session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
        }
        return writer;
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(dis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: rajath26, Project: cassandra-trunk, Lines: 37, Source: StreamReader.java

Example 7: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing {}", Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        boolean trackContention = logger.isDebugEnabled();
        int heavilyContendedRowCount = 0;
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            AtomicBTreeColumns cf = entry.getValue();

            if (cf.isMarkedForDelete() && cf.hasColumns())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS))
                    continue;
            }

            if (trackContention && cf.usePessimisticLocking())
                heavilyContendedRowCount++;

            if (!cf.isEmpty())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            writer.isolateReferences();

            // temp sstables should contain non-repaired data.
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context);
        }

        if (heavilyContendedRowCount > 0)
            logger.debug(String.format("High update contention in %d/%d partitions of %s ", heavilyContendedRowCount, rows.size(), Memtable.this.toString()));

        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 65, Source: Memtable.java

Example 8: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @return SSTable transferred
 * @throws java.io.IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
@Override
public SSTableWriter read(ReadableByteChannel channel) throws IOException
{
    logger.debug("reading file from {}, repairedAt = {}", session.peer, repairedAt);
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    if (kscf == null)
    {
        // schema was dropped during streaming
        throw new IOException("CF " + cfId + " was dropped during streaming");
    }
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);

    CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums);
    BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
    try
    {
        for (Pair<Long, Long> section : sections)
        {
            long length = section.right - section.left;
            // skip to beginning of section inside chunk
            cis.position(section.left);
            in.reset(0);
            while (in.getBytesRead() < length)
            {
                writeRow(writer, in, cfs);
                // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
                session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
            }
        }
        return writer;
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(cis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 50, Source: CompressedStreamReader.java

Example 9: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.debug("Writing {}", Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicSortedColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS) && !(cf.getColumnCount() == 0))
                    continue;

                // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                // But it can result in unexpected behaviour where deletes never make it to disk,
                // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                // is a CF level tombstone to ensure the delete makes it into an SSTable.
                // We also shouldn't be dropping any columns obsoleted by partition and/or range tombstones in case
                // the table has secondary indexes, or else the stale entries wouldn't be cleaned up during compaction,
                // and will only be dropped during 2i query read-repair, if at all.
                if (!cfs.indexManager.hasIndexes())
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
            }
            writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.debug("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context.get());
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: pgaref, Project: ACaZoo, Lines: 60, Source: Memtable.java

Example 10: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
{
    logger.info("Writing {}", Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();

            if (cf.isMarkedForDelete() && cf.hasColumns())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS))
                    continue;
            }

            if (!cf.isEmpty())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            writer.isolateReferences();

            // temp sstables should contain non-repaired data.
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context);
        }

        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: daidong, Project: GraphTrek, Lines: 56, Source: Memtable.java

Example 11: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing " + Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.columnFamily.equals(SystemTable.BATCHLOG_CF) && cfs.table.name.equals(Table.SYSTEM_KS) && !cf.isEmpty())
                    continue;

                // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                // But it can result in unexpected behaviour where deletes never make it to disk,
                // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                // is a CF level tombstone to ensure the delete makes it into an SSTable.
                ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
            }
            writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context.get());
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: dprguiuc, Project: Cassandra-Wasef, Lines: 56, Source: Memtable.java

Example 12: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing {}", Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS) && !(cf.getColumnCount() == 0))
                    continue;
            }

            if (cf.getColumnCount() > 0 || cf.isMarkedForDelete())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context.get());
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: mafernandez-stratio, Project: cassandra-cqlMod, Lines: 52, Source: Memtable.java

Example 13: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing " + Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.columnFamily.equals(SystemTable.BATCHLOG_CF) && cfs.table.name.equals(Table.SYSTEM_KS) && !cf.isEmpty())
                    continue;

                // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                // But it can result in unexpected behaviour where deletes never make it to disk,
                // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                // is a CF level tombstone to ensure the delete makes it into an SSTable.
                // We also shouldn't be dropping any columns obsoleted by partition and/or range tombstones in case
                // the table has secondary indexes, or else the stale entries wouldn't be cleaned up during compaction,
                // and will only be dropped during 2i query read-repair, if at all.
                if (!cfs.indexManager.hasIndexes())
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
            }
            writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context.get());
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: wso2, Project: wso2-cassandra, Lines: 60, Source: Memtable.java

Example 14: writeSortedContents

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing {}", Memtable.this.toString());

    SSTableReader ssTable;
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();

            if (cf.isMarkedForDelete() && cf.hasColumns())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS))
                    continue;
            }

            if (!cf.isEmpty())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }

        if (writer.getFilePointer() > 0)
        {
            writer.isolateReferences();

            // temp sstables should contain non-repaired data.
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
        }
        else
        {
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                        context);
        }

        return ssTable;
    }
    catch (Throwable e)
    {
        writer.abort();
        throw Throwables.propagate(e);
    }
}
 
Developer: rajath26, Project: cassandra-trunk, Lines: 57, Source: Memtable.java

Example 15: read

import org.apache.cassandra.io.sstable.SSTableWriter; // import the package/class this method depends on
/**
 * @return SSTable transferred
 * @throws java.io.IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
@Override
public SSTableWriter read(ReadableByteChannel channel) throws IOException
{
    logger.info("reading file from {}, repairedAt = {}", session.peer, repairedAt);
    long totalSize = totalSize();

    Pair<String, String> kscf = Schema.instance.getCF(cfId);
    ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

    SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);

    CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums);
    BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
    try
    {
        for (Pair<Long, Long> section : sections)
        {
            long length = section.right - section.left;
            // skip to beginning of section inside chunk
            cis.position(section.left);
            in.reset(0);
            while (in.getBytesRead() < length)
            {
                writeRow(writer, in, cfs);
                // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
                session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
            }
        }
        return writer;
    }
    catch (Throwable e)
    {
        writer.abort();
        drain(cis, in.getBytesRead());
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw Throwables.propagate(e);
    }
}
 
Developer: rajath26, Project: cassandra-trunk, Lines: 45, Source: CompressedStreamReader.java


Note: the org.apache.cassandra.io.sstable.SSTableWriter.abort examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from community-contributed open-source projects; copyright of the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.