This article collects typical usage examples of the Java class org.apache.cassandra.io.sstable.format.SSTableFormat. If you are wondering what SSTableFormat is for or how to use it, the curated examples below should help.
The SSTableFormat class belongs to the org.apache.cassandra.io.sstable.format package. Fifteen code examples of the class are shown below, ordered by popularity.
Example 1: Descriptor
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public Descriptor(Version version, String directory, String ksname, String cfname, int generation,
                  SSTableFormat.Type formatType, Component digestComponent, Configuration configuration)
{
    assert version != null && directory != null && ksname != null && cfname != null &&
           formatType.info.getLatestVersion().getClass().equals(version.getClass());
    this.version = version;
    this.directory = directory;
    this.ksname = ksname;
    this.cfname = cfname;
    this.generation = generation;
    this.formatType = formatType;
    this.digestComponent = digestComponent;
    this.conf = configuration;
    hashCode = Objects.hashCode(version, this.directory, generation, ksname, cfname, formatType);
}
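The assertion ties the version to the format type: the Version object must come from the same SSTableFormat implementation that formatType names. A minimal sketch of obtaining a pair that satisfies it, assuming stock Cassandra 3.x internals (the class and variable names below are illustrative, not part of the example above):

import org.apache.cassandra.io.sstable.format.SSTableFormat;
import org.apache.cassandra.io.sstable.format.Version;

public class FormatVersionPair
{
    public static void main(String[] args)
    {
        SSTableFormat.Type formatType = SSTableFormat.Type.BIG;
        // Taking the version from the same format guarantees that
        // formatType.info.getLatestVersion().getClass().equals(version.getClass()) holds.
        Version version = formatType.info.getLatestVersion();
        System.out.println(formatType.name + " / " + version);
    }
}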
Example 2: FileMessageHeader
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public FileMessageHeader(UUID cfId,
                         int sequenceNumber,
                         Version version,
                         SSTableFormat.Type format,
                         long estimatedKeys,
                         List<Pair<Long, Long>> sections,
                         CompressionInfo compressionInfo,
                         long repairedAt,
                         int sstableLevel,
                         SerializationHeader.Component header)
{
    this.cfId = cfId;
    this.sequenceNumber = sequenceNumber;
    this.version = version;
    this.format = format;
    this.estimatedKeys = estimatedKeys;
    this.sections = sections;
    this.compressionInfo = compressionInfo;
    this.compressionMetadata = null;
    this.repairedAt = repairedAt;
    this.sstableLevel = sstableLevel;
    this.header = header;
    this.size = calculateSize();
}
Example 3: deserialize
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public FileMessageHeader deserialize(DataInputPlus in, int version) throws IOException
{
    UUID cfId = UUIDSerializer.serializer.deserialize(in, MessagingService.current_version);
    int sequenceNumber = in.readInt();
    Version sstableVersion = DatabaseDescriptor.getSSTableFormat().info.getVersion(in.readUTF());
    SSTableFormat.Type format = SSTableFormat.Type.LEGACY;
    if (version >= StreamMessage.VERSION_22)
        format = SSTableFormat.Type.validate(in.readUTF());
    long estimatedKeys = in.readLong();
    int count = in.readInt();
    List<Pair<Long, Long>> sections = new ArrayList<>(count);
    for (int k = 0; k < count; k++)
        sections.add(Pair.create(in.readLong(), in.readLong()));
    CompressionInfo compressionInfo = CompressionInfo.serializer.deserialize(in, MessagingService.current_version);
    long repairedAt = in.readLong();
    int sstableLevel = in.readInt();
    SerializationHeader.Component header = version >= StreamMessage.VERSION_30 && sstableVersion.storeRows()
                                         ? SerializationHeader.serializer.deserialize(sstableVersion, in)
                                         : null;
    return new FileMessageHeader(cfId, sequenceNumber, sstableVersion, format, estimatedKeys, sections, compressionInfo, repairedAt, sstableLevel, header);
}
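The deserializer defaults format to LEGACY and only reads it from the stream when the peer's protocol version is at least VERSION_22, which keeps old senders compatible. A self-contained illustration of that version-gating pattern using plain Java streams (the constant's value and all names here are stand-ins for the demo, not Cassandra's):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedField
{
    static final int VERSION_22 = 4; // stand-in for the real protocol constant

    static String readFormat(DataInputStream in, int peerVersion) throws IOException
    {
        String format = "LEGACY";        // default for peers too old to send the field
        if (peerVersion >= VERSION_22)
            format = in.readUTF();       // newer peers always write it
        return format;
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeUTF("BIG");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(readFormat(in, VERSION_22)); // prints BIG
    }
}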
Example 4: Descriptor
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public Descriptor(Version version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType, Component digestComponent)
{
    assert version != null && directory != null && ksname != null && cfname != null && formatType.info.getLatestVersion().getClass().equals(version.getClass());
    this.version = version;
    try
    {
        this.directory = directory.getCanonicalFile();
    }
    catch (IOException e)
    {
        throw new IOError(e);
    }
    this.ksname = ksname;
    this.cfname = cfname;
    this.generation = generation;
    this.formatType = formatType;
    this.digestComponent = digestComponent;
    hashCode = Objects.hashCode(version, this.directory, generation, ksname, cfname, formatType);
}
Example 5: testFromFilenameFor
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
private void testFromFilenameFor(File dir)
{
    // normal
    checkFromFilename(new Descriptor(dir, ksname, cfname, 1), false);
    // skip component (for streaming lock file)
    checkFromFilename(new Descriptor(dir, ksname, cfname, 2), true);
    // secondary index
    String idxName = "myidx";
    File idxDir = new File(dir.getAbsolutePath() + File.separator + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
    checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4), false);
    // legacy version
    checkFromFilename(new Descriptor("ja", dir, ksname, cfname, 1, SSTableFormat.Type.LEGACY), false);
    // legacy secondary index
    checkFromFilename(new Descriptor("ja", dir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 3, SSTableFormat.Type.LEGACY), false);
}
Example 6: appendFileName
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
private void appendFileName(StringBuilder buff)
{
    if (!version.hasNewFileName())
    {
        buff.append(ksname).append(separator);
        buff.append(cfname).append(separator);
    }
    buff.append(version).append(separator);
    buff.append(generation);
    if (formatType != SSTableFormat.Type.LEGACY)
        buff.append(separator).append(formatType.name);
}
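To make the two layouts concrete, here is a runnable sketch that rebuilds both name styles from hand-picked sample values (the real method reads them from the Descriptor's fields; '-' is the separator used in Cassandra's sstable file naming):

public class SSTableFileNames
{
    public static void main(String[] args)
    {
        char separator = '-';
        // Pre-2.2 layout (!version.hasNewFileName()): keyspace and table lead,
        // and the format type is omitted entirely for LEGACY.
        String legacy = "ks" + separator + "cf" + separator + "ja" + separator + 1;
        // 2.2+ layout: version, generation, then the format name.
        String current = "ma" + separator + 2 + separator + "big";
        System.out.println(legacy);   // ks-cf-ja-1
        System.out.println(current);  // ma-2-big
    }
}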
Example 7: createSSTableWriter
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public static SSTableWriter createSSTableWriter(final Descriptor inputSSTableDescriptor,
                                                final CFMetaData outCfmMetaData,
                                                final SSTableReader inputSSTable) {
    final String sstableDirectory = System.getProperty("user.dir") + "/cassandra/compresseddata";
    LOGGER.info("Output directory: " + sstableDirectory);
    final File outputDirectory = new File(sstableDirectory + File.separatorChar
            + inputSSTableDescriptor.ksname
            + File.separatorChar + inputSSTableDescriptor.cfname);
    if (!outputDirectory.exists() && !outputDirectory.mkdirs()) {
        throw new FSWriteError(new IOException("failed to create tmp directory"),
                outputDirectory.getAbsolutePath());
    }
    final SSTableFormat.Type sstableFormat = SSTableFormat.Type.BIG;
    final BigTableWriter writer = new BigTableWriter(
            new Descriptor(
                    sstableFormat.info.getLatestVersion().getVersion(),
                    outputDirectory.getAbsolutePath(),
                    inputSSTableDescriptor.ksname, inputSSTableDescriptor.cfname,
                    inputSSTableDescriptor.generation,
                    sstableFormat,
                    inputSSTableDescriptor.getConfiguration()),
            inputSSTable.getTotalRows(), 0L, outCfmMetaData,
            new MetadataCollector(outCfmMetaData.comparator)
                    .sstableLevel(inputSSTable.getSSTableMetadata().sstableLevel),
            new SerializationHeader(true,
                    outCfmMetaData, outCfmMetaData.partitionColumns(),
                    org.apache.cassandra.db.rows.EncodingStats.NO_STATS));
    return writer;
}
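A hypothetical caller of this helper might look like the sketch below. None of it is part of the example itself; SSTableReader.open and SSTableWriter.finish/abort are stock Cassandra 3.x APIs, but this fork's overloads may differ:

// Illustrative usage sketch:
// SSTableReader input = SSTableReader.open(inputSSTableDescriptor);
// SSTableWriter writer = createSSTableWriter(inputSSTableDescriptor, cfm, input);
// ... append partitions scanned from input ...
// writer.finish(true);   // or writer.abort() if anything went wrong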
Example 8: run
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public void run() {
    try {
        Descriptor desc = new Descriptor(SSTableFormat.Type.BIG.info.getLatestVersion(),
                                         new File("."),
                                         "keyspace",
                                         "table",
                                         0,
                                         SSTableFormat.Type.BIG,
                                         Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType()));
        SSTableTxnWriter out = SSTableTxnWriter.create(metadata,
                                                       desc,
                                                       0,
                                                       ActiveRepairService.UNREPAIRED_SSTABLE,
                                                       0,
                                                       SerializationHeader.make(metadata, sstables),
                                                       Collections.emptySet());
        System.out.println("Merging " + sstables.size() + " sstables to " + desc.filenameFor(Component.DATA));
        UnfilteredPartitionIterator merged =
                UnfilteredPartitionIterators.mergeLazily(
                        sstables.stream()
                                .map(SSTableReader::getScanner)
                                .collect(Collectors.toList()),
                        FBUtilities.nowInSeconds());
        while (merged.hasNext()) {
            out.append(merged.next());
        }
        out.finish(false);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 9: createWriter
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
protected SSTableMultiWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt, SSTableFormat.Type format) throws IOException
{
    Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
    if (localDir == null)
        throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
    desc = Descriptor.fromFilename(cfs.getSSTablePath(cfs.getDirectories().getLocationForDisk(localDir), format));
    return cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, sstableLevel, getHeader(cfs.metadata), session.getTransaction(cfId));
}
Example 10: getDescriptor
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
/**
 * Get a descriptor for the legacy sstable at the given version.
 */
protected Descriptor getDescriptor(String legacyVersion, String table)
{
    return new Descriptor(legacyVersion, getTableDir(legacyVersion, table), "legacy_tables", table, 1,
                          BigFormat.instance.getVersion(legacyVersion).hasNewFileName()
                          ? SSTableFormat.Type.BIG : SSTableFormat.Type.LEGACY);
}
Example 11: withFormatType
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public Descriptor withFormatType(SSTableFormat.Type newType)
{
    return new Descriptor(newType.info.getLatestVersion(), directory, ksname, cfname, generation, newType,
                          digestComponent, conf);
}
Example 12: getFormat
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public SSTableFormat getFormat()
{
    return formatType.info;
}
Example 13: fromFilename
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public static Descriptor fromFilename(String filename, SSTableFormat.Type formatType, Configuration configuration)
{
    return fromFilename(filename, configuration).withFormatType(formatType);
}
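A hedged usage sketch for this overload. The Configuration parameter is this fork's addition; stock Cassandra 3.x exposes Descriptor.fromFilename(String) without it, and the path and configuration below are placeholders:

// Illustrative only:
// Descriptor desc = fromFilename("/data/ks/cf/ma-1-big-Data.db",
//                                SSTableFormat.Type.BIG, configuration);
// desc.getFormat(); // now reports the forced type's SSTableFormat (see Example 12)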
Example 14: write
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
public List<String> write(Iterator<T> data) throws IOException {
    SSTableTxnWriter writer = null;
    try {
        CFMetaData outputCFMetaData = setCFMetadataWithParams(origCFMetaData,
                cassTable.getKeyspaceName(),
                cassTable.getTableName());
        Descriptor outDescriptor = new Descriptor(BigFormat.latestVersion.getVersion(),
                outLocation,
                cassTable.getKeyspaceName(),
                cassTable.getTableName(),
                generation++,
                SSTableFormat.Type.BIG,
                conf);
        SerializationHeader header = new SerializationHeader(true,
                outputCFMetaData,
                outputCFMetaData.partitionColumns(),
                EncodingStats.NO_STATS);
        // TODO: fix these settings
        writer = SSTableTxnWriter.createWithNoLogging(outputCFMetaData, outDescriptor, 4, -1, 1, header);
        while (data.hasNext())
            writer.append(data.next());
    } catch (Exception e) {
        LOGGER.info(e.getMessage());
        throw e;
    } finally {
        if (writer != null) {
            writer.finish();
            LOGGER.info("Done saving sstable to: " + outLocation);
        }
        FileUtils.closeQuietly(writer);
    }
    List<String> retVal = new LinkedList<>();
    retVal.add(outLocation);
    return retVal;
}
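For reference, a hedged reading of the positional arguments in the createWithNoLogging call above, inferred from the matching SSTableTxnWriter.create overloads in Cassandra 3.x (verify against your fork before relying on it):

// writer = SSTableTxnWriter.createWithNoLogging(
//         outputCFMetaData, // table schema
//         outDescriptor,    // output location, generation and format
//         4,                // estimated key count (placeholder, hence the TODO)
//         -1,               // repairedAt timestamp (placeholder)
//         1,                // sstable level used by leveled compaction
//         header);          // column serialization header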
Example 15: testCreatingSSTableWithTnx
import org.apache.cassandra.io.sstable.format.SSTableFormat; // import the required package/class
/**
 * Test creating sstable files using SSTableTxnWriter.
 * @throws IOException
 */
@Test
public void testCreatingSSTableWithTnx() throws IOException {
    final String inputSSTableFullPathFileName = CASS3_DATA_DIR + "keyspace1/bills_compress/mc-6-big-Data.db";
    final Descriptor descriptor = Descriptor.fromFilename(inputSSTableFullPathFileName,
            TestBaseSSTableFunSuite.HADOOP_CONF);
    final CFMetaData inputCFMetaData =
            SSTableUtils.metaDataFromSSTable(inputSSTableFullPathFileName,
                    "casspactor",
                    "bills_compress",
                    Collections.<String>emptyList(),
                    Collections.<String>emptyList(),
                    TestBaseSSTableFunSuite.HADOOP_CONF);
    final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(descriptor, inputCFMetaData);
    final SerializationHeader header = new SerializationHeader(true, outputCFMetaData,
            inputCFMetaData.partitionColumns(),
            EncodingStats.NO_STATS);
    final Descriptor outDescriptor = new Descriptor(
            SSTableFormat.Type.BIG.info.getLatestVersion().getVersion(),
            "/tmp",
            "casspactor",
            "bills_compress",
            9,
            SSTableFormat.Type.BIG, TestBaseSSTableFunSuite.HADOOP_CONF);
    final SSTableTxnWriter writer = SSTableTxnWriter.create(outputCFMetaData,
            outDescriptor,
            4,
            -1,
            1,
            header);
    final ColumnDefinition staticCollDef =
            ColumnDefinition.staticDef(inputCFMetaData, ByteBuffer.wrap("balance".getBytes()), Int32Type.instance);
    final ColumnDefinition regCollDef1 =
            ColumnDefinition.regularDef(inputCFMetaData, ByteBuffer.wrap("amount".getBytes()), Int32Type.instance);
    final ColumnDefinition regCollDef2 =
            ColumnDefinition.regularDef(inputCFMetaData, ByteBuffer.wrap("name".getBytes()), UTF8Type.instance);
    final DecoratedKey key = Murmur3Partitioner.instance.decorateKey(ByteBuffer.wrap("user1".getBytes()));
    final long now = System.currentTimeMillis();
    final Row.Builder builder = BTreeRow.sortedBuilder();
    builder.newRow(Clustering.STATIC_CLUSTERING);
    builder.addCell(BufferCell.live(staticCollDef, now, Int32Type.instance.decompose(123)));
    final PartitionUpdate partitionUpdate = PartitionUpdate.singleRowUpdate(inputCFMetaData,
            key, builder.build());
    final Row.Builder builder2 = BTreeRow.sortedBuilder();
    final Clustering clustering2 = new BufferClustering(Int32Type.instance.decompose(10000));
    builder2.newRow(clustering2);
    builder2.addCell(BufferCell.live(regCollDef1, now, Int32Type.instance.decompose(5)));
    builder2.addCell(BufferCell.live(regCollDef2, now, UTF8Type.instance.decompose("minh1")));
    final PartitionUpdate partitionUpdate2 = PartitionUpdate.singleRowUpdate(inputCFMetaData,
            key, builder2.build());
    final List<PartitionUpdate> partitionUpdates = new ArrayList<PartitionUpdate>() {
        private static final long serialVersionUID = 1L;
        {
            add(partitionUpdate);
            add(partitionUpdate2);
        }
    };
    final PartitionUpdate mergedUpdate = PartitionUpdate.merge(partitionUpdates);
    writer.append(mergedUpdate.unfilteredIterator());
    writer.finish(false);
}