本文整理汇总了Java中org.apache.cassandra.io.sstable.Descriptor.filenameFor方法的典型用法代码示例。如果您正苦于以下问题:Java Descriptor.filenameFor方法的具体用法?Java Descriptor.filenameFor怎么用?Java Descriptor.filenameFor使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.io.sstable.Descriptor
的用法示例。
在下文中一共展示了Descriptor.filenameFor方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: deserialize
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Loads the requested metadata components for an sstable from its
 * Statistics.db file on the Hadoop filesystem, falling back to default
 * (empty) stats when no stats component exists on disk.
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.trace("Load metadata for {}", descriptor);
    String statsPath = descriptor.filenameFor(Component.STATS);
    if (HadoopFileUtils.exists(statsPath, descriptor.getConfiguration()))
    {
        // Stats component present: read the requested metadata types from it.
        try (RandomAccessReader reader = RandomAccessReader.open(statsPath, descriptor.getConfiguration()))
        {
            return deserialize(descriptor, reader, types);
        }
    }
    // No stats on disk; synthesize default stats so callers always get a STATS entry.
    logger.trace("No sstable stats for {}", descriptor);
    Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
    defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    return defaults;
}
示例2: deserialize
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Loads the requested metadata components for an sstable from its local
 * Statistics.db file, falling back to default (empty) stats when the
 * stats component is missing.
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.debug("Load metadata for {}", descriptor);
    File statsComponent = new File(descriptor.filenameFor(Component.STATS));
    if (statsComponent.exists())
    {
        // Stats component present: read the requested metadata types from it.
        try (RandomAccessReader reader = RandomAccessReader.open(statsComponent))
        {
            return deserialize(descriptor, reader, types);
        }
    }
    // No stats on disk; synthesize default stats so callers always get a STATS entry.
    logger.debug("No sstable stats for {}", descriptor);
    Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
    defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    return defaults;
}
示例3: writeFullChecksum
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Writes the accumulated full-file checksum value to the sstable's
 * Digest component as a decimal string.
 *
 * @param descriptor descriptor identifying the sstable whose digest file to write
 * @throws FSWriteError if the digest file cannot be written or closed
 */
public void writeFullChecksum(Descriptor descriptor)
{
    File outFile = new File(descriptor.filenameFor(Component.DIGEST));
    // try-with-resources instead of finally { closeQuietly }: a failed
    // close()/flush() previously went unreported, which could leave a
    // truncated digest on disk while the caller believed the write succeeded.
    try (BufferedWriter out = Files.newBufferedWriter(outFile.toPath(), Charsets.UTF_8))
    {
        out.write(String.valueOf(fullChecksum.getValue()));
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, outFile);
    }
}
示例4: deserialize
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Loads the requested metadata components for an sstable from its local
 * Statistics.db file; returns default (empty) stats when that component
 * does not exist on disk.
 */
public Map<MetadataType, MetadataComponent> deserialize( Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.trace("Load metadata for {}", descriptor);
    File statsComponent = new File(descriptor.filenameFor(Component.STATS));
    if (!statsComponent.exists())
    {
        // Missing stats component: return default stats so callers always see a STATS entry.
        logger.trace("No sstable stats for {}", descriptor);
        Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
        defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
        return defaults;
    }
    // Stats component present: deserialize the requested metadata types.
    try (RandomAccessReader reader = RandomAccessReader.open(statsComponent))
    {
        return deserialize(descriptor, reader, types);
    }
}
示例5: FileDigestValidator
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Prepares validation of an sstable's data file against its stored digest:
 * opens the digest and data components and parses the expected checksum
 * value from the digest file.
 *
 * @param descriptor descriptor identifying the sstable's component files
 * @throws IOException if the digest file is missing, unreadable, or does not
 *                     contain a parseable checksum value
 */
public FileDigestValidator(Descriptor descriptor) throws IOException
{
    this.descriptor = descriptor;
    // Checksum implementation must match the type recorded by this sstable version.
    checksum = descriptor.version.uncompressedChecksumType().newInstance();
    // NOTE(review): if opening the data reader below throws, digestReader is
    // never closed here (close() only runs in the catch for the parse step) —
    // verify close()/callers cover that path.
    digestReader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.digestFor(descriptor.version.uncompressedChecksumType()))));
    dataReader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.DATA)));
    try
    {
        // The digest file stores the checksum as a single decimal line.
        storedDigestValue = Long.parseLong(digestReader.readLine());
    }
    catch (Exception e)
    {
        // Release both readers before reporting the sstable as corrupted.
        close();
        // Attempting to create a FileDigestValidator without a DIGEST file will fail
        throw new IOException("Corrupted SSTable : " + descriptor.filenameFor(Component.DATA));
    }
}
示例6: BigTableWriter
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Constructs a writer for the "big" sstable format: sets up the data file
 * writer (compressed or checksummed, depending on table params), the file
 * handle builder for later reads, the index writer, and the column index.
 *
 * @param descriptor        target sstable descriptor (paths, version, digest component)
 * @param keyCount          estimated number of keys, used to size the index writer
 * @param repairedAt        repair timestamp recorded in metadata
 * @param metadata          table schema, including compression parameters
 * @param metadataCollector accumulates stats while writing
 * @param header            serialization header for this sstable
 */
public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    //txn.trackNew(this); // must track before any files are created
    if (compression)
    {
        // Compressed path: data writer also maintains CompressionInfo.db and the digest.
        dataFile = new CompressedSequentialWriter(getFilename(),
                                                 descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                 descriptor.filenameFor(descriptor.digestComponent),
                                                 writerOption,
                                                 metadata.params.compression,
                                                 metadataCollector, descriptor.getConfiguration());
    }
    else
    {
        // Uncompressed path: per-chunk CRCs go to the CRC component instead.
        dataFile = new ChecksummedSequentialWriter(getFilename(),
                                                   descriptor.filenameFor(Component.CRC),
                                                   descriptor.filenameFor(descriptor.digestComponent),
                                                   writerOption,
                                                   descriptor.getConfiguration());
    }
    // Handle builder for re-opening the data file; must match the compression choice above.
    dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA))
                             .withConfiguration(descriptor.getConfiguration())
                             .compressed(compression);
    //chunkCache.ifPresent(dbuilder::withChunkCache);
    iwriter = new IndexWriter(keyCount);
    columnIndexWriter = new ColumnIndex(this.header, dataFile, descriptor.version, this.observers,
                                        getRowIndexEntrySerializer().indexInfoSerializer());
}
示例7: writeMetadata
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Serializes the given metadata components to the sstable's Statistics.db
 * file, translating any I/O failure into an FSWriteError.
 */
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    String statsPath = desc.filenameFor(Component.STATS);
    try (SequentialWriter writer = new SequentialWriter(statsPath, writerOption, desc.getConfiguration()))
    {
        desc.getMetadataSerializer().serialize(components, writer, desc.version);
        // finish() flushes and syncs before the writer is closed.
        writer.finish();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, statsPath);
    }
}
示例8: saveSummary
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Save index summary to Summary.db file.
 *
 * Deletes any existing (possibly stale) summary first, then writes the
 * serialized summary followed by the first/last keys, retrying with
 * exponential backoff on failure.
 *
 * @param descriptor sstable descriptor identifying the summary file location
 * @param first      first decorated key in the sstable (written only when both first and last are non-null)
 * @param last       last decorated key in the sstable
 * @param summary    the in-memory index summary to persist
 */
public static void saveSummary(Descriptor descriptor, DecoratedKey first, DecoratedKey last, IndexSummary summary)
{
    String filePath = descriptor.filenameFor(Component.SUMMARY);
    //TODO: add a retry here on deletion
    HadoopFileUtils.deleteIfExists(filePath, descriptor.getConfiguration());
    //TODO: will make the retry nicer
    int attempt = 0;
    int maxAttempt = 5;
    boolean isSuccess = false;
    while (!isSuccess) {
        // Exponential backoff: sleep 2^attempt seconds before each retry (not before the first try).
        if (attempt > 0)
            FBUtilities.sleepQuietly((int) Math.round(Math.pow(2, attempt)) * 1000);
        try (HadoopFileUtils.HadoopFileChannel hos = HadoopFileUtils.newFilesystemChannel(filePath,
                descriptor.getConfiguration());
             DataOutputStreamPlus oStream = new BufferedDataOutputStreamPlus(hos)) {
            IndexSummary.serializer.serialize(summary, oStream, descriptor.version.hasSamplingLevel());
            // Key bounds are optional; both must be present to be recorded.
            if (first != null && last != null) {
                ByteBufferUtil.writeWithLength(first.getKey(), oStream);
                ByteBufferUtil.writeWithLength(last.getKey(), oStream);
            }
            isSuccess = true;
        } catch (Throwable e) {
            logger.trace("Cannot save SSTable Summary: ", e);
            // The partially written file is corrupted: delete it so the next attempt (or a later load) starts clean.
            HadoopFileUtils.deleteIfExists(filePath, descriptor.getConfiguration());
            attempt++;
            if (attempt == maxAttempt) //TODO: do we need to record all retried exceptions here, or assume they're the same?
                throw new RuntimeException("Have retried for " + maxAttempt + " times but still failed!", e);
        }
    }
}
示例9: create
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
* Create metadata about given compressed file including uncompressed data length, chunk size
* and list of the chunk offsets of the compressed data.
*
* This is an expensive operation! Don't create more than one for each
* sstable.
*
* @param dataFilePath Path to the compressed file
*
* @return metadata about given compressed file.
*/
//static Map<String, CompressionMetadata> fileCompressionMetadata = new ConcurrentHashMap<>();
//private static Object lock = new Object();
public static CompressionMetadata create(String dataFilePath, long dataFileSize, Configuration configuration)
{
    // Derive the CompressionInfo.db path from the data file's descriptor.
    Descriptor descriptor = Descriptor.fromFilename(dataFilePath, configuration);
    logger.info("Creating CompressionMetadata for file [" + dataFilePath + "]");
    String compressionInfoPath = descriptor.filenameFor(Component.COMPRESSION_INFO);
    return new CompressionMetadata(compressionInfoPath,
                                   dataFileSize,
                                   descriptor.version.compressedChecksumType(),
                                   configuration);
}
示例10: getTempSSTablePath
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Builds the path of a new temporary sstable data file in the given
 * directory, consuming a fresh generation number.
 */
private String getTempSSTablePath(File directory, Descriptor.Version version)
{
    int generation = fileIndexGenerator.incrementAndGet();
    Descriptor tempDescriptor = new Descriptor(version,
                                               directory,
                                               keyspace.getName(),
                                               name,
                                               generation,
                                               Descriptor.Type.TEMP);
    return tempDescriptor.filenameFor(Component.DATA);
}
示例11: rewriteSSTableMetadata
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Atomically replaces an sstable's Statistics.db: writes the new metadata
 * to a temporary sibling file first, then renames it over the live one, so
 * a crash mid-write never corrupts the existing stats.
 */
private void rewriteSSTableMetadata(Descriptor descriptor, Map<MetadataType, MetadataComponent> currentComponents) throws IOException
{
    Descriptor tmpDescriptor = descriptor.asType(Descriptor.Type.TEMP);
    String tmpStatsPath = tmpDescriptor.filenameFor(Component.STATS);
    String liveStatsPath = descriptor.filenameFor(Component.STATS);
    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(tmpStatsPath)))
    {
        serialize(currentComponents, out);
        out.flush();
    }
    // we cant move a file on top of another file in windows:
    if (FBUtilities.isWindows())
        FileUtils.delete(liveStatsPath);
    FileUtils.renameWithConfirm(tmpStatsPath, liveStatsPath);
}
示例12: createFakeSSTable
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Creates empty placeholder files for an sstable's core components
 * (Data, Index, Filter) and records them in {@code addTo}.
 */
private static void createFakeSSTable(File dir, String cf, int gen, boolean temp, List<File> addTo) throws IOException
{
    Descriptor.Type type = temp ? Descriptor.Type.TEMP : Descriptor.Type.FINAL;
    Descriptor desc = new Descriptor(dir, KS, cf, gen, type);
    Component[] components = { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER };
    for (Component component : components)
    {
        File componentFile = new File(desc.filenameFor(component));
        componentFile.createNewFile();
        addTo.add(componentFile);
    }
}
示例13: getTempSSTablePath
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Builds the path of a new temporary sstable data file in the given
 * directory, consuming a fresh generation number.
 */
private String getTempSSTablePath(File directory, Descriptor.Version version)
{
    int generation = fileIndexGenerator.incrementAndGet();
    // Final boolean argument marks the descriptor as temporary.
    Descriptor tempDescriptor = new Descriptor(version,
                                               directory,
                                               keyspace.getName(),
                                               name,
                                               generation,
                                               true);
    return tempDescriptor.filenameFor(Component.DATA);
}
示例14: createFakeSSTable
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Creates empty placeholder files for an sstable's core components
 * (Data, Index, Filter) and records them in {@code addTo}.
 */
private static void createFakeSSTable(File dir, String cf, int gen, boolean temp, List<File> addTo) throws IOException
{
    Descriptor desc = new Descriptor(dir, KS, cf, gen, temp);
    Component[] components = { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER };
    for (Component component : components)
    {
        File componentFile = new File(desc.filenameFor(component));
        componentFile.createNewFile();
        addTo.add(componentFile);
    }
}
示例15: getSSTablePath
import org.apache.cassandra.io.sstable.Descriptor; //导入方法依赖的package包/类
/**
 * Builds the path of a new sstable data file in the given directory for
 * the specified format, consuming a fresh generation number. The digest
 * component type is chosen from the latest BigFormat version's
 * uncompressed checksum type.
 */
private String getSSTablePath(File directory, Version version, SSTableFormat.Type format)
{
    int generation = fileIndexGenerator.incrementAndGet();
    Component digest = Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType());
    Descriptor desc = new Descriptor(version,
                                     directory,
                                     keyspace.getName(),
                                     name,
                                     generation,
                                     format,
                                     digest);
    return desc.filenameFor(Component.DATA);
}