This article collects typical usage examples of the Java method org.apache.cassandra.io.util.FileUtils.closeQuietly. If you are unsure what FileUtils.closeQuietly does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore the enclosing class org.apache.cassandra.io.util.FileUtils further.
The sections below present 15 code examples of FileUtils.closeQuietly, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
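Before the individual examples, here is a minimal illustrative sketch of what a "close quietly" helper in the spirit of FileUtils.closeQuietly does. This is an assumption for illustration only, not Cassandra's actual implementation (which may, for instance, also log the failure):

static void closeQuietly(java.io.Closeable c)
{
    try
    {
        // tolerate a resource that was never assigned (several examples below pass a variable that may still be null)
        if (c != null)
            c.close();
    }
    catch (Exception e)
    {
        // deliberately swallowed: the caller is already returning a result or propagating
        // the primary exception, and must not fail just because close() failed
    }
}

This is why every example below does its real I/O work in the try block, converts failures into a RuntimeException or FSWriteError in the catch block, and calls FileUtils.closeQuietly in finally: the resource is always released, and a failure while closing can never mask the original error.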
Example 1: loadProperties
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
private static Properties loadProperties()
{
    Properties properties = new Properties();
    InputStream stream = InvertedIndex.class.getClassLoader().getResourceAsStream("InvertedIndex.properties");
    try
    {
        properties.load(stream);
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
    finally
    {
        FileUtils.closeQuietly(stream);
    }
    logger.info("loaded property file, InvertedIndex.properties");
    return properties;
}
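Two points worth noting in this example: getResourceAsStream returns null when InvertedIndex.properties is missing from the classpath, in which case properties.load throws a NullPointerException that is wrapped in the RuntimeException; the finally block still runs, and closeQuietly is expected to tolerate the null stream rather than throw.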
Example 2: dumpInterArrivalTimes
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
/**
 * Dump the inter arrival times for examination if necessary.
 */
public void dumpInterArrivalTimes()
{
    File file = FileUtils.createTempFile("failuredetector-", ".dat");
    OutputStream os = null;
    try
    {
        os = new BufferedOutputStream(new FileOutputStream(file, true));
        os.write(toString().getBytes());
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, file);
    }
    finally
    {
        FileUtils.closeQuietly(os);
    }
}
Example 3: appendTOC
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
/**
 * Appends new component names to the TOC component.
 */
protected static void appendTOC(Descriptor descriptor, Collection<Component> components)
{
    File tocFile = new File(descriptor.filenameFor(Component.TOC));
    PrintWriter w = null;
    try
    {
        w = new PrintWriter(new FileWriter(tocFile, true));
        for (Component component : components)
            w.println(component.name);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, tocFile);
    }
    finally
    {
        FileUtils.closeQuietly(w);
    }
}
Example 4: writeChecksum
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
protected void writeChecksum(long checksum, String filePath)
{
    File outFile = new File(filePath);
    BufferedWriter out = null;
    try
    {
        out = Files.newBufferedWriter(outFile.toPath(), Charsets.UTF_8);
        out.write(String.valueOf(checksum));
        out.flush();
        out.close();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, outFile);
    }
    finally
    {
        FileUtils.closeQuietly(out);
    }
}
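Note the combination of an explicit out.close() inside the try block and FileUtils.closeQuietly(out) in finally. The explicit close surfaces any buffered-write failure so it can be converted into an FSWriteError, while the quiet close in finally guarantees the file handle is released even when write or flush has already thrown; closing an already-closed writer is harmless.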
Example 5: buildSummaryAtLevel
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel))
        {
            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
                RowIndexEntry.Serializer.skip(primaryIndex);
            }
            return summaryBuilder.build(partitioner);
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
Example 6: testHugeBFSerialization
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
@Test
@Ignore
public void testHugeBFSerialization() throws IOException
{
    ByteBuffer test = ByteBuffer.wrap(new byte[] {0, 1});
    File file = FileUtils.createTempFile("bloomFilterTest-", ".dat");
    BloomFilter filter = (BloomFilter) FilterFactory.getFilter(((long)Integer.MAX_VALUE / 8) + 1, 0.01d, true);
    filter.add(test);
    DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(file));
    FilterFactory.serialize(filter, out);
    filter.bitset.serialize(out);
    out.close();
    filter.close();
    DataInputStream in = new DataInputStream(new FileInputStream(file));
    BloomFilter filter2 = (BloomFilter) FilterFactory.deserialize(in, true);
    Assert.assertTrue(filter2.isPresent(test));
    FileUtils.closeQuietly(in);
}
Example 7: loadProperties
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
private static Properties loadProperties()
{
    Properties properties = new Properties();
    InputStream stream = AuditTrigger.class.getClassLoader().getResourceAsStream("AuditTrigger.properties");
    try
    {
        properties.load(stream);
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
    finally
    {
        FileUtils.closeQuietly(stream);
    }
    return properties;
}
Example 8: close
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
public void close()
{
    // This will close the input iterators
    FileUtils.closeQuietly(mergeIterator);
    if (listener != null)
        listener.close();
}
Example 9: main
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
public static void main(String[] args) {
    String inputSSTableFullPathFileName = new File(getInputFile(args)).getAbsolutePath();
    LOGGER.info("Input file name: " + inputSSTableFullPathFileName);
    Configuration conf = new Configuration();
    final Descriptor inputSSTableDescriptor = Descriptor.fromFilename(inputSSTableFullPathFileName, conf);
    SSTableWriter writer = null;
    try {
        final CFMetaData inputCFMetaData =
                SSTableUtils.metaDataFromSSTable(inputSSTableFullPathFileName, conf);
        final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(inputSSTableDescriptor, inputCFMetaData);
        final SSTableReader inputSStable = SSTableReader.openNoValidation(inputSSTableDescriptor, inputCFMetaData);
        writer = SSTableUtils.createSSTableWriter(inputSSTableDescriptor, outputCFMetaData, inputSStable);
        final ISSTableScanner currentScanner = inputSStable.getScanner();
        while (currentScanner.hasNext()) {
            final UnfilteredRowIterator row = currentScanner.next();
            writer.append(row);
        }
        writer.finish(false);
    } catch (IOException e) {
        e.printStackTrace(System.err);
    } finally {
        FileUtils.closeQuietly(writer);
    }
}
Example 10: testWritingToLocalSSTable
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
/******************************************************
 * 1. Input data
 * This is the schema definition of the table that is used to generate the non-compressed input data:
 * <p>
 * CREATE TABLE bills_nc (
 *     user text,
 *     balance int static,
 *     expense_id int,
 *     amount int,
 *     name text,
 *     PRIMARY KEY (user, expense_id))
 * WITH compression = { 'sstable_compression' : '' };
 * <p>
 * <p>
 * 2. Compressing and producing output data
 * Running this test will convert the data file under src/test/resources/data/bills_compress/mc-6-big-Data.db
 * into the corresponding compressed file, using LZ4 compression, along with auxiliary
 * files (CompressionInfo.db, Index.db, etc).
 * <p>
 * The output is under the cassandra/compresseddata/cassandra/data directory.
 * <p>
 * 3. Verification
 * Since this is C* 3.0 format, you should use the sstabledump command to dump out the JSON contents
 * of both the input data and the output data to verify:
 * %>sstabledump cassandra/data/mc-1-big-Data.db
 * and
 * %>sstabledump cassandra/compresseddata/cassandra/data/mc-1-big-Data.db
 *******************************************************/
@Test
public void testWritingToLocalSSTable() {
    final String inputSSTableFullPathFileName = CASS3_DATA_DIR + "keyspace1/bills_compress/mc-6-big-Data.db";
    LOGGER.info("Input file name: " + inputSSTableFullPathFileName);
    final Descriptor inputSSTableDescriptor = Descriptor.fromFilename(inputSSTableFullPathFileName,
            TestBaseSSTableFunSuite.HADOOP_CONF);
    SSTableWriter writer = null;
    try {
        SSTableSingleReader reader = new SSTableSingleReader(inputSSTableFullPathFileName,
                "casspactor",
                "bills_nc",
                TestBaseSSTableFunSuite.HADOOP_CONF);
        final CFMetaData inputCFMetaData = reader.getCfMetaData();
        final ISSTableScanner currentScanner = reader.getSSTableScanner();
        final SSTableReader inputSStable = reader.getSstableReader();
        // Create writer
        final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(inputSSTableDescriptor, inputCFMetaData);
        writer = SSTableUtils.createSSTableWriter(inputSSTableDescriptor, outputCFMetaData, inputSStable);
        while (currentScanner.hasNext()) {
            final UnfilteredRowIterator row = currentScanner.next();
            writer.append(row);
        }
        writer.finish(false);
    } catch (IOException e) {
        e.printStackTrace(System.err);
    } finally {
        FileUtils.closeQuietly(writer);
    }
}
Example 11: releaseIndexes
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
private void releaseIndexes(List<RangeIterator<Long, Token>> indexes)
{
    if (indexes == null)
        return;
    for (RangeIterator<Long, Token> index : indexes)
        FileUtils.closeQuietly(index);
}
Example 12: release
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
public void release()
{
    int n = references.decrementAndGet();
    if (n == 0)
    {
        FileUtils.closeQuietly(index);
        sstable.releaseReference();
        if (obsolete.get() || sstable.isMarkedCompacted())
            FileUtils.delete(index.getIndexPath());
    }
}
Example 13: createSSLContext
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
public static SSLContext createSSLContext(EncryptionOptions options, boolean buildTruststore) throws IOException
{
    FileInputStream tsf = null;
    FileInputStream ksf = null;
    SSLContext ctx;
    try
    {
        ctx = SSLContext.getInstance(options.protocol);
        TrustManager[] trustManagers = null;
        if (buildTruststore)
        {
            tsf = new FileInputStream(options.truststore);
            TrustManagerFactory tmf = TrustManagerFactory.getInstance(options.algorithm);
            KeyStore ts = KeyStore.getInstance(options.store_type);
            ts.load(tsf, options.truststore_password.toCharArray());
            tmf.init(ts);
            trustManagers = tmf.getTrustManagers();
        }
        ksf = new FileInputStream(options.keystore);
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(options.algorithm);
        KeyStore ks = KeyStore.getInstance(options.store_type);
        ks.load(ksf, options.keystore_password.toCharArray());
        kmf.init(ks, options.keystore_password.toCharArray());
        ctx.init(kmf.getKeyManagers(), trustManagers, null);
    }
    catch (Exception e)
    {
        throw new IOException("Error creating/initializing the SSL Context", e);
    }
    finally
    {
        FileUtils.closeQuietly(tsf);
        FileUtils.closeQuietly(ksf);
    }
    return ctx;
}
Example 14: loadSummary
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
/**
 * Load index summary from Summary.db file if it exists.
 *
 * If the loaded index summary has a different index interval from the current value stored in the schema,
 * then the Summary.db file will be deleted and this returns false so that the summary is rebuilt.
 *
 * @param ibuilder
 * @param dbuilder
 * @return true if index summary is loaded successfully from Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream iStream = null;
    try
    {
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        indexSummary = IndexSummary.serializer.deserialize(
                iStream, partitioner, descriptor.version.hasSamplingLevel,
                metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
        first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    }
    catch (IOException e)
    {
        if (indexSummary != null)
            indexSummary.close();
        logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
        // corrupted; delete it and fall back to creating a new summary
        FileUtils.closeQuietly(iStream);
        FileUtils.deleteWithConfirm(summariesFile);
        return false;
    }
    finally
    {
        FileUtils.closeQuietly(iStream);
    }
    return true;
}
Example 15: firstKeyBeyond
import org.apache.cassandra.io.util.FileUtils; // import the package/class this method depends on
/**
 * Finds and returns the first key beyond a given token in this SSTable or null if no such key exists.
 */
public DecoratedKey firstKeyBeyond(RowPosition token)
{
    if (token.compareTo(first) < 0)
        return first;
    long sampledPosition = getIndexScanPosition(token);
    Iterator<FileDataInput> segments = ifile.iterator(sampledPosition);
    while (segments.hasNext())
    {
        FileDataInput in = segments.next();
        try
        {
            while (!in.isEOF())
            {
                ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(in);
                DecoratedKey indexDecoratedKey = partitioner.decorateKey(indexKey);
                if (indexDecoratedKey.compareTo(token) > 0)
                    return indexDecoratedKey;
                RowIndexEntry.Serializer.skip(in);
            }
        }
        catch (IOException e)
        {
            markSuspect();
            throw new CorruptSSTableException(e, in.getPath());
        }
        finally
        {
            FileUtils.closeQuietly(in);
        }
    }
    return null;
}