本文整理汇总了Java中org.apache.cassandra.io.util.FileUtils.deleteWithConfirm方法的典型用法代码示例。如果您正在寻找以下问题的答案:Java FileUtils.deleteWithConfirm方法的具体用法?Java FileUtils.deleteWithConfirm怎么用?Java FileUtils.deleteWithConfirm使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.io.util.FileUtils的用法示例。
在下文中一共展示了FileUtils.deleteWithConfirm方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: delete
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Removes the on-disk components of an sstable that has been compacted away
 * and for which no live references remain. Because the JVM gc gives no
 * guarantee that the ReferenceQueue is drained for every such file, a marker
 * is written to `compactedFilename` at compaction time; any marker found on
 * startup triggers removal of the file.
 *
 * SSTables marked as temporary are removed here as well.
 *
 * @return true if the file was deleted
 */
public static boolean delete(Descriptor desc, Set<Component> components)
{
    // DATA goes first, if present
    if (components.contains(Component.DATA))
        FileUtils.deleteWithConfirm(desc.filenameFor(Component.DATA));

    for (Component c : components)
    {
        // DATA was handled above; SUMMARY is handled separately below
        boolean handledElsewhere = c.equals(Component.DATA) || c.equals(Component.SUMMARY);
        if (handledElsewhere)
            continue;
        FileUtils.deleteWithConfirm(desc.filenameFor(c));
    }

    // SUMMARY removal is best-effort: plain delete, no confirmation
    if (components.contains(Component.SUMMARY))
        FileUtils.delete(desc.filenameFor(Component.SUMMARY));

    logger.trace("Deleted {}", desc);
    return true;
}
示例2: recycleSegment
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Differs from the above because it can work on any file instead of just existing
 * commit log segments managed by this manager.
 *
 * @param file segment file that is no longer in use.
 */
void recycleSegment(final File file)
{
    // a segment is only worth recycling if we are under the cap AND it was
    // written with the current messaging version (cap check first: it is
    // cheap and short-circuits the filename parse)
    boolean reusable = !isCapExceeded()
                       && CommitLogDescriptor.fromFileName(file.getName()).getMessagingVersion() == MessagingService.current_version;
    if (!reusable)
    {
        // never counted toward the managed size, so nothing to subtract here
        logger.debug("(Unopened) segment {} is no longer needed and will be deleted now", file);
        FileUtils.deleteWithConfirm(file);
        return;
    }

    logger.debug("Recycling {}", file);
    // becoming a live segment for the first time: account for it in the managed size
    size.addAndGet(DatabaseDescriptor.getCommitLogSegmentSize());
    segmentManagementTasks.add(new Callable<CommitLogSegment>()
    {
        public CommitLogSegment call()
        {
            return new CommitLogSegment(file.getPath());
        }
    });
}
示例3: delete
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Deletes every on-disk component of an sstable that has been compacted away
 * and is no longer referenced. Because the JVM gc gives no guarantee that the
 * ReferenceQueue is drained for each such file, a marker is written to
 * `compactedFilename` when a file is compacted and acted on at startup.
 *
 * SSTables marked as temporary are removed here as well.
 *
 * @return true if the file was deleted
 */
public static boolean delete(Descriptor desc, Set<Component> components)
{
    // DATA goes first, if present
    if (components.contains(Component.DATA))
        FileUtils.deleteWithConfirm(desc.filenameFor(Component.DATA));

    for (Component c : components)
    {
        // skip DATA (already handled) and SUMMARY (removed without confirmation below)
        if (c.equals(Component.DATA) || c.equals(Component.SUMMARY))
            continue;
        FileUtils.deleteWithConfirm(desc.filenameFor(c));
    }

    // best-effort removal of the summary, whether or not it was listed
    FileUtils.delete(desc.filenameFor(Component.SUMMARY));
    logger.debug("Deleted {}", desc);
    return true;
}
示例4: recycleSegment
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Differs from the above because it can work on any file instead of just existing
 * commit log segments managed by this allocator.
 *
 * @param file segment file that is no longer in use.
 */
public void recycleSegment(final File file)
{
    // a recyclable segment must fit under the cap, be exactly the configured
    // segment size (this rules out odd-sized or empty segments from old C*
    // versions and unit tests), and carry the current messaging version;
    // checks are ordered cheapest-first and short-circuit
    boolean recyclable = !isCapExceeded()
                         && file.length() == DatabaseDescriptor.getCommitLogSegmentSize()
                         && CommitLogDescriptor.fromFileName(file.getName()).getMessagingVersion() == MessagingService.current_version;
    if (!recyclable)
    {
        // never counted toward the managed size, so nothing to subtract here
        logger.debug("(Unopened) segment {} is no longer needed and will be deleted now", file);
        FileUtils.deleteWithConfirm(file);
        return;
    }

    logger.debug("Recycling {}", file);
    // becoming a live segment for the first time: account for it in the managed size
    size.addAndGet(DatabaseDescriptor.getCommitLogSegmentSize());
    queue.add(new Runnable()
    {
        public void run()
        {
            CommitLogSegment segment = new CommitLogSegment(file.getPath());
            internalAddReadySegment(segment);
        }
    });
}
示例5: delete
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Deletes the on-disk components of an sstable that has been compacted away
 * and for which no live references remain. Because the JVM gc gives no
 * guarantee that the ReferenceQueue is drained for every such file, a marker
 * is written to `compactedFilename` at compaction time; such markers are
 * acted on at startup.
 *
 * SSTables marked as temporary are removed here as well.
 *
 * @return true if the file was deleted
 */
public static boolean delete(Descriptor desc, Set<Component> components)
{
    // DATA goes first, if present
    if (components.contains(Component.DATA))
        FileUtils.deleteWithConfirm(desc.filenameFor(Component.DATA));

    for (Component c : components)
    {
        // DATA was handled above; COMPACTED_MARKER and SUMMARY are removed last, below
        boolean deferred = c.equals(Component.DATA) || c.equals(Component.COMPACTED_MARKER) || c.equals(Component.SUMMARY);
        if (deferred)
            continue;
        FileUtils.deleteWithConfirm(desc.filenameFor(c));
    }

    // the COMPACTED_MARKER goes last. Newly created sstables should not carry
    // a marker, but this keeps us from leaving older markers around.
    FileUtils.delete(desc.filenameFor(Component.COMPACTED_MARKER));
    FileUtils.delete(desc.filenameFor(Component.SUMMARY));
    logger.debug("Deleted {}", desc);
    return true;
}
示例6: loadSummary
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Load index summary from Summary.db file if it exists.
 *
 * If the loaded index summary has a different index interval from the current
 * value stored in schema, the Summary.db file will be deleted and this returns
 * false so the caller rebuilds the summary.
 *
 * @param ibuilder segmented-file builder for the index file; its bounds are deserialized from the summary stream
 * @param dbuilder segmented-file builder for the data file; its bounds are deserialized from the summary stream
 * @return true if index summary is loaded successfully from Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
// no summary on disk: caller must build one from scratch
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
// deserialize the summary itself, then the first/last keys and the
// segment boundaries for both builders, in that fixed stream order
indexSummary = IndexSummary.serializer.deserialize(
iStream, partitioner, descriptor.version.hasSamplingLevel,
metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
// release the partially-deserialized summary, if any was assigned
if (indexSummary != null)
indexSummary.close();
logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
// corrupted; delete it and fall back to creating a new summary
// NOTE(review): the stream is closed here, BEFORE the delete below — presumably
// because some platforms cannot delete a file that is still open; the second
// closeQuietly in finally is then a harmless no-op. Keep this ordering.
FileUtils.closeQuietly(iStream);
// delete it and fall back to creating a new summary
FileUtils.deleteWithConfirm(summariesFile);
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
示例7: testMissingHeader
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
@Test
public void testMissingHeader() throws IOException
{
    Keyspace keyspace1 = Keyspace.open("Keyspace1");
    Keyspace keyspace2 = Keyspace.open("Keyspace2");
    DecoratedKey key = Util.dk("keymulti");

    // write one column into each keyspace so the commit log has data to replay
    ColumnFamily cells = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    cells.addColumn(column("col1", "val1", 1L));
    new Mutation("Keyspace1", key.getKey(), cells).apply();

    cells = ArrayBackedSortedColumns.factory.create("Keyspace2", "Standard3");
    cells.addColumn(column("col2", "val2", 1L));
    new Mutation("Keyspace2", key.getKey(), cells).apply();

    // wipe the memtables so replay is the only way the data comes back
    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();

    // nuke the header
    File logDirectory = new File(DatabaseDescriptor.getCommitLogLocation());
    for (File candidate : logDirectory.listFiles())
    {
        if (candidate.getName().endsWith(".header"))
            FileUtils.deleteWithConfirm(candidate);
    }

    CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
    CommitLog.instance.recover();

    assertColumns(Util.getColumnFamily(keyspace1, key, "Standard1"), "col1");
    assertColumns(Util.getColumnFamily(keyspace2, key, "Standard3"), "col2");
}
示例8: discard
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Fully discards this segment by closing it and, optionally, deleting its
 * backing file. (Potentially blocking operation.)
 */
public void discard(boolean deleteFile)
{
    // TODO shouldn't we close the file when we're done writing to it, which
    // comes (potentially) much earlier than its eligibility for recycling?
    close();
    if (deleteFile)
    {
        FileUtils.deleteWithConfirm(logFile);
    }
}
示例9: testMissingHeader
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
@Test
public void testMissingHeader() throws IOException, ExecutionException, InterruptedException
{
    Keyspace keyspace1 = Keyspace.open("Keyspace1");
    Keyspace keyspace2 = Keyspace.open("Keyspace2");
    DecoratedKey key = Util.dk("keymulti");

    // write one column into each keyspace so the commit log has data to replay
    ColumnFamily cells = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    cells.addColumn(column("col1", "val1", 1L));
    new RowMutation("Keyspace1", key.key, cells).apply();

    cells = TreeMapBackedSortedColumns.factory.create("Keyspace2", "Standard3");
    cells.addColumn(column("col2", "val2", 1L));
    new RowMutation("Keyspace2", key.key, cells).apply();

    // wipe the memtables so replay is the only way the data comes back
    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();

    // nuke the header
    File logDirectory = new File(DatabaseDescriptor.getCommitLogLocation());
    for (File candidate : logDirectory.listFiles())
    {
        if (candidate.getName().endsWith(".header"))
            FileUtils.deleteWithConfirm(candidate);
    }

    CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
    CommitLog.instance.recover();

    assertColumns(Util.getColumnFamily(keyspace1, key, "Standard1"), "col1");
    assertColumns(Util.getColumnFamily(keyspace2, key, "Standard3"), "col2");
}
示例10: recycleSegment
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Differs from the above because it can work on any file instead of just existing
 * commit log segments managed by this manager.
 *
 * @param file segment file that is no longer in use.
 */
void recycleSegment(final File file)
{
    // this file was never a "live" segment, so the managed size is left untouched
    logger.trace("(Unopened) segment {} is no longer needed and will be deleted now", file);
    FileUtils.deleteWithConfirm(file);
}
示例11: discard
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Fully discards this segment: closes it, optionally removes the backing
 * file, and returns its on-disk footprint to the allocator's accounting.
 * (Potentially blocking operation.)
 */
void discard(boolean deleteFile)
{
    close();
    if (deleteFile)
    {
        FileUtils.deleteWithConfirm(logFile);
    }
    // hand the space this segment occupied back to the allocator
    commitLog.allocator.addSize(-onDiskSize());
}
示例12: testMissingHeader
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
@Test
public void testMissingHeader() throws IOException
{
    Keyspace keyspace1 = Keyspace.open(KEYSPACE1);
    Keyspace keyspace2 = Keyspace.open(KEYSPACE2);
    DecoratedKey key = Util.dk("keymulti");

    // apply one update per keyspace, keeping the iterators for the final comparison
    UnfilteredRowIterator expected1 = Util.apply(new RowUpdateBuilder(keyspace1.getColumnFamilyStore(CF_STANDARD1).metadata, 1L, 0, "keymulti")
                                                 .clustering("col1").add("val", "1")
                                                 .build());
    UnfilteredRowIterator expected2 = Util.apply(new RowUpdateBuilder(keyspace2.getColumnFamilyStore(CF_STANDARD3).metadata, 1L, 0, "keymulti")
                                                 .clustering("col1").add("val", "1")
                                                 .build());

    // wipe the memtables so replay is the only way the data comes back
    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();

    // nuke the header
    File logDirectory = new File(DatabaseDescriptor.getCommitLogLocation());
    for (File candidate : logDirectory.listFiles())
    {
        if (candidate.getName().endsWith(".header"))
            FileUtils.deleteWithConfirm(candidate);
    }

    CommitLog.instance.resetUnsafe(false);

    Assert.assertTrue(Util.equal(expected1, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace1.getColumnFamilyStore(CF_STANDARD1), key).build()).unfilteredIterator()));
    Assert.assertTrue(Util.equal(expected2, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace2.getColumnFamilyStore(CF_STANDARD3), key).build()).unfilteredIterator()));
}
示例13: delete
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Completely discards a segment file by deleting it. (Potentially blocking operation)
 */
void delete()
{
    // remove the backing file; deleteWithConfirm presumably fails loudly on
    // error rather than returning silently — name suggests so, TODO confirm
    FileUtils.deleteWithConfirm(logFile);
}
示例14: load
import org.apache.cassandra.io.util.FileUtils; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * @param recreateBloomFilter   when true, forces a summary build pass so the bloom filter is rebuilt
 * @param saveSummaryIfCreated  for bulk loading purposes, if the summary was absent and needed to be built, you can
 *                              avoid persisting it to disk by setting this to false
 * @throws IOException if reading or building the index/data files fails
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
// the data-file builder is compression-aware; the index builder never is
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// try the on-disk Summary.db first; build from scratch only when missing/invalid,
// or when a bloom-filter rebuild was requested (which requires a full pass anyway)
boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
boolean builtSummary = false;
if (recreateBloomFilter || !summaryLoaded)
{
buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);
builtSummary = true;
}
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
// Check for an index summary that was downsampled even though the serialization format doesn't support
// that. If it was downsampled, rebuild it. See CASSANDRA-8993 for details.
// (only a loaded summary can be affected, hence the !builtSummary guard)
if (!descriptor.version.hasSamplingLevel && !builtSummary && !validateSummarySamplingLevel())
{
// release everything opened above before deleting the bad summary and rebuilding
indexSummary.close();
ifile.close();
dfile.close();
logger.info("Detected erroneously downsampled index summary; will rebuild summary at full sampling");
FileUtils.deleteWithConfirm(new File(descriptor.filenameFor(Component.SUMMARY)));
// fresh builders for the rebuild pass, mirroring the setup at the top
ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
buildSummary(false, ibuilder, dbuilder, false, Downsampling.BASE_SAMPLING_LEVEL);
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
// always persist here: the on-disk summary was just deleted as invalid
saveSummary(ibuilder, dbuilder);
}
else if (saveSummaryIfCreated && builtSummary)
{
saveSummary(ibuilder, dbuilder);
}
}