This article collects typical usage examples of the Java method org.apache.cassandra.io.util.FileUtils.clean. If you are wondering what FileUtils.clean does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the containing class, org.apache.cassandra.io.util.FileUtils.
The 12 code examples of FileUtils.clean shown below are sorted by popularity by default.
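Before the individual examples, here is a minimal, hedged sketch of the pattern they all share: allocate a direct (off-heap) ByteBuffer, use it, and call FileUtils.clean to release its native memory eagerly rather than waiting for garbage collection. The class name and buffer size below are illustrative only; note that in recent Cassandra versions clean(ByteBuffer) ignores null and non-direct buffers, while older versions expect a direct or mapped buffer.
import java.nio.ByteBuffer;
import org.apache.cassandra.io.util.FileUtils;

public class CleanSketch
{
    public static void main(String[] args)
    {
        // Allocate an off-heap buffer, use it, then free its native memory eagerly.
        ByteBuffer scratch = ByteBuffer.allocateDirect(64 * 1024);
        try
        {
            scratch.putLong(42L);
        }
        finally
        {
            // Releases the underlying native memory; behavior for null or heap
            // buffers depends on the Cassandra version (see note above).
            FileUtils.clean(scratch);
        }
    }
}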
Example 1: ensureCapacity
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
/**
 * Ensure {@code buf} is large enough for {@code outputLength}. If not, it is cleaned up and a new buffer is allocated;
 * otherwise, the buffer has its position/limit set appropriately.
 *
 * @param buf buffer to test the size of; may be null, in which case a new buffer is allocated
 * @param outputLength the minimum target size of the buffer
 * @param allowBufferResize true if resizing (reallocating) the buffer is allowed
 * @param bufferType on- or off-heap byte buffer
 * @return {@code buf} if it was large enough, else a newly allocated buffer
 */
public static ByteBuffer ensureCapacity(ByteBuffer buf, int outputLength, boolean allowBufferResize, BufferType bufferType)
{
    if (0 > outputLength)
        throw new IllegalArgumentException("invalid size for output buffer: " + outputLength);

    if (buf == null || buf.capacity() < outputLength)
    {
        if (!allowBufferResize)
            throw new IllegalStateException(String.format("output buffer is not large enough for data: current capacity %d, required %d", buf.capacity(), outputLength));
        FileUtils.clean(buf);
        buf = bufferType.allocate(outputLength);
    }
    else
    {
        buf.position(0).limit(outputLength);
    }
    return buf;
}
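A hedged usage sketch for the method above: a caller keeps one reusable scratch buffer and lets ensureCapacity reallocate (cleaning the old buffer) only when an incoming record does not fit. The readRecords method and the recordLengths parameter are hypothetical and assumed to live in the same class as ensureCapacity; BufferType is assumed to be org.apache.cassandra.io.compress.BufferType.
// Hypothetical caller: reuse one scratch buffer across records, growing it on demand.
static void readRecords(int[] recordLengths)
{
    ByteBuffer scratch = null;
    for (int length : recordLengths)
    {
        // Reallocates (and cleans the previous buffer) only when 'length' exceeds the capacity.
        scratch = ensureCapacity(scratch, length, true, BufferType.OFF_HEAP);
        // ... fill 'scratch' with 'length' bytes and process them ...
    }
    if (scratch != null)
        FileUtils.clean(scratch);
}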
Example 2: close
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
/**
 * Close the segment file.
 */
public void close()
{
    if (closed)
        return;

    try
    {
        FileUtils.clean(buffer);
        logFileAccessor.close();
        closed = true;
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, getPath());
    }
}
Example 3: close
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
@Override
public void close()
{
    if (!FileUtils.isCleanerAvailable())
        return;

    /*
     * Try forcing the unmapping of pages using undocumented unsafe sun APIs.
     * If this fails (non Sun JVM), we'll have to wait for the GC to finalize the mapping.
     * If this works and a thread tries to access any page, hell will unleash on earth.
     */
    try
    {
        for (MappedByteBuffer segment : pages)
            FileUtils.clean(segment);
    }
    catch (Exception e)
    {
        // This is not supposed to happen
    }
}
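For context, here is a hedged sketch of how such a mapped page might be created and later cleaned outside Cassandra's own classes. The mapAndClean method and the file path are hypothetical; isCleanerAvailable is assumed to exist, as in the Cassandra version the example above was taken from.
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.cassandra.io.util.FileUtils;

// Map a file read-only, touch it, then force the mapping to be released
// instead of waiting for the MappedByteBuffer to be garbage collected.
static void mapAndClean(String path) throws Exception
{
    try (RandomAccessFile raf = new RandomAccessFile(path, "r"))
    {
        MappedByteBuffer page = raf.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, raf.length());
        page.load(); // fault the pages into memory

        if (FileUtils.isCleanerAvailable())
            FileUtils.clean(page);
    }
}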
Example 4: internalClose
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
@Override
protected void internalClose()
{
    usedBuffers.decrementAndGet();

    try
    {
        if (bufferPool.size() < MAX_BUFFERPOOL_SIZE)
            bufferPool.add(buffer);
        else
            FileUtils.clean(buffer);
        super.internalClose();
    }
    finally
    {
        onClose.run();
    }
}
Example 5: put
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
public void put(ByteBuffer buffer)
{
    Chunk chunk = Chunk.getParentChunk(buffer);
    if (chunk == null)
    {
        FileUtils.clean(buffer);
        return;
    }

    LocalPool owner = chunk.owner;
    // ask the free method to take exclusive ownership of the act of recycling
    // if we are either: already not owned by anyone, or owned by ourselves
    long free = chunk.free(buffer, owner == null | owner == this);
    if (free == 0L)
    {
        // 0L => we own recycling responsibility, so must recycle;
        chunk.recycle();
        // if we are also the owner, we must remove the Chunk from our local queue
        if (owner == this)
            removeFromLocalQueue(chunk);
    }
    else if (((free == -1L) && owner != this) && chunk.owner == null)
    {
        // although we try to take recycle ownership cheaply, it is not always possible to do so if the owner is racing to unset.
        // we must also check after completely freeing if the owner has since been unset, and try to recycle
        chunk.tryRecycle();
    }
}
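In other words, a buffer that did not come from a pooled Chunk is simply freed with FileUtils.clean, while pooled buffers are handed back to their chunk and only recycled by whichever thread wins the recycling race. For illustration, a hedged sketch of the caller side, assuming the static BufferPool.get/put API found in Cassandra 3.x (method names and signatures may differ in other versions):
import java.nio.ByteBuffer;
import org.apache.cassandra.utils.memory.BufferPool;

// Hypothetical read path: borrow an off-heap buffer from the pool and return it.
// put() either recycles it into its parent Chunk or, if the buffer was allocated
// outside the pool, cleans it directly as the example above shows.
static void readBlock(int size)
{
    ByteBuffer buffer = BufferPool.get(size);
    try
    {
        // ... fill and consume 'buffer' ...
    }
    finally
    {
        BufferPool.put(buffer);
    }
}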
Example 6: reset
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
@VisibleForTesting
void reset()
{
    Chunk parent = getParentChunk(slab);
    if (parent != null)
        parent.free(slab, false);
    else
        FileUtils.clean(slab);
}
Example 7: doPreCleanup
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
@Override
protected Throwable doPreCleanup(Throwable accumulate)
{
    accumulate = super.doPreCleanup(accumulate);
    if (compressed != null)
    {
        try { FileUtils.clean(compressed); }
        catch (Throwable t) { accumulate = merge(accumulate, t); }
        compressed = null;
    }
    return accumulate;
}
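The merge call here is the usual Cassandra idiom (presumably org.apache.cassandra.utils.Throwables.merge) for accumulating cleanup failures instead of letting the first one abort the rest. A hedged sketch of the same pattern applied to several buffers; the cleanAll helper is hypothetical:
import java.nio.ByteBuffer;
import org.apache.cassandra.io.util.FileUtils;
import static org.apache.cassandra.utils.Throwables.merge;

// Clean every buffer even if some cleanups fail, collecting the failures.
static Throwable cleanAll(Throwable accumulate, ByteBuffer... buffers)
{
    for (ByteBuffer buffer : buffers)
    {
        try { FileUtils.clean(buffer); }
        catch (Throwable t) { accumulate = merge(accumulate, t); }
    }
    return accumulate;
}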
Example 8: internalClose
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
void internalClose()
{
    try
    {
        if (FileUtils.isCleanerAvailable())
            FileUtils.clean(buffer);
        logFileAccessor.close();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, getPath());
    }
}
Example 9: migrateLegacyHints
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
private void migrateLegacyHints()
{
    ByteBuffer buffer = ByteBuffer.allocateDirect(256 * 1024);
    String query = String.format("SELECT DISTINCT target_id FROM %s.%s", SystemKeyspace.NAME, SystemKeyspace.LEGACY_HINTS);
    //noinspection ConstantConditions
    QueryProcessor.executeInternal(query).forEach(row -> migrateLegacyHints(row.getUUID("target_id"), buffer));
    FileUtils.clean(buffer);
}
Example 10: internalClose
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
@Override
protected void internalClose()
{
    if (FileUtils.isCleanerAvailable())
        FileUtils.clean(buffer);
    super.internalClose();
}
Example 11: write
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
private void write(HintsWriter writer, long timestamp) throws IOException
{
    ByteBuffer buffer = ByteBuffer.allocateDirect(256 * 1024);
    try (HintsWriter.Session session = writer.newSession(buffer))
    {
        write(session, timestamp);
    }
    FileUtils.clean(buffer);
}
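One caveat in the example above: if write(session, timestamp) or closing the session throws, the direct buffer is never cleaned. A hedged sketch of a variant that releases it on every path (the same method, with the clean moved into a finally block):
private void write(HintsWriter writer, long timestamp) throws IOException
{
    ByteBuffer buffer = ByteBuffer.allocateDirect(256 * 1024);
    try (HintsWriter.Session session = writer.newSession(buffer))
    {
        write(session, timestamp);
    }
    finally
    {
        // Release the off-heap buffer even if the write or the session close fails.
        FileUtils.clean(buffer);
    }
}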
Example 12: free
import org.apache.cassandra.io.util.FileUtils; // import the package/class that the method depends on
void free()
{
    FileUtils.clean(slab);
}