This article collects typical usage examples of the Java class org.apache.hadoop.io.erasurecode.ECChunk. If you are wondering what the ECChunk class does and how to use it, the curated code examples below may help.
ECChunk belongs to the org.apache.hadoop.io.erasurecode package. Fifteen code examples are shown, ordered by popularity by default.
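Before the examples, here is a minimal standalone sketch of constructing an ECChunk and reading it back. It uses only the constructor and accessors that appear in the examples below (ECChunk(ByteBuffer), getBuffer(), toBytesArray()):

import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.ECChunk;

public class ECChunkIntro {
  public static void main(String[] args) {
    // An ECChunk is a thin wrapper around a ByteBuffer holding one
    // coding unit's worth of data.
    byte[] data = new byte[]{1, 2, 3, 4};
    ECChunk chunk = new ECChunk(ByteBuffer.wrap(data));

    // The wrapped buffer is accessible directly ...
    ByteBuffer buffer = chunk.getBuffer();
    System.out.println("remaining bytes: " + buffer.remaining()); // 4

    // ... or as a byte array copy.
    byte[] copy = chunk.toBytesArray();
    System.out.println("first byte: " + copy[0]); // 1
  }
}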
Example 1: testInputPosition
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Tests that the input buffers' positions are moved to the end after
 * encode/decode.
 */
protected void testInputPosition(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders();
  prepareBufferAllocator(false);

  // Verify encode
  ECChunk[] dataChunks = prepareDataChunksForEncoding();
  ECChunk[] parityChunks = prepareParityChunksForEncoding();
  ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
  encoder.encode(dataChunks, parityChunks);
  verifyBufferPositionAtEnd(dataChunks);

  // Verify decode
  backupAndEraseChunks(clonedDataChunks, parityChunks);
  ECChunk[] inputChunks = prepareInputChunksForDecoding(
      clonedDataChunks, parityChunks);
  ensureOnlyLeastRequiredChunks(inputChunks);
  ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
  decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
  verifyBufferPositionAtEnd(inputChunks);
}
Example 2: testCoding
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
@Override
protected void testCoding(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders();
  prepareBufferAllocator(true);
  setAllowChangeInputs(false);

  // Generate data and encode
  ECChunk[] dataChunks = prepareDataChunksForEncoding();
  markChunks(dataChunks);
  ECChunk[] parityChunks = prepareParityChunksForEncoding();
  encoder.encode(dataChunks, parityChunks);
  compareAndVerify(parityChunks, getEmptyChunks(parityChunks.length));

  // Decode
  restoreChunksFromMark(dataChunks);
  backupAndEraseChunks(dataChunks, parityChunks);
  ECChunk[] inputChunks = prepareInputChunksForDecoding(
      dataChunks, parityChunks);
  ensureOnlyLeastRequiredChunks(inputChunks);
  ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
  decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
  compareAndVerify(recoveredChunks, getEmptyChunks(recoveredChunks.length));
}
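Two things are worth noting here. First, both the parity and the recovered chunks are compared against zero-filled chunks (see getEmptyChunks in Example 15), which suggests this test exercises a no-op dummy coder whose outputs are expected to stay empty. Second, markChunks/restoreChunksFromMark let the test reuse the same data chunks for decoding after encoding has consumed them. A hedged sketch of what such mark/restore helpers plausibly do with the underlying ByteBuffers (the helper names below are illustrative, not the actual implementation):

import java.nio.ByteBuffer;

final class BufferMarkSketch {
  // Remember the buffer's current position before the encoder consumes it.
  static void markBuffer(ByteBuffer buf) {
    buf.mark();
  }

  // Jump back to the marked position so the data can be read again.
  static void restoreBuffer(ByteBuffer buf) {
    buf.reset();
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[]{10, 20, 30});
    markBuffer(buf);
    buf.get();                           // simulate the encoder reading data
    buf.get();
    restoreBuffer(buf);
    System.out.println(buf.remaining()); // 3 again: all data readable once more
  }
}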
Example 3: toBuffers
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Convert an array of chunks to an array of ByteBuffers.
 * @param chunks the chunks to convert into buffers
 * @return an array of ByteBuffers
 */
static ByteBuffer[] toBuffers(ECChunk[] chunks) {
  ByteBuffer[] buffers = new ByteBuffer[chunks.length];
  ECChunk chunk;
  for (int i = 0; i < chunks.length; i++) {
    chunk = chunks[i];
    if (chunk == null) {
      // Preserve nulls: an erased unit has no buffer.
      buffers[i] = null;
    } else {
      buffers[i] = chunk.getBuffer();
      if (chunk.isAllZero()) {
        CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
      }
    }
  }
  return buffers;
}
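A minimal usage sketch for toBuffers: null entries (erased units) pass through as null buffers, which follows directly from the loop above.

import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.ECChunk;

public class ToBuffersDemo {
  public static void main(String[] args) {
    ECChunk[] chunks = new ECChunk[]{
        new ECChunk(ByteBuffer.wrap(new byte[]{1, 2})),
        null, // an erased unit stays null
        new ECChunk(ByteBuffer.wrap(new byte[]{3, 4}))
    };
    ByteBuffer[] buffers = ECChunk.toBuffers(chunks);
    System.out.println(buffers[1] == null);     // true
    System.out.println(buffers[0].remaining()); // 2
  }
}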
Example 4: testInputPosition
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Tests that the input buffers' positions are moved to the end after
 * encode/decode.
 */
protected void testInputPosition(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders(true);
  prepareBufferAllocator(false);

  // Verify encode
  ECChunk[] dataChunks = prepareDataChunksForEncoding();
  ECChunk[] parityChunks = prepareParityChunksForEncoding();
  ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
  encoder.encode(dataChunks, parityChunks);
  verifyBufferPositionAtEnd(dataChunks);

  // Verify decode
  backupAndEraseChunks(clonedDataChunks, parityChunks);
  ECChunk[] inputChunks = prepareInputChunksForDecoding(
      clonedDataChunks, parityChunks);
  ensureOnlyLeastRequiredChunks(inputChunks);
  ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
  decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
  verifyBufferPositionAtEnd(inputChunks);
}
Example 5: testCoding
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
@Override
protected void testCoding(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders(true);
  prepareBufferAllocator(true);
  setAllowChangeInputs(false);

  // Generate data and encode
  ECChunk[] dataChunks = prepareDataChunksForEncoding();
  markChunks(dataChunks);
  ECChunk[] parityChunks = prepareParityChunksForEncoding();
  encoder.encode(dataChunks, parityChunks);
  compareAndVerify(parityChunks, getEmptyChunks(parityChunks.length));

  // Decode
  restoreChunksFromMark(dataChunks);
  backupAndEraseChunks(dataChunks, parityChunks);
  ECChunk[] inputChunks = prepareInputChunksForDecoding(
      dataChunks, parityChunks);
  ensureOnlyLeastRequiredChunks(inputChunks);
  ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
  decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
  compareAndVerify(recoveredChunks, getEmptyChunks(recoveredChunks.length));
}
Example 6: performCoding
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
@Override
public void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks) {
  if (erasedIndexes.length == 0) {
    // Nothing was erased, so there is nothing to recover.
    return;
  }

  ByteBuffer[] inputBuffers = ECChunk.toBuffers(inputChunks);
  ByteBuffer[] outputBuffers = ECChunk.toBuffers(outputChunks);
  performCoding(inputBuffers, outputBuffers);
}
Example 7: dumpChunks
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Print the data of an array of chunks in hex format.
 * @param header a header line printed before the chunks
 * @param chunks the chunks to dump
 */
public static void dumpChunks(String header, ECChunk[] chunks) {
  System.out.println();
  System.out.println(header);
  for (int i = 0; i < chunks.length; i++) {
    dumpChunk(chunks[i]);
  }
  System.out.println();
}
Example 8: dumpChunk
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Print the data of a chunk in hex format.
 * @param chunk the chunk to dump; may be null
 */
public static void dumpChunk(ECChunk chunk) {
  String str;
  if (chunk == null) {
    str = "<EMPTY>";
  } else {
    byte[] bytes = chunk.toBytesArray();
    str = DumpUtil.bytesToHex(bytes, 16);
  }
  System.out.println(str);
}
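A small usage sketch for the two dump helpers, handy when eyeballing buffers in a failing coding test. This assumes the helpers live in DumpUtil under org.apache.hadoop.io.erasurecode.rawcoder.util (Example 8 already references DumpUtil.bytesToHex):

import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.ECChunk;
import org.apache.hadoop.io.erasurecode.rawcoder.util.DumpUtil;

public class DumpDemo {
  public static void main(String[] args) {
    ECChunk chunk = new ECChunk(ByteBuffer.wrap(new byte[]{0x0A, 0x0B}));
    // Prints the header, then one hex line per chunk; a null entry
    // prints <EMPTY> (see dumpChunk above).
    DumpUtil.dumpChunks("after encode", new ECChunk[]{chunk, null});
  }
}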
Example 9: decode
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
@Override
public void decode(ECChunk[] inputs, int[] erasedIndexes,
                   ECChunk[] outputs) {
  ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
  ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
  decode(newInputs, erasedIndexes, newOutputs);
}
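Examples 6 and 9 show the same adapter pattern: a chunk-level entry point converts its arguments with ECChunk.toBuffers and delegates to a buffer-level implementation. A hedged, self-contained sketch of that pattern (BufferLevelDecoder is a made-up name for illustration, not a Hadoop class):

import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.ECChunk;

abstract class BufferLevelDecoder {
  // The buffer-level workhorse, e.g. a raw Reed-Solomon decoder.
  abstract void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                       ByteBuffer[] outputs);

  // Chunk-level convenience wrapper, mirroring Example 9 above.
  void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) {
    decode(ECChunk.toBuffers(inputs), erasedIndexes,
           ECChunk.toBuffers(outputs));
  }
}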
Example 10: performCodingStep
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
@Override
protected void performCodingStep(ErasureCodingStep codingStep) {
  // Pretend that we're opening these input blocks and output blocks.
  ECBlock[] inputBlocks = codingStep.getInputBlocks();
  ECBlock[] outputBlocks = codingStep.getOutputBlocks();

  // We allocate input and output chunks accordingly.
  ECChunk[] inputChunks = new ECChunk[inputBlocks.length * subPacketSize];
  ECChunk[] outputChunks = new ECChunk[outputBlocks.length * subPacketSize];

  for (int i = 0; i < numChunksInBlock; i += subPacketSize) {
    // Pretend that we're reading input chunks from the input blocks.
    for (int k = 0; k < subPacketSize; ++k) {
      for (int j = 0; j < inputBlocks.length; ++j) {
        inputChunks[k * inputBlocks.length + j] =
            ((TestBlock) inputBlocks[j]).chunks[i + k];
      }
      // Pretend that we allocate and will write output results to the blocks.
      for (int j = 0; j < outputBlocks.length; ++j) {
        outputChunks[k * outputBlocks.length + j] = allocateOutputChunk();
        ((TestBlock) outputBlocks[j]).chunks[i + k] =
            outputChunks[k * outputBlocks.length + j];
      }
    }
    // Given the input chunks and output chunk buffers, just call it!
    codingStep.performCoding(inputChunks, outputChunks);
  }
  codingStep.finish();
}
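The only subtlety in this variant is the flattened index layout: with sub-packetization, sub-chunk k of block j lands at slot k * numBlocks + j, so each sub-chunk stripe is contiguous. A tiny standalone check of that layout (the sizes are arbitrary, for illustration):

public class IndexLayoutDemo {
  public static void main(String[] args) {
    int subPacketSize = 2;
    int numBlocks = 3;
    for (int k = 0; k < subPacketSize; ++k) {
      for (int j = 0; j < numBlocks; ++j) {
        // Matches inputChunks[k * inputBlocks.length + j] in Example 10.
        System.out.printf("block %d, sub-chunk %d -> slot %d%n",
            j, k, k * numBlocks + j);
      }
    }
    // Slots 0..2 hold sub-chunk 0 of blocks 0..2; slots 3..5 hold sub-chunk 1.
  }
}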
Example 11: performCodingStep
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * This is typically how a coding step should be performed.
 * @param codingStep the coding step to perform
 */
protected void performCodingStep(ErasureCodingStep codingStep) {
  // Pretend that we're opening these input blocks and output blocks.
  ECBlock[] inputBlocks = codingStep.getInputBlocks();
  ECBlock[] outputBlocks = codingStep.getOutputBlocks();

  // We allocate input and output chunks accordingly.
  ECChunk[] inputChunks = new ECChunk[inputBlocks.length];
  ECChunk[] outputChunks = new ECChunk[outputBlocks.length];

  for (int i = 0; i < numChunksInBlock; ++i) {
    // Pretend that we're reading input chunks from the input blocks.
    for (int j = 0; j < inputBlocks.length; ++j) {
      inputChunks[j] = ((TestBlock) inputBlocks[j]).chunks[i];
    }
    // Pretend that we allocate and will write output results to the blocks.
    for (int j = 0; j < outputBlocks.length; ++j) {
      outputChunks[j] = allocateOutputChunk();
      ((TestBlock) outputBlocks[j]).chunks[i] = outputChunks[j];
    }
    // Given the input chunks and output chunk buffers, just call it!
    codingStep.performCoding(inputChunks, outputChunks);
  }
  codingStep.finish();
}
Example 12: generateDataBlock
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
/**
 * Generate random data and return a data block.
 * @return a test block wrapping the generated chunks
 */
protected ECBlock generateDataBlock() {
  ECChunk[] chunks = new ECChunk[numChunksInBlock];
  for (int i = 0; i < numChunksInBlock; ++i) {
    chunks[i] = generateDataChunk();
  }
  return new TestBlock(chunks);
}
Example 13: ensureOnlyLeastRequiredChunks
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
protected void ensureOnlyLeastRequiredChunks(ECChunk[] inputChunks) {
  int leastRequiredNum = numDataUnits;
  int erasedNum = erasedDataIndexes.length + erasedParityIndexes.length;
  int goodNum = inputChunks.length - erasedNum;
  int redundantNum = goodNum - leastRequiredNum;
  for (int i = 0; i < inputChunks.length && redundantNum > 0; i++) {
    if (inputChunks[i] != null) {
      inputChunks[i] = null; // Null it out; it isn't actually needed.
      redundantNum--;
    }
  }
}
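The arithmetic is easy to sanity-check with concrete numbers. For an assumed RS(6,3)-style setup with 9 input slots and 2 erased units:

public class RedundancyMathDemo {
  public static void main(String[] args) {
    int numDataUnits = 6;   // the least number of chunks required to decode
    int totalInputs = 9;    // data + parity slots (assumed RS(6,3))
    int erasedNum = 2;      // erased data + parity units
    int goodNum = totalInputs - erasedNum;     // 7 surviving chunks
    int redundantNum = goodNum - numDataUnits; // 1 chunk may be nulled out
    System.out.println("redundant chunks to drop: " + redundantNum); // 1
  }
}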
Example 14: verifyBufferPositionAtEnd
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
private void verifyBufferPositionAtEnd(ECChunk[] inputChunks) {
  for (ECChunk chunk : inputChunks) {
    if (chunk != null) {
      Assert.assertEquals(0, chunk.getBuffer().remaining());
    }
  }
}
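The assertion works because ByteBuffer.remaining() is limit minus position: it reaches zero exactly when the coder has read the buffer through to its limit. A one-liner check:

import java.nio.ByteBuffer;

public class RemainingDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[]{1, 2, 3});
    buf.position(buf.limit());           // simulate a coder consuming it all
    System.out.println(buf.remaining()); // 0, i.e. position is at the end
  }
}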
Example 15: getEmptyChunks
import org.apache.hadoop.io.erasurecode.ECChunk; // import the dependent package/class
private ECChunk[] getEmptyChunks(int num) {
  ECChunk[] chunks = new ECChunk[num];
  for (int i = 0; i < chunks.length; i++) {
    chunks[i] = new ECChunk(ByteBuffer.wrap(getZeroChunkBytes()));
  }
  return chunks;
}
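A standalone variant of this factory, with the test fixture's getZeroChunkBytes() replaced by an explicit chunk size (an assumption for illustration). It relies on the JVM zero-filling newly allocated byte arrays:

import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.ECChunk;

public class EmptyChunksDemo {
  static ECChunk[] getEmptyChunks(int num, int chunkSize) {
    ECChunk[] chunks = new ECChunk[num];
    for (int i = 0; i < num; i++) {
      // new byte[chunkSize] is zero-filled, so each chunk is all zeros.
      chunks[i] = new ECChunk(ByteBuffer.wrap(new byte[chunkSize]));
    }
    return chunks;
  }

  public static void main(String[] args) {
    ECChunk[] chunks = getEmptyChunks(3, 16);
    System.out.println(chunks.length + " zero-filled chunks of 16 bytes each");
  }
}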