当前位置: 首页>>代码示例>>Java>>正文


Java ECChunk类代码示例

本文整理汇总了Java中org.apache.hadoop.io.erasurecode.ECChunk的典型用法代码示例。如果您正苦于以下问题:Java ECChunk类的具体用法?Java ECChunk怎么用?Java ECChunk使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


ECChunk类属于org.apache.hadoop.io.erasurecode包,在下文中一共展示了ECChunk类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testInputPosition

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Verifies that both encode and decode fully consume their input buffers,
 * i.e. each input buffer's position is advanced to its limit afterwards.
 */
protected void testInputPosition(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders();
  prepareBufferAllocator(false);

  // Encode and check that every data buffer was read to the end.
  ECChunk[] toEncode = prepareDataChunksForEncoding();
  ECChunk[] parities = prepareParityChunksForEncoding();
  ECChunk[] dataBackup = cloneChunksWithData(toEncode);
  encoder.encode(toEncode, parities);
  verifyBufferPositionAtEnd(toEncode);

  // Decode and check that every surviving input buffer was read to the end.
  backupAndEraseChunks(dataBackup, parities);
  ECChunk[] decodeInputs =
      prepareInputChunksForDecoding(dataBackup, parities);
  ensureOnlyLeastRequiredChunks(decodeInputs);
  ECChunk[] outputs = prepareOutputChunksForDecoding();
  decoder.decode(decodeInputs, getErasedIndexesForDecoding(), outputs);
  verifyBufferPositionAtEnd(decodeInputs);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:26,代码来源:TestRawCoderBase.java

示例2: testCoding

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Round-trip test for the dummy coder: encode and verify the parity output
 * is all-zero chunks, then erase and decode and verify the recovered
 * output is all-zero chunks as well.
 */
@Override
protected void testCoding(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders();

  prepareBufferAllocator(true);
  setAllowChangeInputs(false);

  // Encode freshly generated data; expect zero-filled parity.
  ECChunk[] data = prepareDataChunksForEncoding();
  markChunks(data);
  ECChunk[] parity = prepareParityChunksForEncoding();
  encoder.encode(data, parity);
  compareAndVerify(parity, getEmptyChunks(parity.length));

  // Erase some chunks and decode; expect zero-filled recovered chunks.
  restoreChunksFromMark(data);
  backupAndEraseChunks(data, parity);
  ECChunk[] decodeInputs = prepareInputChunksForDecoding(data, parity);
  ensureOnlyLeastRequiredChunks(decodeInputs);
  ECChunk[] recovered = prepareOutputChunksForDecoding();
  decoder.decode(decodeInputs, getErasedIndexesForDecoding(), recovered);
  compareAndVerify(recovered, getEmptyChunks(recovered.length));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:26,代码来源:TestDummyRawCoder.java

示例3: toBuffers

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Convert an array of ECChunks into the array of ByteBuffers they wrap.
 * A null chunk maps to a null buffer. For a chunk reporting
 * {@code isAllZero()}, the remaining region of its buffer is reset via
 * {@code CoderUtil.resetBuffer} — presumably to guarantee the buffer
 * content is actually zeroed before the coder reads it (confirm against
 * resetBuffer's implementation).
 *
 * @param chunks chunks to convert into buffers; may contain null elements
 * @return an array of ByteBuffers where element i backs chunks[i]
 */
static ByteBuffer[] toBuffers(ECChunk[] chunks) {
  ByteBuffer[] buffers = new ByteBuffer[chunks.length];

  for (int i = 0; i < chunks.length; i++) {
    ECChunk chunk = chunks[i];
    if (chunk == null) {
      buffers[i] = null;
    } else {
      buffers[i] = chunk.getBuffer();
      if (chunk.isAllZero()) {
        CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
      }
    }
  }

  return buffers;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:24,代码来源:CoderUtil.java

示例4: testInputPosition

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Verifies that both encode and decode fully consume their input buffers,
 * i.e. each input buffer's position is advanced to its limit afterwards.
 */
protected void testInputPosition(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders(true);
  prepareBufferAllocator(false);

  // Encode and check that every data buffer was read to the end.
  ECChunk[] toEncode = prepareDataChunksForEncoding();
  ECChunk[] parities = prepareParityChunksForEncoding();
  ECChunk[] dataBackup = cloneChunksWithData(toEncode);
  encoder.encode(toEncode, parities);
  verifyBufferPositionAtEnd(toEncode);

  // Decode and check that every surviving input buffer was read to the end.
  backupAndEraseChunks(dataBackup, parities);
  ECChunk[] decodeInputs =
      prepareInputChunksForDecoding(dataBackup, parities);
  ensureOnlyLeastRequiredChunks(decodeInputs);
  ECChunk[] outputs = prepareOutputChunksForDecoding();
  decoder.decode(decodeInputs, getErasedIndexesForDecoding(), outputs);
  verifyBufferPositionAtEnd(decodeInputs);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:26,代码来源:TestRawCoderBase.java

示例5: testCoding

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Round-trip test for the dummy coder: encode and verify the parity output
 * is all-zero chunks, then erase and decode and verify the recovered
 * output is all-zero chunks as well.
 */
@Override
protected void testCoding(boolean usingDirectBuffer) {
  this.usingDirectBuffer = usingDirectBuffer;
  prepareCoders(true);

  prepareBufferAllocator(true);
  setAllowChangeInputs(false);

  // Encode freshly generated data; expect zero-filled parity.
  ECChunk[] data = prepareDataChunksForEncoding();
  markChunks(data);
  ECChunk[] parity = prepareParityChunksForEncoding();
  encoder.encode(data, parity);
  compareAndVerify(parity, getEmptyChunks(parity.length));

  // Erase some chunks and decode; expect zero-filled recovered chunks.
  restoreChunksFromMark(data);
  backupAndEraseChunks(data, parity);
  ECChunk[] decodeInputs = prepareInputChunksForDecoding(data, parity);
  ensureOnlyLeastRequiredChunks(decodeInputs);
  ECChunk[] recovered = prepareOutputChunksForDecoding();
  decoder.decode(decodeInputs, getErasedIndexesForDecoding(), recovered);
  compareAndVerify(recovered, getEmptyChunks(recovered.length));
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:26,代码来源:TestDummyRawCoder.java

示例6: performCoding

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Perform this decoding step on chunk inputs/outputs by unwrapping them
 * into ByteBuffers and delegating to the buffer-based overload.
 * A no-op when there are no erased indexes to recover.
 */
@Override
public void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks) {
  if (erasedIndexes.length == 0) {
    return; // nothing erased, nothing to do
  }
  performCoding(ECChunk.toBuffers(inputChunks),
      ECChunk.toBuffers(outputChunks));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:11,代码来源:HHXORErasureDecodingStep.java

示例7: dumpChunks

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Print the data of each chunk in hex, one chunk per line, framed by blank
 * lines and preceded by a header label.
 *
 * @param header label printed before the chunk dump
 * @param chunks chunks to dump; null elements are printed as a placeholder
 *               (see {@code dumpChunk})
 */
public static void dumpChunks(String header, ECChunk[] chunks) {
  System.out.println();
  System.out.println(header);
  // Enhanced-for: the index was unused apart from element access.
  for (ECChunk chunk : chunks) {
    dumpChunk(chunk);
  }
  System.out.println();
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:14,代码来源:DumpUtil.java

示例8: dumpChunk

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Print a single chunk's data in hex format, or a placeholder when the
 * chunk is null.
 *
 * @param chunk the chunk to dump; may be null
 */
public static void dumpChunk(ECChunk chunk) {
  if (chunk == null) {
    System.out.println("<EMPTY>");
    return;
  }
  byte[] bytes = chunk.toBytesArray();
  // 16 is the formatting width argument to bytesToHex — semantics are
  // defined there (presumably bytes per output line; confirm).
  System.out.println(DumpUtil.bytesToHex(bytes, 16));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:15,代码来源:DumpUtil.java

示例9: decode

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Decode chunk inputs by unwrapping them into ByteBuffers and delegating
 * to the buffer-based decode overload.
 */
@Override
public void decode(ECChunk[] inputs, int[] erasedIndexes,
                   ECChunk[] outputs) {
  decode(ECChunk.toBuffers(inputs), erasedIndexes,
      ECChunk.toBuffers(outputs));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:8,代码来源:AbstractRawErasureDecoder.java

示例10: performCodingStep

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
@Override
protected void performCodingStep(ErasureCodingStep codingStep) {
  // Drives one coding step over whole blocks, sub-packet by sub-packet:
  // HH coders consume subPacketSize chunks per block in a single call, so
  // the chunk arrays are sized blockCount * subPacketSize and laid out
  // with the block index varying fastest (chunk k of block j lands at
  // index k * blockCount + j).
  // Pretend that we're opening these input blocks and output blocks.
  ECBlock[] inputBlocks = codingStep.getInputBlocks();
  ECBlock[] outputBlocks = codingStep.getOutputBlocks();
  // We allocate input and output chunks accordingly.
  ECChunk[] inputChunks = new ECChunk[inputBlocks.length * subPacketSize];
  ECChunk[] outputChunks = new ECChunk[outputBlocks.length * subPacketSize];

  // i advances by subPacketSize: each iteration processes one sub-packet
  // group of chunks from every block. Assumes numChunksInBlock is a
  // multiple of subPacketSize — TODO confirm where these are set.
  for (int i = 0; i < numChunksInBlock; i += subPacketSize) {
    // Pretend that we're reading input chunks from input blocks.
    for (int k = 0; k < subPacketSize; ++k) {
      for (int j = 0; j < inputBlocks.length; ++j) {
        inputChunks[k * inputBlocks.length + j] = ((TestBlock)
                inputBlocks[j]).chunks[i + k];
      }

      // Pretend that we allocate and will write output results to the blocks.
      for (int j = 0; j < outputBlocks.length; ++j) {
        // The freshly allocated chunk is stored both in the flat array
        // (handed to the coder) and in the block (retained for later
        // verification).
        outputChunks[k * outputBlocks.length + j] = allocateOutputChunk();
        ((TestBlock) outputBlocks[j]).chunks[i + k] =
                outputChunks[k * outputBlocks.length + j];
      }
    }

    // Given the input chunks and output chunk buffers, just call it !
    codingStep.performCoding(inputChunks, outputChunks);
  }

  codingStep.finish();
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:32,代码来源:TestHHErasureCoderBase.java

示例11: performCodingStep

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * This is typically how a coding step should be performed: walk the
 * blocks chunk-by-chunk, gather one input chunk per input block, allocate
 * one output chunk per output block (also stored back into the block for
 * later verification), and invoke the step once per chunk position.
 *
 * @param codingStep the step to drive
 */
protected void performCodingStep(ErasureCodingStep codingStep) {
  // Pretend we open the step's input and output blocks.
  ECBlock[] inputBlocks = codingStep.getInputBlocks();
  ECBlock[] outputBlocks = codingStep.getOutputBlocks();
  // One chunk slot per block for each round.
  ECChunk[] inputChunks = new ECChunk[inputBlocks.length];
  ECChunk[] outputChunks = new ECChunk[outputBlocks.length];

  for (int chunkIdx = 0; chunkIdx < numChunksInBlock; ++chunkIdx) {
    // Pretend we read the current chunk from every input block.
    for (int blockIdx = 0; blockIdx < inputBlocks.length; ++blockIdx) {
      inputChunks[blockIdx] =
          ((TestBlock) inputBlocks[blockIdx]).chunks[chunkIdx];
    }

    // Allocate output chunks; keep them in the blocks as well so results
    // can be inspected afterwards.
    for (int blockIdx = 0; blockIdx < outputBlocks.length; ++blockIdx) {
      outputChunks[blockIdx] = allocateOutputChunk();
      ((TestBlock) outputBlocks[blockIdx]).chunks[chunkIdx] =
          outputChunks[blockIdx];
    }

    // With inputs gathered and outputs allocated, run the step.
    codingStep.performCoding(inputChunks, outputChunks);
  }

  codingStep.finish();
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:31,代码来源:TestErasureCoderBase.java

示例12: generateDataBlock

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Generate random data chunks and wrap them in a test data block.
 *
 * @return a TestBlock holding numChunksInBlock freshly generated chunks
 */
protected ECBlock generateDataBlock() {
  ECChunk[] chunks = new ECChunk[numChunksInBlock];
  for (int idx = 0; idx < chunks.length; ++idx) {
    chunks[idx] = generateDataChunk();
  }
  return new TestBlock(chunks);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:14,代码来源:TestErasureCoderBase.java

示例13: ensureOnlyLeastRequiredChunks

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Null out surplus good input chunks so the decoder is fed exactly the
 * least number of inputs it requires (numDataUnits). Scans from the front
 * and drops good chunks until no surplus remains.
 */
protected void ensureOnlyLeastRequiredChunks(ECChunk[] inputChunks) {
  int erasedCount = erasedDataIndexes.length + erasedParityIndexes.length;
  int goodCount = inputChunks.length - erasedCount;
  int surplus = goodCount - numDataUnits;

  for (int i = 0; surplus > 0 && i < inputChunks.length; i++) {
    if (inputChunks[i] == null) {
      continue; // already erased
    }
    // Drop this chunk — the decoder should not actually need it.
    inputChunks[i] = null;
    surplus--;
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:14,代码来源:TestRawCoderBase.java

示例14: verifyBufferPositionAtEnd

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Assert that every non-null chunk's buffer has been fully consumed,
 * i.e. no bytes remain between position and limit.
 */
private void verifyBufferPositionAtEnd(ECChunk[] inputChunks) {
  for (ECChunk chunk : inputChunks) {
    if (chunk == null) {
      continue; // erased slot — nothing to check
    }
    Assert.assertEquals(0, chunk.getBuffer().remaining());
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:8,代码来源:TestRawCoderBase.java

示例15: getEmptyChunks

import org.apache.hadoop.io.erasurecode.ECChunk; //导入依赖的package包/类
/**
 * Build an array of chunks whose contents are all zero bytes, for
 * comparing against the dummy coder's output.
 *
 * @param num number of zero chunks to create
 * @return num all-zero chunks
 */
private ECChunk[] getEmptyChunks(int num) {
  ECChunk[] chunks = new ECChunk[num];
  for (int i = 0; i < num; ++i) {
    chunks[i] = new ECChunk(ByteBuffer.wrap(getZeroChunkBytes()));
  }
  return chunks;
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:8,代码来源:TestDummyRawCoder.java


注:本文中的org.apache.hadoop.io.erasurecode.ECChunk类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。