

Java BytesWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.BytesWritable, gathered from open-source projects. If you are unsure what BytesWritable is for, or how to use it in practice, the curated class examples below should help.


The BytesWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default.
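Before turning to the project examples, here is a minimal, self-contained sketch of the core BytesWritable contract that several of them depend on. It is not from any of the projects below and assumes only a Hadoop 2.x client on the classpath. The key point: getBytes() returns the whole backing buffer, which may be longer than the logical content, so it must always be paired with getLength() (or replaced by copyBytes()).

import org.apache.hadoop.io.BytesWritable;

public class BytesWritableSketch {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable(new byte[] {1, 2, 3});
    bw.setCapacity(16);                        // grow the backing buffer
    System.out.println(bw.getLength());        // 3: logical length is unchanged
    System.out.println(bw.getBytes().length);  // 16: the raw backing buffer
    byte[] exact = bw.copyBytes();             // fresh copy trimmed to getLength()
    System.out.println(exact.length);          // 3
  }
}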

Example 1: doVerify

import org.apache.hadoop.io.BytesWritable; // import the required package/class
protected void doVerify(Configuration conf, HTableDescriptor htd) throws Exception {
  Path outputDir = getTestDir(TEST_NAME, "verify-output");
  LOG.info("Verify output dir: " + outputDir);

  Job job = Job.getInstance(conf);
  job.setJarByClass(this.getClass());
  job.setJobName(TEST_NAME + " Verification for " + htd.getTableName());
  setJobScannerConf(job);

  Scan scan = new Scan();

  TableMapReduceUtil.initTableMapperJob(
      htd.getTableName().getNameAsString(), scan, VerifyMapper.class,
      BytesWritable.class, BytesWritable.class, job);
  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
  int scannerCaching = conf.getInt("verify.scannercaching", SCANNER_CACHING);
  TableMapReduceUtil.setScannerCaching(job, scannerCaching);

  job.setReducerClass(VerifyReducer.class);
  job.setNumReduceTasks(conf.getInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT));
  FileOutputFormat.setOutputPath(job, outputDir);
  assertTrue(job.waitForCompletion(true));

  long numOutputRecords = job.getCounters().findCounter(Counters.ROWS_WRITTEN).getValue();
  assertEquals(0, numOutputRecords);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: IntegrationTestLoadAndVerify.java

Example 2: readObject

import org.apache.hadoop.io.BytesWritable; // import the required package/class
private void readObject(Writable obj) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the transmitted length to set the object's
  // length, so that the "obvious" translations work: a string "abc" emitted
  // from C++ shows up here as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: BinaryProtocol.java

Example 3: getValue

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known in advance.
 *
 * @param value the BytesWritable that receives the value bytes
 * @return the length of the value in bytes
 * @throws IOException if reading the value stream fails
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 25, Source: TFile.java
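For context, a hedged sketch of how getValue(BytesWritable) is typically reached through the public TFile API. The path is hypothetical; the reader, scanner, and entry calls follow the org.apache.hadoop.io.file.tfile.TFile javadoc, and imports from org.apache.hadoop.fs and org.apache.hadoop.conf are assumed.

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path path = new Path("/tmp/example.tfile"); // hypothetical input file
try (FSDataInputStream in = fs.open(path)) {
  TFile.Reader reader = new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
  TFile.Reader.Scanner scanner = reader.createScanner();
  BytesWritable value = new BytesWritable(); // reused; resized on each call
  while (!scanner.atEnd()) {
    scanner.entry().getValue(value);
    scanner.advance();
  }
  scanner.close();
  reader.close();
}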

Example 4: SeqFileAppendable

import org.apache.hadoop.io.BytesWritable; // import the required package/class
public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize,
    String compress, int minBlkSize) throws IOException {
  Configuration conf = new Configuration();

  CompressionCodec codec = null;
  if ("lzo".equals(compress)) {
    codec = Compression.Algorithm.LZO.getCodec();
  }
  else if ("gz".equals(compress)) {
    codec = Compression.Algorithm.GZ.getCodec();
  }
  else if (!"none".equals(compress))
    throw new IOException("Codec not supported.");

  this.fsdos = fs.create(path, true, osBufferSize);

  if (!"none".equals(compress)) {
    writer =
        SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
            BytesWritable.class, SequenceFile.CompressionType.BLOCK, codec);
  }
  else {
    writer =
        SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
            BytesWritable.class, SequenceFile.CompressionType.NONE, null);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 28, Source: TestTFileSeqFileComparison.java

Example 5: testNestedIterable

import org.apache.hadoop.io.BytesWritable; // import the required package/class
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestJoinTupleWritable.java

Example 6: evaluate

import org.apache.hadoop.io.BytesWritable; // import the required package/class
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  if (digest == null) {
    return null;
  }

  digest.reset();
  if (isStr) {
    Text n = getTextValue(arguments, 0, converters);
    if (n == null) {
      return null;
    }
    digest.update(n.getBytes(), 0, n.getLength());
  } else {
    BytesWritable bWr = getBinaryValue(arguments, 0, converters);
    if (bWr == null) {
      return null;
    }
    digest.update(bWr.getBytes(), 0, bWr.getLength());
  }
  byte[] resBin = digest.digest();
  String resStr = Hex.encodeHexString(resBin);

  output.set(resStr);
  return output;
}
 
Developer: myui, Project: hive-udf-backports, Lines: 27, Source: GenericUDFSha2.java

Example 7: next

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Read raw bytes from a SequenceFile.
 */
public synchronized boolean next(BytesWritable key, BytesWritable val)
    throws IOException {
  if (done) return false;
  long pos = in.getPosition();
  boolean eof = -1 == in.nextRawKey(buffer);
  if (!eof) {
    key.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
    in.nextRawValue(vbytes);
    vbytes.writeUncompressedBytes(buffer);
    val.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
  }
  // done when we hit EOF, or once past the end of the split at a sync point
  return !(done = (eof || (pos >= end && in.syncSeen())));
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: SequenceFileAsBinaryInputFormat.java
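As a usage note, here is a sketch of how a job opts into this raw record reader. It targets the old mapred API that the snippet above comes from; the input path is hypothetical.

JobConf job = new JobConf(conf);
// keys and values then arrive in the mapper as raw BytesWritable records
job.setInputFormat(SequenceFileAsBinaryInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("/data/in")); // hypothetical path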

Example 8: resolve

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Resolves a given identifier. This method has to be called before calling
 * any of the getters.
 */
public void resolve(String identifier) {
  if (identifier.equalsIgnoreCase(RAW_BYTES_ID)) {
    setInputWriterClass(RawBytesInputWriter.class);
    setOutputReaderClass(RawBytesOutputReader.class);
    setOutputKeyClass(BytesWritable.class);
    setOutputValueClass(BytesWritable.class);
  } else if (identifier.equalsIgnoreCase(TYPED_BYTES_ID)) {
    setInputWriterClass(TypedBytesInputWriter.class);
    setOutputReaderClass(TypedBytesOutputReader.class);
    setOutputKeyClass(TypedBytesWritable.class);
    setOutputValueClass(TypedBytesWritable.class);
  } else if (identifier.equalsIgnoreCase(KEY_ONLY_TEXT_ID)) {
    setInputWriterClass(KeyOnlyTextInputWriter.class);
    setOutputReaderClass(KeyOnlyTextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(NullWritable.class);
  } else { // assume TEXT_ID
    setInputWriterClass(TextInputWriter.class);
    setOutputReaderClass(TextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(Text.class);
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: IdentifierResolver.java
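A short usage sketch: resolve() must run before any getter, after which the getters report the classes matched to the identifier. The identifier string and getter names below follow Hadoop Streaming's IdentifierResolver as shown above; treat the exact wiring as an assumption.

IdentifierResolver resolver = new IdentifierResolver();
resolver.resolve("rawbytes");                         // RAW_BYTES_ID
Class<?> keyClass = resolver.getOutputKeyClass();     // BytesWritable.class
Class<?> valueClass = resolver.getOutputValueClass(); // BytesWritable.class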

Example 9: run

import org.apache.hadoop.io.BytesWritable; // import the required package/class
@Override
public void run() {
  for (int i = 0; i < count; i++) {
    try {
      int byteSize = RANDOM.nextInt(BYTE_COUNT);
      byte[] bytes = new byte[byteSize];
      System.arraycopy(BYTES, 0, bytes, 0, byteSize);
      Writable param = new BytesWritable(bytes);
      client.call(param, address);
      Thread.sleep(RANDOM.nextInt(20));
    } catch (Exception e) {
      LOG.fatal("Caught Exception", e);
      failed = true;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestIPCServerResponder.java

Example 10: readFields

import org.apache.hadoop.io.BytesWritable; // import the required package/class
public void readFields(DataInput in) throws IOException {
  // After the RecordStartMark, we expect to get a SEGMENT_HEADER_ID (-1).
  long segmentId = WritableUtils.readVLong(in);
  if (SEGMENT_HEADER_ID != segmentId) {
    throw new IOException("Expected segment header id " + SEGMENT_HEADER_ID
        + "; got " + segmentId);
  }

  // Get the length of the rest of the segment, in bytes.
  long length = WritableUtils.readVLong(in);

  // Now read the actual main byte array.
  if (length > Integer.MAX_VALUE) {
    throw new IOException("Unexpected oversize data array length: "
        + length);
  } else if (length < 0) {
    throw new IOException("Unexpected undersize data array length: "
        + length);
  }
  byte [] segmentData = new byte[(int) length];
  in.readFully(segmentData);
  recordLenBytes = new BytesWritable(segmentData);

  reset(); // Reset the iterator allowing the user to yield offset/lengths.
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 26, Source: LobFile.java

Example 11: accept

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Filtering method.
 * If MD5(key) % frequency == 0, return true; otherwise return false.
 * @see Filter#accept(Object)
 */
public boolean accept(Object key) {
  try {
    long hashcode;
    if (key instanceof Text) {
      hashcode = MD5Hashcode((Text)key);
    } else if (key instanceof BytesWritable) {
      hashcode = MD5Hashcode((BytesWritable)key);
    } else {
      ByteBuffer bb;
      bb = Text.encode(key.toString());
      hashcode = MD5Hashcode(bb.array(), 0, bb.limit());
    }
    // Java guarantees (a / b) * b + (a % b) == a, so this tests hashcode % frequency == 0
    if (hashcode / frequency * frequency == hashcode)
      return true;
  } catch(Exception e) {
    LOG.warn(e);
    throw new RuntimeException(e);
  }
  return false;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: SequenceFileInputFilter.java
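To actually apply this sampling filter, a job sets the filter class and a frequency, keeping roughly one record in N. This sketch uses the old mapred API; the setFilterClass and setFrequency signatures are recalled from SequenceFileInputFilter and its nested MD5Filter and should be treated as assumptions.

JobConf job = new JobConf(conf);
job.setInputFormat(SequenceFileInputFilter.class);
// keep records whose MD5(key) is divisible by 10, i.e. roughly a 10% sample
SequenceFileInputFilter.setFilterClass(job, SequenceFileInputFilter.MD5Filter.class);
SequenceFileInputFilter.MD5Filter.setFrequency(job, 10);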

Example 12: reduce

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * {@inheritDoc}
 */
protected void reduce(final Text key, final Iterable<BytesWritable> values, final Context context) throws IOException, InterruptedException {
    final Configuration configuration = context.getConfiguration();
    final String sourcePath = configuration.get("compactionSourcePath");
    final String targetPath = configuration.get("compactionTargetPath");

    // Reducer stores data at the target directory retaining the directory structure of files
    String filePath = key.toString().replace(sourcePath, targetPath);
    if (key.toString().endsWith("/")) {
        filePath = filePath.concat("file");
    }

    log.info("Compaction output path {}", filePath);
    final URI uri = URI.create(filePath);
    final MultipleOutputs<NullWritable, BytesWritable> multipleOutputs = new MultipleOutputs<>(context);
    try {
        for (final BytesWritable value : values) {
            multipleOutputs.write(NullWritable.get(), value, uri.toString());
        }
    } finally {
        multipleOutputs.close();
    }
}
 
Developer: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines: 26, Source: BytesWritableCompactionReducer.java

Example 13: readObject

import org.apache.hadoop.io.BytesWritable; // import the required package/class
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the transmitted length to set the object's
  // length, so that the "obvious" translations work: a string "abc" emitted
  // from C++ shows up here as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: CommonStub.java

Example 14: configureWaitingJobConf

import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Configure a waiting job
 */
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
                                    Path outputPath, int numMaps, int numRed,
                                    String jobName, String mapSignalFilename,
                                    String redSignalFilename)
throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  jobConf.setInputFormat(RandomInputFormat.class); // note: overrides the SequenceFile input format set above
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(numRed);
  jobConf.setJar("build/test/mapred/testjar/testjob.jar");
  jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
  jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: UtilsForTests.java

Example 15: createFiles

import org.apache.hadoop.io.BytesWritable; // import the required package/class
private static void createFiles(int length, int numFiles, Random random,
  Job job) throws IOException {
  Range[] ranges = createRanges(length, numFiles, random);

  for (int i = 0; i < numFiles; i++) {
    Path file = new Path(workDir, "test_" + i + ".seq");
    // create a file with length entries
    @SuppressWarnings("deprecation")
    SequenceFile.Writer writer =
      SequenceFile.createWriter(localFs, job.getConfiguration(), file,
                                IntWritable.class, BytesWritable.class);
    Range range = ranges[i];
    try {
      for (int j = range.start; j < range.end; j++) {
        IntWritable key = new IntWritable(j);
        byte[] data = new byte[random.nextInt(10)];
        random.nextBytes(data);
        BytesWritable value = new BytesWritable(data);
        writer.append(key, value);
      }
    } finally {
      writer.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestCombineSequenceFileInputFormat.java


Note: the org.apache.hadoop.io.BytesWritable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.