

Java BytesWritable.getLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.BytesWritable.getLength. If you are wondering what exactly BytesWritable.getLength does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.BytesWritable.


A total of 15 code examples of BytesWritable.getLength are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
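Before the project code below, here is a minimal, self-contained sketch of why getLength() matters: the backing array returned by getBytes() can be larger than the valid data, so reads must always be bounded by getLength() rather than getBytes().length. This sketch assumes only that Hadoop's org.apache.hadoop.io.BytesWritable is on the classpath; the class name GetLengthDemo and the sample string are made up for illustration.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;

public class GetLengthDemo {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable("hello".getBytes(StandardCharsets.UTF_8));
    System.out.println(bw.getLength());       // 5: number of valid bytes
    System.out.println(bw.getBytes().length); // capacity of the backing buffer (also 5 here)

    bw.setSize(16); // grow the valid region; the buffer is reallocated and over-sized
    System.out.println(bw.getLength());       // 16
    System.out.println(bw.getBytes().length); // at least 16, typically larger than getLength()

    // Decode only the valid region: bound reads by getLength(), never by getBytes().length.
    String s = new String(bw.getBytes(), 0, 5, StandardCharsets.UTF_8);
    System.out.println(s);                    // hello (the original bytes survive the resize)
  }
}

The same pattern, getBytes() paired with an explicit getLength() bound, appears throughout the examples that follow.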

Example 1: getValue

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 25, Source file: TFile.java

Example 2: map

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
@Override
public void map(LongWritable key, BytesWritable value, Context context)
    throws IOException, InterruptedException {

  String fileName = new String(value.getBytes(), 0,
      value.getLength(), charsetUTF8);
  Path path = new Path(fileName);

  FSDataOutputStream dos =
      FileSystem.create(fs, path, new FsPermission(GRIDMIX_DISTCACHE_FILE_PERM));

  int size = 0;
  for (long bytes = key.get(); bytes > 0; bytes -= size) {
    r.nextBytes(val.getBytes());
    size = (int)Math.min(val.getLength(), bytes);
    dos.write(val.getBytes(), 0, size);// Write to distCache file
  }
  dos.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source file: GenerateDistCacheData.java

Example 3: seekTFile

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.getBytes(), 0, key.getLength());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getLength();
      totalBytes += val.getLength();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 36, Source file: TestTFileSeek.java

Example 4: writeBytesWritable

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
public static void writeBytesWritable(BytesWritable val, int paramIdx,
    int sqlType, PreparedStatement s) throws SQLException {
  if (null == val) {
    s.setNull(paramIdx, sqlType);
  } else {
    // val.getBytes() is only valid in [0, len)
    byte [] rawBytes = val.getBytes();
    int len = val.getLength();
    byte [] outBytes = new byte[len];
    System.arraycopy(rawBytes, 0, outBytes, 0, len);
    s.setBytes(paramIdx, outBytes);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 14, Source file: JdbcWritableBridge.java

Example 5: getString

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
public String getString(Object k) {
  BytesWritable bytes = get(k);
  if (null == bytes) {
    return null;
  } else {
    try {
      return new String(bytes.getBytes(), 0, bytes.getLength(), "UTF-8");
    } catch (UnsupportedEncodingException uee) {
      // Shouldn't happen; UTF-8 is always supported.
      throw new RuntimeException(uee);
    }
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 14, Source file: LobFile.java

Example 6: reduce

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
/** Concatenate map outputs. */
@Override
protected void reduce(LongWritable offset, Iterable<BytesWritable> values,
    Context context) throws IOException, InterruptedException {
  // read map outputs
  for (BytesWritable bytes : values) {
    for (int i = 0; i < bytes.getLength(); i++)
      hex.add(bytes.getBytes()[i]);
  }

  LOG.info("hex.size() = " + hex.size());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: BaileyBorweinPlouffe.java

Example 7: doValidateSetupGenDC

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(
    RecordReader<LongWritable, BytesWritable> reader, FileSystem fs,
    long[] sortedFileSizes) throws IOException, InterruptedException {

  // Validate permissions of dist cache directory
  Path distCacheDir = dce.getDistributedCacheDir();
  assertEquals(
      "Wrong permissions for distributed cache dir " + distCacheDir,
      fs.getFileStatus(distCacheDir).getPermission().getOtherAction()
          .and(FsAction.EXECUTE), FsAction.EXECUTE);

  // Validate the content of the sequence file generated by
  // dce.setupGenerateDistCacheData().
  LongWritable key = new LongWritable();
  BytesWritable val = new BytesWritable();
  for (int i = 0; i < sortedFileSizes.length; i++) {
    assertTrue("Number of files written to the sequence file by "
        + "setupGenerateDistCacheData is less than the expected.",
        reader.nextKeyValue());
    key = reader.getCurrentKey();
    val = reader.getCurrentValue();
    long fileSize = key.get();
    String file = new String(val.getBytes(), 0, val.getLength());

    // Dist Cache files should be sorted based on file size.
    assertEquals("Dist cache file size is wrong.", sortedFileSizes[i],
        fileSize);

    // Validate dist cache file path.

    // parent dir of dist cache file
    Path parent = new Path(file).getParent().makeQualified(fs.getUri(), fs.getWorkingDirectory());
    // should exist in dist cache dir
    assertTrue("Public dist cache file path is wrong.",
        distCacheDir.equals(parent));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source file: TestDistCacheEmulation.java

Example 8: seekTFile

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte [] k = new byte [key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(KeyValue.createKeyValueFromKey(k)) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source file: TestHFileSeek.java

Example 9: map

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
@Override
protected void map(BytesWritable key, NullWritable value, Context output) throws IOException {
  current[i] = new byte[key.getLength()];
  System.arraycopy(key.getBytes(), 0, current[i], 0, key.getLength());
  if (++i == current.length) {
    LOG.info("Persisting current.length=" + current.length + ", count=" + count + ", id=" +
      Bytes.toStringBinary(id) + ", current=" + Bytes.toStringBinary(current[0]) +
      ", i=" + i);
    persist(output, count, prev, current, id);
    i = 0;

    if (first == null) {
      first = current;
    }
    prev = current;
    current = new byte[this.width][];

    count += current.length;
    output.setStatus("Count " + count);

    if (count % wrap == 0) {
      // this block of code turns the 1 million linked list of length 25 into one giant
      //circular linked list of 25 million
      circularLeftShift(first);

      persist(output, -1, prev, first, null);

      first = null;
      prev = null;
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source file: IntegrationTestBigLinkedList.java

Example 10: reduce

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
@Override
public void reduce(BytesWritable key, Iterable<BytesWritable> values, Context context)
    throws IOException, InterruptedException {
  int defCount = 0;
  boolean lostFamilies = false;
  refs.clear();
  for (BytesWritable type : values) {
    if (type.getLength() == DEF.getLength()) {
      defCount++;
      if (type.getBytes()[0] == 1) {
        lostFamilies = true;
      }
    } else {
      byte[] bytes = new byte[type.getLength()];
      System.arraycopy(type.getBytes(), 0, bytes, 0, type.getLength());
      refs.add(bytes);
    }
  }

  // TODO check for more than one def, should not happen
  StringBuilder refsSb = null;
  String keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
  if (defCount == 0 || refs.size() != 1) {
    refsSb = dumpExtraInfoOnRefs(key, context, refs);
    LOG.error("LinkedListError: key=" + keyString + ", reference(s)=" +
      (refsSb != null? refsSb.toString(): ""));
  }
  if (lostFamilies) {
    LOG.error("LinkedListError: key=" + keyString + ", lost big or tiny families");
    context.getCounter(Counts.LOST_FAMILIES).increment(1);
    context.write(key, LOSTFAM);
  }

  if (defCount == 0 && refs.size() > 0) {
    // This is bad, found a node that is referenced but not defined. It must have been
    // lost, emit some info about this node for debugging purposes.
    // Write out a line per reference. If more than one, flag it.
    for (int i = 0; i < refs.size(); i++) {
      byte[] bs = refs.get(i);
      int ordinal;
      if (i <= 0) {
        ordinal = Counts.UNDEFINED.ordinal();
        context.write(key, new BytesWritable(addPrefixFlag(ordinal, bs)));
        context.getCounter(Counts.UNDEFINED).increment(1);
      } else {
        ordinal = Counts.EXTRA_UNDEF_REFERENCES.ordinal();
        context.write(key, new BytesWritable(addPrefixFlag(ordinal, bs)));
      }
    }
    if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
      // Print out missing row; doing get on reference gives info on when the referencer
      // was added which can help a little debugging. This info is only available in mapper
      // output -- the 'Linked List error Key...' log message above. What we emit here is
      // useless for debugging.
      context.getCounter("undef", keyString).increment(1);
    }
  } else if (defCount > 0 && refs.size() == 0) {
    // node is defined but not referenced
    context.write(key, UNREF);
    context.getCounter(Counts.UNREFERENCED).increment(1);
    if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
      context.getCounter("unref", keyString).increment(1);
    }
  } else {
    if (refs.size() > 1) {
      // Skip first reference.
      for (int i = 1; i < refs.size(); i++) {
        context.write(key,
          new BytesWritable(addPrefixFlag(Counts.EXTRAREFERENCES.ordinal(), refs.get(i))));
      }
      context.getCounter(Counts.EXTRAREFERENCES).increment(refs.size() - 1);
    }
    // node is defined and referenced
    context.getCounter(Counts.REFERENCED).increment(1);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 77, Source file: IntegrationTestBigLinkedList.java

Example 11: initialize

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
  checkArgsSize(arguments, 2, 2);

  checkArgPrimitive(arguments, 0);
  checkArgPrimitive(arguments, 1);

  // the function should support both string and binary input types
  if (canParam0BeStr()) {
    checkArgGroups(arguments, 0, inputTypes, STRING_GROUP, BINARY_GROUP);
  } else {
    checkArgGroups(arguments, 0, inputTypes, BINARY_GROUP);
  }
  checkArgGroups(arguments, 1, inputTypes, STRING_GROUP, BINARY_GROUP);

  if (isStr0 = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(inputTypes[0]) == STRING_GROUP) {
    obtainStringConverter(arguments, 0, inputTypes, converters);
  } else {
    obtainBinaryConverter(arguments, 0, inputTypes, converters);
  }

  isKeyConstant = arguments[1] instanceof ConstantObjectInspector;
  byte[] key = null;
  int keyLength = 0;

  if (isStr1 = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(inputTypes[1]) == STRING_GROUP) {
    if (isKeyConstant) {
      String keyStr = getConstantStringValue(arguments, 1);
      if (keyStr != null) {
        key = keyStr.getBytes();
        keyLength = key.length;
      }
    } else {
      obtainStringConverter(arguments, 1, inputTypes, converters);
    }
  } else {
    if (isKeyConstant) {
      BytesWritable keyWr = getConstantBytesValue(arguments, 1);
      if (keyWr != null) {
        key = keyWr.getBytes();
        keyLength = keyWr.getLength();
      }
    } else {
      obtainBinaryConverter(arguments, 1, inputTypes, converters);
    }
  }

  if (key != null) {
    secretKey = getSecretKey(key, keyLength);
  }

  try {
    cipher = Cipher.getInstance("AES");
  } catch (NoSuchPaddingException | NoSuchAlgorithmException e) {
    throw new RuntimeException(e);
  }

  ObjectInspector outputOI = PrimitiveObjectInspectorFactory.writableBinaryObjectInspector;
  return outputOI;
}
 
Developer ID: myui, Project: hive-udf-backports, Lines of code: 61, Source file: GenericUDFAesBase.java

Example 12: getInternalSource

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
@Override
protected InputStream getInternalSource(BytesWritable data) {
  return new ByteArrayInputStream(data.getBytes(), 0, data.getLength());
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 5, Source file: BlobRef.java

Example 13: pair

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
static private byte[] pair(BytesWritable a, BytesWritable b) {
  byte[] pairData = new byte[a.getLength()+ b.getLength()];
  System.arraycopy(a.getBytes(), 0, pairData, 0, a.getLength());
  System.arraycopy(b.getBytes(), 0, pairData, a.getLength(), b.getLength());
  return pairData;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 7, Source file: SortValidator.java

Example 14: createTFile

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    HFileContext context = new HFileContextBuilder()
                          .withBlockSize(options.minBlockSize)
                          .withCompression(AbstractHFileWriter.compressionByName(options.compress))
                          .build();
    Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withOutputStream(fout)
        .withFileContext(context)
        .withComparator(new KeyValue.RawBytesComparator())
        .create();
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        byte [] k = new byte [key.getLength()];
        System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
        byte [] v = new byte [val.getLength()];
        System.arraycopy(val.getBytes(), 0, v, 0, val.getLength()); // copy the valid bytes of the value
        KeyValue kv = new KeyValue(k, CF, QUAL, v);
        writer.append(kv);
        totalBytes += kv.getKeyLength();
        totalBytes += kv.getValueLength();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 53, Source file: TestHFileSeek.java

Example 15: getRowOnly

import org.apache.hadoop.io.BytesWritable; // import the package/class this method depends on
/**
 * @param bw
 * @return Row bytes minus the type flag.
 */
public static byte[] getRowOnly(BytesWritable bw) {
  byte[] bytes = new byte [bw.getLength() - Bytes.SIZEOF_SHORT];
  System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length);
  return bytes;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source file: IntegrationTestBigLinkedList.java


Note: The org.apache.hadoop.io.BytesWritable.getLength examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.