

Java BytesWritable.setSize Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.BytesWritable.setSize. If you are wondering what BytesWritable.setSize does, how to call it, or what it looks like in real code, the curated samples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.BytesWritable.


The following presents 10 code examples of the BytesWritable.setSize method, ordered roughly by popularity.
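All of the project examples below follow the same basic pattern: call setSize to establish the writable's logical length (the backing buffer is enlarged if necessary), then write directly into the array returned by getBytes(). As a minimal, self-contained sketch of that pattern (the class name and payload are illustrative only, not taken from the projects listed here):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;

public class SetSizeDemo {
  public static void main(String[] args) {
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

    BytesWritable bw = new BytesWritable();
    // Establish the logical length; the backing buffer grows if it is too small.
    bw.setSize(payload.length);
    // Fill the internal buffer exposed by getBytes() directly.
    System.arraycopy(payload, 0, bw.getBytes(), 0, payload.length);

    // getLength() reflects the size set above; getBytes().length may be larger,
    // because BytesWritable over-allocates capacity when it grows.
    System.out.println("length = " + bw.getLength());
    System.out.println("capacity = " + bw.getBytes().length);
  }
}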

Example 1: getValue

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 25, Source: TFile.java
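Example 1's getValue is typically reached through Hadoop's TFile scanner API, which resizes a caller-supplied BytesWritable via setSize for every entry. The following is a hedged sketch of such a read loop, assuming the standard TFile.Reader / Scanner / Entry classes and a caller-provided input path; it is not part of the project code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileScanSketch {
  public static void scan(Configuration conf, Path file) throws Exception {
    FileSystem fs = file.getFileSystem(conf);
    long length = fs.getFileStatus(file).getLen();
    try (FSDataInputStream in = fs.open(file);
         TFile.Reader reader = new TFile.Reader(in, length, conf);
         TFile.Reader.Scanner scanner = reader.createScanner()) {
      // One key/value pair is reused for the whole scan; getKey/getValue
      // call setSize internally, so the buffers only grow when necessary.
      BytesWritable key = new BytesWritable();
      BytesWritable value = new BytesWritable();
      while (!scanner.atEnd()) {
        TFile.Reader.Scanner.Entry entry = scanner.entry();
        entry.getKey(key);
        entry.getValue(value);
        // ... process key/value ...
        scanner.advance();
      }
    }
  }
}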

Example 2: fillKey

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.getBytes(), n, l);
    n += l;
  }
  if (sorted && WritableComparator.compareBytes(
          lastKey.getBytes(), MIN_KEY_LEN, lastKey.getLength() - MIN_KEY_LEN,
          o.getBytes(), MIN_KEY_LEN, o.getLength() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.getBytes(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 21, Source: KVGenerator.java

Example 3: fillKey

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
  if (sorted && WritableComparator.compareBytes(
          lastKey.get(), MIN_KEY_LEN, lastKey.getSize() - MIN_KEY_LEN,
          o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: KVGenerator.java

Example 4: fillValue

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
private void fillValue(BytesWritable o) {
  int len = valLenRNG.nextInt();
  o.setSize(len);
  int n = 0;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.getBytes(), n, l);
    n += l;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 12, Source: KVGenerator.java

Example 5: next

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.getBytes());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.getBytes();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 11, Source: KeySampler.java

Example 6: fillValue

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
private void fillValue(BytesWritable o) {
  int len = valLenRNG.nextInt();
  o.setSize(len);
  int n = 0;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: KVGenerator.java

Example 7: next

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: KeySampler.java

Example 8: next

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int rnd = 0;
  if (max != min) {
    rnd = random.nextInt(max - min);
  }
  int n = rnd + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: KeySampler.java

Example 9: createBigMapInputFile

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
private static void createBigMapInputFile(Configuration conf, FileSystem fs, 
                                          Path dir, long fileSizeInMB) 
throws IOException {
  // Check if the input path exists and is non-empty
  if (fs.exists(dir)) {
    FileStatus[] list = fs.listStatus(dir);
    if (list.length > 0) {
      throw new IOException("Input path: " + dir + " already exists... ");
    }
  }
  
  Path file = new Path(dir, "part-0");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              BytesWritable.class, BytesWritable.class,
                              CompressionType.NONE);
  long numBytesToWrite = fileSizeInMB * 1024 * 1024;
  int minKeySize = conf.getInt(MIN_KEY, 10);
  int keySizeRange = 
    conf.getInt(MAX_KEY, 1000) - minKeySize;
  int minValueSize = conf.getInt(MIN_VALUE, 0);
  int valueSizeRange = 
    conf.getInt(MAX_VALUE, 20000) - minValueSize;
  BytesWritable randomKey = new BytesWritable();
  BytesWritable randomValue = new BytesWritable();

  LOG.info("Writing " + numBytesToWrite + " bytes to " + file + " with " +
           "minKeySize: " + minKeySize + " keySizeRange: " + keySizeRange +
           " minValueSize: " + minValueSize + " valueSizeRange: " + valueSizeRange);
  long start = System.currentTimeMillis();
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize + 
      (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
      (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    writer.append(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
  }
  writer.close();
  long end = System.currentTimeMillis();

  LOG.info("Created " + file + " of size: " + fileSizeInMB + "MB in " + 
           (end-start)/1000 + "secs");
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: BigMapOutput.java
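As a usage note for Example 9: the generated file can be read back with a plain SequenceFile.Reader, again reusing a single key/value pair whose buffers are resized via setSize as each record is deserialized (BytesWritable.readFields calls setSize before filling the bytes). The sketch below assumes a caller-supplied path such as the "part-0" file written above; it is illustrative, not part of the project code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

public class BigMapInputDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]); // e.g. the "part-0" file written above
    try (SequenceFile.Reader reader =
             new SequenceFile.Reader(conf, SequenceFile.Reader.file(file))) {
      BytesWritable key = new BytesWritable();
      BytesWritable value = new BytesWritable();
      long records = 0;
      // next() deserializes into the supplied writables, resizing them as needed.
      while (reader.next(key, value)) {
        records++;
      }
      System.out.println("records = " + records);
    }
  }
}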

Example 10: getKey

import org.apache.hadoop.io.BytesWritable; // import the package/class the method depends on
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 * 
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.getBytes());
  return key.getLength();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 14, Source: TFile.java


Note: The org.apache.hadoop.io.BytesWritable.setSize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Follow the corresponding project's license when redistributing or using this code; do not republish without permission.