当前位置: 首页>>代码示例>>Java>>正文


Java BytesWritable.set方法代码示例

本文整理汇总了Java中org.apache.hadoop.io.BytesWritable.set方法的典型用法代码示例。如果您正苦于以下问题:Java BytesWritable.set方法的具体用法?Java BytesWritable.set怎么用?Java BytesWritable.set使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.io.BytesWritable的用法示例。


在下文中一共展示了BytesWritable.set方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: next

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Reads the next raw key/value pair from the underlying SequenceFile,
 * copying the uninterpreted bytes into {@code key} and {@code val}.
 *
 * @return true if a record was read; false once EOF is reached or a sync
 *         mark has been seen past the end of this split.
 */
public synchronized boolean next(BytesWritable key, BytesWritable val)
    throws IOException {
  if (done) {
    return false;
  }
  final long startPos = in.getPosition();
  final boolean hitEof = (in.nextRawKey(buffer) == -1);
  if (!hitEof) {
    // Copy the raw key bytes out, then reuse the buffer for the value.
    key.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
    in.nextRawValue(vbytes);
    vbytes.writeUncompressedBytes(buffer);
    val.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
  }
  // Finished at EOF, or once a sync mark is seen at/after 'end'.
  done = hitEof || (startPos >= end && in.syncSeen());
  return !done;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:SequenceFileAsBinaryInputFormat.java

示例2: map

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * For each cell of the row: cells whose qualifier equals TEST_QUALIFIER
 * emit (rowKey, EMPTY); all other cells emit (qualifier, rowKey).
 */
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
    throws IOException, InterruptedException {
  final BytesWritable rowKey = new BytesWritable(key.get());
  // Scratch writable reused across cells for the non-test qualifiers.
  final BytesWritable qualifier = new BytesWritable();
  for (Cell cell : value.listCells()) {
    final boolean isTestQualifier =
        Bytes.compareTo(TEST_QUALIFIER, 0, TEST_QUALIFIER.length,
            cell.getQualifierArray(), cell.getQualifierOffset(),
            cell.getQualifierLength()) == 0;
    if (isTestQualifier) {
      context.write(rowKey, EMPTY);
    } else {
      qualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(),
          cell.getQualifierLength());
      context.write(qualifier, rowKey);
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:16,代码来源:IntegrationTestLoadAndVerify.java

示例3: next

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Produces the next key/value pair. When {@code dupKey} is set, the
 * previously generated key is reused verbatim; otherwise a fresh key is
 * generated. The value is always regenerated.
 */
public void next(BytesWritable key, BytesWritable value, boolean dupKey) {
  if (dupKey) {
    // Repeat the last key exactly to create a duplicate-key record.
    key.set(lastKey);
  } else {
    fillKey(key);
  }
  fillValue(value);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:10,代码来源:KVGenerator.java

示例4: fillBuffer

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Fills the first {@code len} bytes of {@code tmp} with randomly chosen
 * dictionary words (the final word truncated to fit), then points
 * {@code bw} at that region of the buffer.
 */
private void fillBuffer(Random rng, BytesWritable bw, byte[] tmp, int len) {
  int written = 0;
  while (written < len) {
    final byte[] word = dictionary[rng.nextInt(dictionary.length)];
    final int chunk = Math.min(word.length, len - written);
    System.arraycopy(word, 0, tmp, written, chunk);
    written += chunk;
  }
  bw.set(tmp, 0, len);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:11,代码来源:TestTFileSeqFileComparison.java

示例5: timeWrite

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Appends random key/value pairs to {@code appendable} until the file at
 * {@code path} reaches {@code fileSize} bytes, timing the whole run.
 * Key lengths are uniform in [baseKlen, 2*baseKlen) and value lengths in
 * [baseVlen, 2*baseVlen).
 *
 * @param path       output file, polled for its current length
 * @param appendable sink receiving the generated pairs; closed on return
 * @param baseKlen   minimum key length (max is twice this)
 * @param baseVlen   minimum value length (max is twice this)
 * @param fileSize   stop once the file has grown to at least this many bytes
 */
private void timeWrite(Path path, KVAppendable appendable, int baseKlen,
    int baseVlen, long fileSize) throws IOException {
  int maxKlen = baseKlen * 2;
  int maxVlen = baseVlen * 2;
  BytesWritable key = new BytesWritable();
  BytesWritable value = new BytesWritable();
  byte[] keyBuffer = new byte[maxKlen];
  byte[] valueBuffer = new byte[maxVlen];
  Random rng = new Random(options.seed);
  long totalBytes = 0;
  printlnWithTimestamp("Start writing: " + path.getName() + "...");
  startTime();

  for (long i = 0; true; ++i) {
    if (i % 1000 == 0) { // test the size for every 1000 rows.
      if (fs.getFileStatus(path).getLen() >= fileSize) {
        break;
      }
    }
    int klen = rng.nextInt(baseKlen) + baseKlen;
    int vlen = rng.nextInt(baseVlen) + baseVlen;
    // fillBuffer already calls set(buffer, 0, len) on each writable, so the
    // explicit key.set()/value.set() calls that followed were redundant
    // duplicates and have been removed.
    fillBuffer(rng, key, keyBuffer, klen);
    fillBuffer(rng, value, valueBuffer, vlen);
    appendable.append(key, value);
    totalBytes += klen;
    totalBytes += vlen;
  }
  stopTime();
  appendable.close();
  reportStats(path, totalBytes);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:34,代码来源:TestTFileSeqFileComparison.java

示例6: writeOutput

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Copies the files yielded by {@code input} into a record-compressed
 * SequenceFile at {@code output}, keyed by source path, skipping entries
 * rejected by {@code filter}. Prints copied/skipped counts when done.
 *
 * @param input iterator over candidate local files
 * @throws IOException if any filesystem operation fails
 * @throws IllegalArgumentException if the output file already exists
 */
private void writeOutput(RemoteIterator<? extends FileStatus> input) throws IOException {
    Path outPath = new Path(output);
    if (distribFs.exists(outPath)) {
        throw new IllegalArgumentException("Output file already exists, Not overwriting it:" + output);
    }

    Text key = new Text();
    BytesWritable value = new BytesWritable();
    long skipped = 0;
    long copied = 0;
    // try-with-resources ensures the writer is closed even if a copy fails.
    try (Writer writer = SequenceFile.createWriter(distribFs.getConf(),
            Writer.file(outPath),
            Writer.keyClass(Text.class),
            Writer.valueClass(BytesWritable.class),
            Writer.compression(SequenceFile.CompressionType.RECORD))) {
        while (input.hasNext()) {
            FileStatus next = input.next();
            if (filter(next)) {
                key.set(next.getPath().toString());
                //CAUTION : this could cause memory overflow
                byte[] bytes;
                // Close each input stream; the original leaked one per file.
                try (FSDataInputStream stream = localFs.open(next.getPath())) {
                    bytes = IOUtils.toByteArray(stream);
                }
                value.set(bytes, 0, bytes.length);
                writer.append(key, value);
                copied++;
            } else {
                skipped++;
            }
        }
    }
    System.out.println("Files copied ::" + copied);
    System.out.println("Files skipped ::" + skipped);
}
 
开发者ID:thammegowda,项目名称:tika-dl4j-spark-imgrec,代码行数:34,代码来源:Local2SeqFile.java

示例7: convertBinaryTypes

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Wraps a raw byte[] column value in a BytesWritable when the target Java
 * column type is BYTESWRITABLE; returns null for any other target type.
 */
private Object convertBinaryTypes(Object val, String javaColType) {
  // Cast up front (as before) so a non-byte[] value fails fast.
  final byte[] raw = (byte[]) val;
  if (!javaColType.equals(BYTESWRITABLE)) {
    return null;
  }
  final BytesWritable writable = new BytesWritable();
  writable.set(raw, 0, raw.length);
  return writable;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:10,代码来源:SqoopHCatExportHelper.java

示例8: writeUsingRecordWriter

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Helper method to write using monarch-record-writer.
 *
 * @param lines the data/records to be written
 * @param conf the writer configuration
 * @return the key-prefix used by the monarch writer
 * @throws IOException if the underlying writer fails
 */
public static String writeUsingRecordWriter(List<String> lines, Configuration conf) throws IOException {
  final MonarchRecordWriter mrw = new MonarchRecordWriter(conf);
  final BytesWritable bytesWritable = new BytesWritable();
  for (final String line : lines) {
    // Encode explicitly as UTF-8; the previous bare getBytes() depended on
    // the JVM's platform default charset, making output machine-dependent.
    final byte[] bytes = line.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    bytesWritable.set(bytes, 0, bytes.length);
    mrw.write(bytesWritable);
  }
  mrw.close(true);
  return mrw.getKeyPrefix();
}
 
开发者ID:ampool,项目名称:monarch,代码行数:21,代码来源:TestHelper.java

示例9: next

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Reads one record via the wrapped reader, copying the current key and
 * value into the caller-supplied writables.
 *
 * @return true if a record was read; false at end of input.
 */
@Override
public synchronized boolean next(LongWritable key, BytesWritable value)
    throws IOException {
  if (!reader.nextKeyValue()) {
    return false;
  }
  key.set(reader.getCurrentKey().get());
  value.set(reader.getCurrentValue());
  return true;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:FixedLengthRecordReader.java

示例10: makeByteWritable

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Copies the event body into a freshly allocated BytesWritable.
 */
private BytesWritable makeByteWritable(Event e) {
  final byte[] body = e.getBody();
  final BytesWritable out = new BytesWritable();
  out.set(body, 0, body.length);
  return out;
}
 
开发者ID:moueimei,项目名称:flume-release-1.7.0,代码行数:6,代码来源:HDFSWritableSerializer.java

示例11: fromAvro

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Convert from Avro type to Sqoop's java representation of the SQL type
 * see SqlManager#toJavaType
 *
 * @param avroObject the Avro-decoded value (may be null)
 * @param schema     the Avro schema describing avroObject
 * @param type       the target SQL/Java type name used to disambiguate
 *                   longs and strings into temporal/decimal types
 */
public static Object fromAvro(Object avroObject, Schema schema, String type) {
  if (avroObject == null) {
    return null;
  }

  switch (schema.getType()) {
    case NULL:
      return null;
    case BOOLEAN:
    case INT:
    case FLOAT:
    case DOUBLE:
      // These map directly onto their Java counterparts.
      return avroObject;
    case LONG:
      // A long may encode a temporal value, depending on the SQL type name.
      if (type.equals(DATE_TYPE)) {
        return new Date((Long) avroObject);
      }
      if (type.equals(TIME_TYPE)) {
        return new Time((Long) avroObject);
      }
      if (type.equals(TIMESTAMP_TYPE)) {
        return new Timestamp((Long) avroObject);
      }
      return avroObject;
    case BYTES: {
      ByteBuffer buffer = (ByteBuffer) avroObject;
      BytesWritable writable = new BytesWritable();
      writable.set(buffer.array(), buffer.arrayOffset() + buffer.position(),
          buffer.remaining());
      if (type.equals(BLOB_REF_TYPE)) {
        // TODO: Should convert BytesWritable to BlobRef properly. (SQOOP-991)
        throw new UnsupportedOperationException("BlobRef not supported");
      }
      return writable;
    }
    case STRING: {
      String text = avroObject.toString();
      if (type.equals(BIG_DECIMAL_TYPE)) {
        return new BigDecimal(text);
      }
      if (type.equals(DATE_TYPE)) {
        return Date.valueOf(text);
      }
      if (type.equals(TIME_TYPE)) {
        return Time.valueOf(text);
      }
      if (type.equals(TIMESTAMP_TYPE)) {
        return Timestamp.valueOf(text);
      }
      return text;
    }
    case ENUM:
      return avroObject.toString();
    case UNION: {
      // Only two-branch [null, T] / [T, null] unions are supported:
      // unwrap the non-null branch and recurse.
      List<Schema> branches = schema.getTypes();
      if (branches.size() != 2) {
        throw new IllegalArgumentException("Only support union with null");
      }
      Schema first = branches.get(0);
      Schema second = branches.get(1);
      if (first.getType() == Schema.Type.NULL) {
        return fromAvro(avroObject, second, type);
      }
      if (second.getType() == Schema.Type.NULL) {
        return fromAvro(avroObject, first, type);
      }
      throw new IllegalArgumentException("Only support union with null");
    }
    case FIXED:
      return new BytesWritable(((GenericFixed) avroObject).bytes());
    case RECORD:
    case ARRAY:
    case MAP:
    default:
      throw new IllegalArgumentException("Cannot convert Avro type "
          + schema.getType());
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:73,代码来源:AvroUtil.java

示例12: textifyBytes

import org.apache.hadoop.io.BytesWritable; //导入方法依赖的package包/类
/**
 * Renders the bytes of {@code t} using BytesWritable's textual form.
 */
private static String textifyBytes(Text t) {
  final BytesWritable wrapped = new BytesWritable();
  wrapped.set(t.getBytes(), 0, t.getLength());
  return wrapped.toString();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:6,代码来源:TeraValidate.java


注:本文中的org.apache.hadoop.io.BytesWritable.set方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。