

Java Writable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.Writable. If you are wondering what the Writable class is for and how to use it, the curated examples below may help.


The Writable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default.
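For context before the examples: Writable is Hadoop's serialization contract, and it consists of just two methods, write(DataOutput) and readFields(DataInput). Here is a minimal sketch of a custom implementation; the PairWritable class is hypothetical and not taken from any of the projects below.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

// Hypothetical value type: a (count, score) pair.
public class PairWritable implements Writable {
  private int count;
  private float score;

  // Writables need a no-argument constructor so the framework
  // can instantiate them before calling readFields().
  public PairWritable() {}

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(count);    // serialize fields in a fixed order
    out.writeFloat(score);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    count = in.readInt();   // deserialize in exactly the same order
    score = in.readFloat();
  }
}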

Example 1: map

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Ignore the input and write random key/value records until the
 * configured number of bytes has been written.
 */
public void map(WritableComparable key, 
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize + 
      (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
      (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " + 
                         numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: RandomWriter.java
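The map() method above relies on mapper state that the snippet does not show. A hedged sketch of that state, following Hadoop's RandomWriter (the class is nested as a static class there, and the setup() that reads these sizes from the job configuration is elided):

import java.util.Random;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch of the fields and helper that map() assumes; simplified.
class RandomMapper
    extends Mapper<WritableComparable, Writable, BytesWritable, BytesWritable> {

  private long numBytesToWrite;  // byte budget for this task
  private int minKeySize;
  private int keySizeRange;      // key length = min + random(range)
  private int minValueSize;
  private int valueSizeRange;    // value length = min + random(range)
  private Random random = new Random();
  private BytesWritable randomKey = new BytesWritable();
  private BytesWritable randomValue = new BytesWritable();

  // Fill a slice of the buffer with random bytes.
  private void randomizeBytes(byte[] data, int offset, int length) {
    for (int i = offset + length - 1; i >= offset; --i) {
      data[i] = (byte) random.nextInt(256);
    }
  }
}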

Example 2: registerProtocolEngine

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Register an RPC kind and the class used to deserialize the RPC request.
 *
 * Called by the static initializers of rpcKind engines.
 * @param rpcKind
 * @param rpcRequestWrapperClass - the class used to deserialize the
 *  RPC request.
 * @param rpcInvoker - used to process the calls on the server side.
 */
public static void registerProtocolEngine(RPC.RpcKind rpcKind, 
        Class<? extends Writable> rpcRequestWrapperClass,
        RpcInvoker rpcInvoker) {
  RpcKindMapValue  old = 
      rpcKindMap.put(rpcKind, new RpcKindMapValue(rpcRequestWrapperClass, rpcInvoker));
  if (old != null) {
    rpcKindMap.put(rpcKind, old);
    throw new IllegalArgumentException("ReRegistration of rpcKind: " +
        rpcKind);      
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("rpcKind=" + rpcKind +
        ", rpcRequestWrapperClass=" + rpcRequestWrapperClass +
        ", rpcInvoker=" + rpcInvoker);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 27, Source: Server.java
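As the Javadoc notes, registration is typically done from a static initializer of an RPC engine. A hedged usage sketch; MyRpcEngine, MyRequestWrapper, and MyRpcInvoker are made-up names for illustration, not real Hadoop classes:

// Hypothetical engine registering itself at class-load time.
public class MyRpcEngine {
  static {
    Server.registerProtocolEngine(
        RPC.RpcKind.RPC_WRITABLE,  // which RPC kind this engine serves
        MyRequestWrapper.class,    // Writable used to deserialize requests
        new MyRpcInvoker());       // handles the decoded calls server-side
  }
}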

Example 3: Call

import org.apache.hadoop.io.Writable; // import the required package/class
private Call(RPC.RpcKind rpcKind, Writable param) {
  this.rpcKind = rpcKind;
  this.rpcRequest = param;

  final Integer id = callId.get();
  if (id == null) {
    this.id = nextCallId();
  } else {
    callId.set(null);
    this.id = id;
  }
  
  final Integer rc = retryCount.get();
  if (rc == null) {
    this.retry = 0;
  } else {
    this.retry = rc;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: Client.java

Example 4: getClassCode

import org.apache.hadoop.io.Writable; // import the required package/class
static Integer getClassCode(final Class<?> c)
throws IOException {
  Integer code = CLASS_TO_CODE.get(c);
  if (code == null) {
    if (List.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(List.class);
    } else if (Writable.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Writable.class);
    } else if (c.isArray()) {
      code = CLASS_TO_CODE.get(Array.class);
    } else if (Message.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Message.class);
    } else if (Serializable.class.isAssignableFrom(c)){
      code = CLASS_TO_CODE.get(Serializable.class);
    } else if (Scan.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Scan.class);
    }
  }
  return code;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: HbaseObjectWritableFor96Migration.java
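getClassCode() assumes a CLASS_TO_CODE table populated elsewhere in the class. A hedged sketch of how such a table could be built; the helper and the code values are illustrative assumptions, not the real wire-format constants:

// Illustrative only: the real class assigns fixed wire-format codes.
private static final Map<Class<?>, Integer> CLASS_TO_CODE = new HashMap<>();

private static void addToMap(Class<?> clazz, int code) {
  CLASS_TO_CODE.put(clazz, code);
}

static {
  addToMap(List.class, 1);          // container types
  addToMap(Writable.class, 2);      // generic Writable fallback
  addToMap(Serializable.class, 3);  // Java-serializable fallback
  // ... one entry per supported type ...
}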

Example 5: testWideTuple2

import org.apache.hadoop.io.Writable; // import the required package/class
public void testWideTuple2() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values, emptyText);
  values[9] = new Text("Number 9");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(9);
  
  for (int pos = 0; pos < tuple.size(); pos++) {
    boolean has = tuple.has(pos);
    if (pos == 9) {
      assertTrue(has);
    } else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos,
          has);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestJoinTupleWritable.java

Example 6: testWideTuple2

import org.apache.hadoop.io.Writable; // import the required package/class
public void testWideTuple2() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values, emptyText);
  values[9] = new Text("Number 9");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(9);
  
  for (int pos = 0; pos < tuple.size(); pos++) {
    boolean has = tuple.has(pos);
    if (pos == 9) {
      assertTrue(has);
    } else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestTupleWritable.java

Example 7: makeRandomWritables

import org.apache.hadoop.io.Writable; // import the required package/class
private Writable[] makeRandomWritables() {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  return writs;
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestTupleWritable.java

Example 8: appendMetaBlock

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Add a meta block to the end of the file. Call before close(). Metadata
 * blocks are expensive: fill one with a batch of serialized data rather
 * than writing a separate block per metadata instance. If the metadata is
 * small, consider adding it to the file info instead, using
 * {@link #appendFileInfo(byte[], byte[])}.
 *
 * @param metaBlockName
 *          name of the block
 * @param content
 *          will call readFields to get data later (DO NOT REUSE)
 */
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
  byte[] key = Bytes.toBytes(metaBlockName);
  int i;
  for (i = 0; i < metaNames.size(); ++i) {
    // stop when the current key is greater than our own
    byte[] cur = metaNames.get(i);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0,
        key.length) > 0) {
      break;
    }
  }
  metaNames.add(i, key);
  metaData.add(i, content);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: HFileWriterV2.java
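A brief usage sketch for appendMetaBlock(); the writer variable, the stats Writable, and the block name are assumptions for illustration:

// Assumed: 'writer' is an open HFile writer and 'stats' is a Writable
// that batches several pieces of metadata into one payload.
writer.appendMetaBlock("MY_STATS", stats);  // hypothetical block name
writer.close();  // meta blocks must be appended before close()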

Example 9: testWideTuple

import org.apache.hadoop.io.Writable; // import the required package/class
public void testWideTuple() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values, emptyText);
  values[42] = new Text("Number 42");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(42);
  
  for (int pos = 0; pos < tuple.size(); pos++) {
    boolean has = tuple.has(pos);
    if (pos == 42) {
      assertTrue(has);
    } else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestTupleWritable.java

Example 10: run

import org.apache.hadoop.io.Writable; // import the required package/class
@Override
public void run() {
  for (int i = 0; i < count; i++) {
    try {
      int byteSize = RANDOM.nextInt(BYTE_COUNT);
      byte[] bytes = new byte[byteSize];
      System.arraycopy(BYTES, 0, bytes, 0, byteSize);
      Writable param = new BytesWritable(bytes);
      call(client, param, address);
      Thread.sleep(RANDOM.nextInt(20));
    } catch (Exception e) {
      LOG.fatal("Caught Exception", e);
      failed = true;
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 17, Source: TestIPCServerResponder.java

Example 11: getCurrentValue

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val the 'value' to be read
 */
public synchronized void getCurrentValue(Writable val)
    throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }
  // Position stream to 'current' value
  seekToCurrentValue();

  val.readFields(valIn);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
                          + " bytes, should read " +
                          (valBuffer.getLength() - keyLength));
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 22, Source: WALFile.java

Example 12: next

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Read the next key in the file into <code>key</code>, skipping its value.
 * Returns true if another entry exists, and false at the end of the file.
 */
public synchronized boolean next(Writable key) throws IOException {
  if (key.getClass() != WALEntry.class) {
    throw new IOException("wrong key class: " + key.getClass().getName()
                          + " is not " + WALEntry.class);
  }

  outBuf.reset();

  keyLength = next(outBuf);
  if (keyLength < 0) {
    return false;
  }

  valBuffer.reset(outBuf.getData(), outBuf.getLength());

  key.readFields(valBuffer);
  valBuffer.mark(0);
  if (valBuffer.getPosition() != keyLength) {
    throw new IOException(key + " read " + valBuffer.getPosition()
                          + " bytes, should read " + keyLength);
  }


  return true;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 30, Source: WALFile.java
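Examples 11 and 12 are two halves of one read protocol: next(key) advances to an entry and reads its key, then getCurrentValue(val) deserializes the matching value. A hedged usage sketch; reader construction is elided, and the value is assumed to also be a WALEntry:

// Assumed: 'reader' is the open WALFile reader these methods belong to.
WALEntry key = new WALEntry();    // next() insists on this key class
WALEntry value = new WALEntry();
while (reader.next(key)) {        // position at the next entry, if any
  reader.getCurrentValue(value);  // deserialize that entry's value
  // ... process key and value ...
}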

Example 13: addBloomFilter

import org.apache.hadoop.io.Writable; // import the required package/class
private void addBloomFilter(final BloomFilterWriter bfw,
    final BlockType blockType) {
  if (bfw.getKeyCount() <= 0) {
    return;
  }

  if (blockType != BlockType.GENERAL_BLOOM_META &&
      blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
    throw new RuntimeException("Block Type: " + blockType.toString() +
        " is not supported");
  }
  additionalLoadOnOpenData.add(new BlockWritable() {
    @Override
    public BlockType getBlockType() {
      return blockType;
    }

    @Override
    public void writeToBlock(DataOutput out) throws IOException {
      bfw.getMetaWriter().write(out);
      Writable dataWriter = bfw.getDataWriter();
      if (dataWriter != null)
        dataWriter.write(out);
    }
  });
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: HFileWriterV2.java

Example 14: setupResponse

import org.apache.hadoop.io.Writable; // import the required package/class
/**
 * Setup response for the IPC Call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, 
                           Call call, Status status, 
                           Writable rv, String errorClass, String error) 
throws IOException {
  response.reset();
  DataOutputStream out = new DataOutputStream(response);
  out.writeInt(call.id);                // write call id
  out.writeInt(status.state);           // write status

  if (status == Status.SUCCESS) {
    rv.write(out);
  } else {
    WritableUtils.writeString(out, errorClass);
    WritableUtils.writeString(out, error);
  }
  /*if (call.connection.useWrap) {
    wrapWithSasl(response, call);
  }*/
  call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
 
Developer ID: spafka, Project: spark_deep, Lines: 32, Source: Server.java

Example 15: testWideWritable

import org.apache.hadoop.io.Writable; // import the required package/class
public void testWideWritable() throws Exception {
  Writable[] manyWrits = makeRandomWritables(131);
  
  TupleWritable sTuple = new TupleWritable(manyWrits);
  for (int i = 0; i < manyWrits.length; i++) {
    if (i % 3 == 0) {
      sTuple.setWritten(i);
    }
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
  assertEquals("All tuple data has not been read from the stream", 
    -1, in.read());
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestJoinTupleWritable.java


Note: The org.apache.hadoop.io.Writable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original developers, and copyright in the source code remains with those authors. Consult the corresponding project's license before distributing or using the code; do not republish without permission.