

Java Writable.readFields Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.Writable.readFields. If you are wondering how Writable.readFields is used in practice, or looking for concrete examples of calling it, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing interface, org.apache.hadoop.io.Writable.


The following shows 11 code examples of the Writable.readFields method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
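Before looking at the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how readFields pairs with write when implementing a custom Writable. The PointWritable class and its fields are hypothetical and exist only to illustrate the contract: write serializes the fields in a fixed order, and readFields must deserialize them in exactly the same order.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical custom Writable, used only to illustrate the write/readFields contract.
public class PointWritable implements Writable {
  private int x;
  private int y;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(x);   // serialize fields in a fixed order
    out.writeInt(y);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    x = in.readInt();  // deserialize in exactly the same order
    y = in.readInt();
  }

  public static void main(String[] args) throws IOException {
    PointWritable src = new PointWritable();
    src.x = 3;
    src.y = 7;

    // Round-trip through a byte array: write(), then readFields() on a fresh instance.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    src.write(new DataOutputStream(bytes));

    PointWritable copy = new PointWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.x + "," + copy.y);  // prints 3,7
  }
}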

Example 1: getCurrentValue

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val : The 'value' to be read.
 */
public synchronized void getCurrentValue(Writable val)
    throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }
  // Position stream to 'current' value
  seekToCurrentValue();

  val.readFields(valIn);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
                          + " bytes, should read " +
                          (valBuffer.getLength() - keyLength));
  }
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 22, Source: WALFile.java

Example 2: next

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
/**
 * Read the next key in the file into <code>key</code>, skipping its value.  True if another
 * entry exists, and false at end of file.
 */
public synchronized boolean next(Writable key) throws IOException {
  if (key.getClass() != WALEntry.class) {
    throw new IOException("wrong key class: " + key.getClass().getName()
                          + " is not " + WALEntry.class);
  }

  outBuf.reset();

  keyLength = next(outBuf);
  if (keyLength < 0) {
    return false;
  }

  valBuffer.reset(outBuf.getData(), outBuf.getLength());

  key.readFields(valBuffer);
  valBuffer.mark(0);
  if (valBuffer.getPosition() != keyLength) {
    throw new IOException(key + " read " + valBuffer.getPosition()
                          + " bytes, should read " + keyLength);
  }


  return true;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 30, Source: WALFile.java

Example 3: processData

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
private void processData(byte[] buf) throws  IOException, InterruptedException {
  DataInputStream dis =
    new DataInputStream(new ByteArrayInputStream(buf));
  int id = dis.readInt();                    // try to read an id
    
  if (LOG.isDebugEnabled())
    LOG.debug(" got #" + id);

  Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param
  ((RPC.Invocation) param).setConf(conf);
  param.readFields(dis);

  Call call = new Call(id, param, this);
  callQueue.put(call);              // queue the call; maybe blocked here
  incRpcCount();  // Increment the rpc count
}
 
Developer: spafka, Project: spark_deep, Lines: 18, Source: Server.java

Example 4: readObject

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length
  // this causes the "obvious" translations to work. So that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: CommonStub.java

Example 5: readObject

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
private void readObject(Writable obj) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length
  // this causes the "obvious" translations to work. So that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: BinaryProtocol.java

Example 6: readFields

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // First clear the map. Otherwise we will just accumulate
  // entries every time this method is called.
  this.instance.clear();
  // Read the number of entries in the map
  int entries = in.readInt();
  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    byte[] key = Bytes.readByteArray(in);
    byte id = in.readByte();
    Class clazz = getClass(id);
    V value = null;
    if (clazz.equals(byte[].class)) {
      byte[] bytes = Bytes.readByteArray(in);
      value = (V) bytes;
    } else {
      Writable w = (Writable) ReflectionUtils.newInstance(clazz, getConf());
      w.readFields(in);
      value = (V) w;
    }
    this.instance.put(key, value);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: IndexFile.java
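The readFields implementation above implies a symmetric serialized layout: an entry count, then for each entry the key bytes, a one-byte class id, and the serialized value. As a rough sketch only (not code from the ditb project), the matching write side could look like the fragment below; the getId helper, assumed to be the inverse of the getClass(id) lookup used above, is hypothetical.

public void write(DataOutput out) throws IOException {
  // Mirror of readFields: write the entry count first ...
  out.writeInt(this.instance.size());
  for (Map.Entry<byte[], V> e : this.instance.entrySet()) {
    // ... then the key bytes, a one-byte class id, and the value itself.
    Bytes.writeByteArray(out, e.getKey());
    V value = e.getValue();
    if (value instanceof byte[]) {
      out.writeByte(getId(byte[].class));        // getId: hypothetical inverse of getClass(id)
      Bytes.writeByteArray(out, (byte[]) value);
    } else {
      out.writeByte(getId(value.getClass()));
      ((Writable) value).write(out);
    }
  }
}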

Example 7: getWritable

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
/**
 * Set bytes into the passed Writable by calling its
 * {@link Writable#readFields(java.io.DataInput)}.
 * @param bytes serialized bytes
 * @param offset offset into array
 * @param length length of data
 * @param w An empty Writable (usually made by calling the null-arg
 * constructor).
 * @return The passed Writable after its readFields has been fed the
 * passed <code>bytes</code> array.
 * @throws IOException e
 * @throws IllegalArgumentException if <code>bytes</code> is null or empty,
 * or if <code>w</code> is null
 */
public static Writable getWritable(final byte [] bytes, final int offset,
  final int length, final Writable w)
throws IOException {
  if (bytes == null || length <= 0) {
    throw new IllegalArgumentException("Can't build a writable with empty " +
      "bytes array");
  }
  if (w == null) {
    throw new IllegalArgumentException("Writable cannot be null");
  }
  DataInputBuffer in = new DataInputBuffer();
  try {
    in.reset(bytes, offset, length);
    w.readFields(in);
    return w;
  } finally {
    in.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: Writables.java
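A hedged usage sketch for the getWritable helper above (assuming the Writables utility class from the example is available; its package is assumed below, and the serialization step uses plain java.io streams rather than any project-specific helper):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables; // package assumed; adjust to wherever the Writables class above lives
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class GetWritableDemo {
  public static void main(String[] args) throws IOException {
    // Serialize a Text value into a byte array using its own write() method.
    Text original = new Text("hello");
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));
    byte[] serialized = bytes.toByteArray();

    // Rehydrate into an empty Text via getWritable, which calls readFields underneath.
    Writable restored = Writables.getWritable(serialized, 0, serialized.length, new Text());
    System.out.println(restored);  // prints: hello
  }
}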

Example 8: cloneWritableInto

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
@Deprecated
public static void cloneWritableInto(Writable dst, 
                                     Writable src) throws IOException {
  CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get();
  buffer.outBuffer.reset();
  src.write(buffer.outBuffer);
  buffer.moveData();
  dst.readFields(buffer.inBuffer);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 10, Source: ReflectionUtils.java

Example 9: cloneWritableInto

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
@Deprecated
public static void cloneWritableInto(Writable dst, 
                                     Writable src) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  src.write(buffer.outBuffer);
  buffer.moveData();
  dst.readFields(buffer.inBuffer);
}
 
Developer: spafka, Project: spark_deep, Lines: 10, Source: ReflectionUtils.java

Example 10: copyWritable

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
/**
 * Copy one Writable to another.  Copies bytes using data streams.
 * @param bytes Serialized bytes of the source Writable
 * @param tgt Target Writable
 * @return The target Writable.
 * @throws IOException e
 */
public static Writable copyWritable(final byte [] bytes, final Writable tgt)
throws IOException {
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
  try {
    tgt.readFields(dis);
  } finally {
    dis.close();
  }
  return tgt;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: Writables.java

Example 11: receiveRpcResponse

import org.apache.hadoop.io.Writable; // import the package/class this method depends on
private void receiveRpcResponse() {
  if (shouldCloseConnection.get()) {
    return;
  }
  touch();
  
  try {
    int totalLen = in.readInt();
    RpcResponseHeaderProto header = 
        RpcResponseHeaderProto.parseDelimitedFrom(in);
    checkResponse(header);

    int headerLen = header.getSerializedSize();
    headerLen += CodedOutputStream.computeRawVarint32Size(headerLen);

    int callId = header.getCallId();
    if (LOG.isDebugEnabled())
      LOG.debug(getName() + " got value #" + callId);

    Call call = calls.get(callId);
    RpcStatusProto status = header.getStatus();
    if (status == RpcStatusProto.SUCCESS) {
      Writable value = ReflectionUtils.newInstance(valueClass, conf);
      value.readFields(in);                 // read value
      calls.remove(callId);
      call.setRpcResponse(value);
      
      // verify that length was correct
      // only for ProtobufEngine where len can be verified easily
      if (call.getRpcResponse() instanceof ProtobufRpcEngine.RpcWrapper) {
        ProtobufRpcEngine.RpcWrapper resWrapper = 
            (ProtobufRpcEngine.RpcWrapper) call.getRpcResponse();
        if (totalLen != headerLen + resWrapper.getLength()) { 
          throw new RpcClientException(
              "RPC response length mismatch on rpc success");
        }
      }
    } else { // Rpc Request failed
      // Verify that length was correct
      if (totalLen != headerLen) {
        throw new RpcClientException(
            "RPC response length mismatch on rpc error");
      }
      
      final String exceptionClassName = header.hasExceptionClassName() ?
            header.getExceptionClassName() : 
              "ServerDidNotSetExceptionClassName";
      final String errorMsg = header.hasErrorMsg() ? 
            header.getErrorMsg() : "ServerDidNotSetErrorMsg" ;
      final RpcErrorCodeProto erCode = 
                (header.hasErrorDetail() ? header.getErrorDetail() : null);
      if (erCode == null) {
         LOG.warn("Detailed error code not set by server on rpc error");
      }
      RemoteException re = 
          ( (erCode == null) ? 
              new RemoteException(exceptionClassName, errorMsg) :
          new RemoteException(exceptionClassName, errorMsg, erCode));
      if (status == RpcStatusProto.ERROR) {
        calls.remove(callId);
        call.setException(re);
      } else if (status == RpcStatusProto.FATAL) {
        // Close the connection
        markClosed(re);
      }
    }
  } catch (IOException e) {
    markClosed(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 71, Source: Client.java


Note: The org.apache.hadoop.io.Writable.readFields examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.