Java RpcCall Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.oncrpc.RpcCall. If you are wondering what the RpcCall class does, how to use it, or what real-world code that uses it looks like, the curated class code examples below may help.


The RpcCall class belongs to the org.apache.hadoop.oncrpc package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
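For orientation: RpcCall models the ONC RPC call header (xid, program number, version, procedure, credentials, and verifier) that is serialized into an XDR buffer ahead of the procedure-specific arguments. The following minimal sketch is assembled from the patterns used in the examples below; the xid value and the choice of the NFS NULL procedure are illustrative assumptions rather than code taken from any of the listed projects:

import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;

// Build an XDR request whose header is an RpcCall with AUTH_NONE
// credentials and verifier; procedure arguments would follow the header.
static XDR buildNullCall() {
  XDR request = new XDR();
  RpcCall.getInstance(0x12345678, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.NULL.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);
  // A real request would now serialize its arguments, e.g.
  // createReq.serialize(request), as shown in the examples below.
  return request;
}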

Example 1: handleInternal

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out,
    InetAddress client, Channel channel) {
  int procedure = rpcCall.getProcedure();
  int xid = rpcCall.getXid();
  if (procedure == MNTPROC_NULL) {
    out = nullOp(out, xid, client);
  } else if (procedure == MNTPROC_MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (procedure == MNTPROC_DUMP) {
    out = dump(out, xid, client);
  } else if (procedure == MNTPROC_UMNT) {      
    out = umnt(xdr, out, xid, client);
  } else if (procedure == MNTPROC_UMNTALL) {
    umntall(out, xid, client);
  } else if (procedure == MNTPROC_EXPORT) {
    out = MountResponse.writeExportList(out, xid, exports);
  } else {
    // Invalid procedure
    RpcAcceptedReply.voidReply(out, xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
  }
  return out;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 24, Source: RpcProgramMountd.java

Example 2: create

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static XDR create() {
  XDR request = new XDR();
  RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3_CREATE);

  // credentials
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero
  // verifier
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero

  SetAttr3 objAttr = new SetAttr3();
  CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
      "out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
  createReq.serialize(request);
  return request;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 19, Source: TestOutOfOrderWrite.java

Example 3: write

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3_WRITE);

  // credentials
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero
  // verifier
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero
  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 18, Source: TestOutOfOrderWrite.java

Example 4: write

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public WRITE3Response write(XDR xdr, RpcInfo info) {
  SecurityHandler securityHandler = getSecurityHandler(info);
  RpcCall rpcCall = (RpcCall) info.header();
  int xid = rpcCall.getXid();
  SocketAddress remoteAddress = info.remoteAddress();
  return write(xdr, info.channel(), xid, securityHandler, remoteAddress);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: RpcProgramNfs3.java

Example 5: commit

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public COMMIT3Response commit(XDR xdr, RpcInfo info) {
  SecurityHandler securityHandler = getSecurityHandler(info);
  RpcCall rpcCall = (RpcCall) info.header();
  int xid = rpcCall.getXid();
  SocketAddress remoteAddress = info.remoteAddress();
  return commit(xdr, info.channel(), xid, securityHandler, remoteAddress);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: RpcProgramNfs3.java

Example 6: create

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static XDR create() {
  XDR request = new XDR();
  RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  SetAttr3 objAttr = new SetAttr3();
  CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
      "out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
  createReq.serialize(request);
  return request;
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestOutOfOrderWrite.java

Example 7: write

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestOutOfOrderWrite.java

Example 8: messageReceived

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
    throws Exception {

  RpcInfo info = (RpcInfo) e.getMessage();
  RpcCall rpcCall = (RpcCall) info.header();
  final int portmapProc = rpcCall.getProcedure();
  int xid = rpcCall.getXid();
  XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
      XDR.State.READING);
  XDR out = new XDR();

  if (portmapProc == PMAPPROC_NULL) {
    out = nullOp(xid, in, out);
  } else if (portmapProc == PMAPPROC_SET) {
    out = set(xid, in, out);
  } else if (portmapProc == PMAPPROC_UNSET) {
    out = unset(xid, in, out);
  } else if (portmapProc == PMAPPROC_DUMP) {
    out = dump(xid, in, out);
  } else if (portmapProc == PMAPPROC_GETPORT) {
    out = getport(xid, in, out);
  } else if (portmapProc == PMAPPROC_GETVERSADDR) {
    out = getport(xid, in, out);
  } else {
    LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone());
    reply.write(out);
  }

  ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
      .buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: RpcProgramPortmap.java

Example 9: create

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
public static XDR create(PortmapMapping mapping, boolean set) {
  XDR request = new XDR();
  int procedure = set ? RpcProgramPortmap.PMAPPROC_SET
      : RpcProgramPortmap.PMAPPROC_UNSET;
  RpcCall call = RpcCall.getInstance(
      RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
      RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, procedure,
      new CredentialsNone(), new VerifierNone());
  call.write(request);
  return mapping.serialize(request);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: PortmapRequest.java

Example 10: testRegistration

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Test(timeout = 1000)
public void testRegistration() throws IOException, InterruptedException {
  XDR req = new XDR();
  RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
      RpcProgramPortmap.VERSION,
      RpcProgramPortmap.PMAPPROC_SET,
      new CredentialsNone(), new VerifierNone()).write(req);

  PortmapMapping sent = new PortmapMapping(90000, 1,
      PortmapMapping.TRANSPORT_TCP, 1234);
  sent.serialize(req);

  byte[] reqBuf = req.getBytes();
  DatagramSocket s = new DatagramSocket();
  DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
      pm.getUdpServerLoAddress());
  try {
    s.send(p);
  } finally {
    s.close();
  }

  // Give the server a chance to process the request
  Thread.sleep(100);
  boolean found = false;
  @SuppressWarnings("unchecked")
  Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
      .getInternalState(pm.getHandler(), "map");

  for (PortmapMapping m : map.values()) {
    if (m.getPort() == sent.getPort()
        && PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Registration failed", found);
}
 
Developer ID: naver, Project: hadoop, Lines: 39, Source: TestPortmap.java

Example 11: createPortmapXDRheader

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static void createPortmapXDRheader(XDR xdr_out, int procedure) {
    // TODO: Move this to RpcRequest
    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
    xdr_out.writeInt(0); //no auth
    xdr_out.writeInt(0);
    xdr_out.writeInt(0);
    xdr_out.writeInt(0);
    
    /*
    xdr_out.putInt(1); //unix auth
    xdr_out.putVariableOpaque(new byte[20]);
    xdr_out.putInt(0);
    xdr_out.putInt(0);
*/
  }
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 16, Source: TestPortmapRegister.java

Example 12: handleInternal

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public XDR handleInternal(RpcCall rpcCall, XDR in, XDR out,
    InetAddress client, Channel channel) {
  Procedure procedure = Procedure.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  switch (procedure) {
  case PMAPPROC_NULL:
    out = nullOp(xid, in, out);
    break;
  case PMAPPROC_SET:
    out = set(xid, in, out);
    break;
  case PMAPPROC_UNSET:
    out = unset(xid, in, out);
    break;
  case PMAPPROC_DUMP:
    out = dump(xid, in, out);
    break;
  case PMAPPROC_GETPORT:
    out = getport(xid, in, out);
    break;
  case PMAPPROC_GETVERSADDR:
    out = getport(xid, in, out);
    break;
  default:
    LOG.info("PortmapHandler unknown rpc procedure=" + procedure);
    RpcAcceptedReply.voidReply(out, xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
  }
  return out;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 32, Source: RpcProgramPortmap.java

Example 13: create

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
public static XDR create(PortmapMapping mapping) {
  XDR request = new XDR();
  RpcCall.write(request,
      RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
      RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
      Procedure.PMAPPROC_SET.getValue());
  request.writeInt(AuthFlavor.AUTH_NONE.getValue());
  request.writeInt(0);
  request.writeInt(0);
  request.writeInt(0);
  return mapping.serialize(request);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 13, Source: PortmapRequest.java

Example 14: handleInternal

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
  RpcCall rpcCall = (RpcCall) info.header();
  final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  byte[] data = new byte[info.data().readableBytes()];
  info.data().readBytes(data);
  XDR xdr = new XDR(data);
  XDR out = new XDR();
  InetAddress client =
      ((InetSocketAddress) info.remoteAddress()).getAddress();

  if (mntproc == MNTPROC.NULL) {
    out = nullOp(out, xid, client);
  } else if (mntproc == MNTPROC.MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.DUMP) {
    out = dump(out, xid, client);
  } else if (mntproc == MNTPROC.UMNT) {
    out = umnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.UMNTALL) {
    umntall(out, xid, client);
  } else if (mntproc == MNTPROC.EXPORT) {
    // Currently only one NFS export is supported
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    hostsMatchers.add(hostsMatcher);
    out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
  } else {
    // Invalid procedure
    RpcAcceptedReply
        .getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL,
            new VerifierNone()).write(out);
  }
  ChannelBuffer buf =
      ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 39, Source: RpcProgramMountd.java

Example 15: write

import org.apache.hadoop.oncrpc.RpcCall; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 =
      new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE,
          ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer ID: hopshadoop, Project: hops, Lines: 14, Source: TestOutOfOrderWrite.java


Note: The org.apache.hadoop.oncrpc.RpcCall class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please consult the corresponding project's license before redistributing or reusing it. Do not reproduce this article without permission.