

Java MountResponse Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mount.MountResponse. If you are wondering what the MountResponse class does in practice and how to use it, the curated examples below should help.


The MountResponse class belongs to the org.apache.hadoop.mount package. Twelve code examples of the class are shown below, sorted by popularity by default.
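
For quick orientation, here is a minimal sketch that pulls together the three MountResponse static helpers that recur in the examples below: writeMNTResponse, writeMountList and writeExportList. It is an illustration based only on the calls visible in those examples, not a drop-in MOUNT server; the wrapper class and method names are hypothetical, the import paths follow the usual Hadoop NFS module layout and may differ between versions, and the writeExportList overload that takes host matchers only appears in newer Hadoop releases (older code passes just the export list, as in Example 1).

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.mount.MountEntry;
import org.apache.hadoop.mount.MountResponse;
import org.apache.hadoop.nfs.NfsExports;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;

// Hypothetical wrapper class; the names here are illustrative only.
public class MountResponseSketch {

  // Successful MNT reply: status NFS3_OK plus the opaque file-handle bytes.
  static XDR replyToMnt(XDR out, int xid, FileHandle handle) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
        handle.getContent());
  }

  // DUMP reply: serialize the currently registered mount entries.
  static XDR replyToDump(XDR out, int xid, List<MountEntry> mounts) {
    MountResponse.writeMountList(out, xid, new ArrayList<MountEntry>(mounts));
    return out;
  }

  // EXPORT reply: newer Hadoop versions also expect the per-export host matchers.
  static XDR replyToExport(XDR out, int xid, List<String> exports,
      NfsExports hostsMatcher) {
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    hostsMatchers.add(hostsMatcher);
    return MountResponse.writeExportList(out, xid, exports, hostsMatchers);
  }
}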

Example 1: handleInternal

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out,
    InetAddress client, Channel channel) {
  int procedure = rpcCall.getProcedure();
  int xid = rpcCall.getXid();
  if (procedure == MNTPROC_NULL) {
    out = nullOp(out, xid, client);
  } else if (procedure == MNTPROC_MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (procedure == MNTPROC_DUMP) {
    out = dump(out, xid, client);
  } else if (procedure == MNTPROC_UMNT) {      
    out = umnt(xdr, out, xid, client);
  } else if (procedure == MNTPROC_UMNTALL) {
    umntall(out, xid, client);
  } else if (procedure == MNTPROC_EXPORT) {
    out = MountResponse.writeExportList(out, xid, exports);
  } else {
    // Invalid procedure
    RpcAcceptedReply.voidReply(out, xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
  }
  return out;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source file: RpcProgramMountd.java

Example 2: dump

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR dump(XDR out, int xid, InetAddress client) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT NULLOP : " + " client: " + client);
  }

  List<MountEntry> copy = new ArrayList<MountEntry>(mounts);
  MountResponse.writeMountList(out, xid, copy);
  return out;
}
 
Developer: naver, Project: hadoop, Lines: 11, Source file: RpcProgramMountd.java

Example 3: mnt

import org.apache.hadoop.mount.MountResponse; // import the required package/class
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }

  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path + ", exception:" + e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId()
      + ") to client for export " + path);
  mounts.add(new MountEntry(host, path));

  MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
      handle.getContent());
  return out;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 37, Source file: RpcProgramMountd.java

Example 4: dump

import org.apache.hadoop.mount.MountResponse; // import the required package/class
public XDR dump(XDR out, int xid, InetAddress client) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT NULLOP : " + " client: " + client);
  }

  List<MountEntry> copy = new ArrayList<MountEntry>(mounts);
  MountResponse.writeMountList(out, xid, copy);
  return out;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 10, Source file: RpcProgramMountd.java

Example 5: handleInternal

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
  RpcCall rpcCall = (RpcCall) info.header();
  final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  byte[] data = new byte[info.data().readableBytes()];
  info.data().readBytes(data);
  XDR xdr = new XDR(data);
  XDR out = new XDR();
  InetAddress client =
      ((InetSocketAddress) info.remoteAddress()).getAddress();

  if (mntproc == MNTPROC.NULL) {
    out = nullOp(out, xid, client);
  } else if (mntproc == MNTPROC.MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.DUMP) {
    out = dump(out, xid, client);
  } else if (mntproc == MNTPROC.UMNT) {
    out = umnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.UMNTALL) {
    umntall(out, xid, client);
  } else if (mntproc == MNTPROC.EXPORT) {
    // Currently only support one NFS export 
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    hostsMatchers.add(hostsMatcher);
    out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
  } else {
    // Invalid procedure
    RpcAcceptedReply
        .getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL,
            new VerifierNone()).write(out);
  }
  ChannelBuffer buf =
      ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer: hopshadoop, Project: hops, Lines: 39, Source file: RpcProgramMountd.java

Example 6: handleInternal

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
  RpcCall rpcCall = (RpcCall) info.header();
  final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  byte[] data = new byte[info.data().readableBytes()];
  info.data().readBytes(data);
  XDR xdr = new XDR(data);
  XDR out = new XDR();
  InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();

  if (mntproc == MNTPROC.NULL) {
    out = nullOp(out, xid, client);
  } else if (mntproc == MNTPROC.MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.DUMP) {
    out = dump(out, xid, client);
  } else if (mntproc == MNTPROC.UMNT) {      
    out = umnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.UMNTALL) {
    umntall(out, xid, client);
  } else if (mntproc == MNTPROC.EXPORT) {
    // Currently only support one NFS export "/"
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    hostsMatchers.add(hostsMatcher);
    out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
  } else {
    // Invalid procedure
    RpcAcceptedReply.getInstance(xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
        out);
  }  
  ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 37, Source file: RpcProgramMountd.java

Example 7: handleInternal

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
  RpcCall rpcCall = (RpcCall) info.header();
  final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  byte[] data = new byte[info.data().readableBytes()];
  info.data().readBytes(data);
  XDR xdr = new XDR(data);
  XDR out = new XDR();
  InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();

  if (mntproc == MNTPROC.NULL) {
    out = nullOp(out, xid, client);
  } else if (mntproc == MNTPROC.MNT) {
    out = mnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.DUMP) {
    out = dump(out, xid, client);
  } else if (mntproc == MNTPROC.UMNT) {      
    out = umnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.UMNTALL) {
    umntall(out, xid, client);
  } else if (mntproc == MNTPROC.EXPORT) {
    // Currently only support one NFS export 
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    hostsMatchers.add(hostsMatcher);
    out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
  } else {
    // Invalid procedure
    RpcAcceptedReply.getInstance(xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
        out);
  }  
  ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 37, Source file: RpcProgramMountd.java

Example 8: handleInternal

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
  RpcCall rpcCall = (RpcCall) info.header();
  final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
  int xid = rpcCall.getXid();
  byte[] data = new byte[info.data().readableBytes()];
  info.data().readBytes(data);
  XDR xdr = new XDR(data);
  XDR out = new XDR();
  InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();

  if (mntproc == MNTPROC.NULL) {
    out = nullOp(out, xid, client);
  } else if (mntproc == MNTPROC.MNT) {
    // Only do port monitoring for MNT
    if (!doPortMonitoring(info.remoteAddress())) {
      out = MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out,
          xid, null);
    } else {
      out = mnt(xdr, out, xid, client);
    }
  } else if (mntproc == MNTPROC.DUMP) {
    out = dump(out, xid, client);
  } else if (mntproc == MNTPROC.UMNT) {      
    out = umnt(xdr, out, xid, client);
  } else if (mntproc == MNTPROC.UMNTALL) {
    umntall(out, xid, client);
  } else if (mntproc == MNTPROC.EXPORT) {
    // Currently only support one NFS export
    List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
    if (hostsMatcher != null) {
      hostsMatchers.add(hostsMatcher);
      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
    } else {
      // This means there are no valid exports provided.
      RpcAcceptedReply.getInstance(xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
        out);
    }
  } else {
    // Invalid procedure
    RpcAcceptedReply.getInstance(xid,
        RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
        out);
  }
  ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
  RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
  RpcUtil.sendRpcResponse(ctx, rsp);
}
 
Developer: naver, Project: hadoop, Lines: 50, Source file: RpcProgramMountd.java

Example 9: mnt

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  if (hostsMatcher == null) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }
  AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
  if (accessPrivilege == AccessPrivilege.NONE) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }

  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }

  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path, e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId()
      + ") to client for export " + path);
  mounts.add(new MountEntry(host, path));

  MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
      handle.getContent());
  return out;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 48, Source file: RpcProgramMountd.java

Example 10: mnt

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
  if (accessPrivilege == AccessPrivilege.NONE) {
    return MountResponse
        .writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
  }

  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }

  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path, e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId() +
      ") to client for export " + path);
  mounts.add(new MountEntry(host, path));

  MountResponse
      .writeMNTResponse(Nfs3Status.NFS3_OK, out, xid, handle.getContent());
  return out;
}
 
Developer: hopshadoop, Project: hops, Lines: 44, Source file: RpcProgramMountd.java

Example 11: mnt

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
  if (accessPrivilege == AccessPrivilege.NONE) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }

  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }

  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path + ", exception:" + e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId()
      + ") to client for export " + path);
  mounts.add(new MountEntry(host, path));

  MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
      handle.getContent());
  return out;
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 44, Source file: RpcProgramMountd.java

Example 12: mnt

import org.apache.hadoop.mount.MountResponse; // import the required package/class
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
  if (accessPrivilege == AccessPrivilege.NONE) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }

  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }

  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path, e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }

  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId()
      + ") to client for export " + path);
  mounts.add(new MountEntry(host, path));

  MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
      handle.getContent());
  return out;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 44, Source file: RpcProgramMountd.java


Note: The org.apache.hadoop.mount.MountResponse examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the license of the corresponding project when distributing or using the code, and do not reproduce without permission.