

Java IdUserGroup Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.IdUserGroup. If you are unsure what IdUserGroup is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The IdUserGroup class belongs to the org.apache.hadoop.nfs.nfs3 package. Fifteen code examples of the class are shown below, sorted by popularity by default.
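
Before diving into the examples, here is a minimal, hypothetical sketch of the pattern they all share: construct an IdUserGroup and resolve owner/group names to numeric ids with getUidAllowingUnknown/getGidAllowingUnknown (both taken from the snippets below). The "hdfs"/"supergroup" names and the surrounding main method are illustrative only.

import org.apache.hadoop.nfs.nfs3.IdUserGroup;

public class IdUserGroupSketch {
  public static void main(String[] args) throws Exception {
    // Build the uid/gid mapping (same constructor as in Examples 1 and 9).
    IdUserGroup iug = new IdUserGroup();
    // Unknown names fall back to a default id instead of failing.
    int uid = iug.getUidAllowingUnknown("hdfs");
    int gid = iug.getGidAllowingUnknown("supergroup");
    System.out.println("uid=" + uid + ", gid=" + gid);
  }
}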

Example 1: RpcProgramNfs3

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public RpcProgramNfs3(List<String> exports, Configuration config)
    throws IOException {
  super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100);
 
  config.set(FsPermission.UMASK_LABEL, "000");
  iug = new IdUserGroup();
  writeManager = new WriteManager(iug, config);
  clientCache = new DFSClientCache(config);
  superUserClient = new DFSClient(NameNode.getAddress(config), config);
  replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  bufferSize = config.getInt("io.file.buffer.size", 4096);
  
  writeDumpDir = config.get("dfs.nfs3.dump.dir", "/tmp/.hdfs-nfs");    
  boolean enableDump = config.getBoolean("dfs.nfs3.enableDump", true);
  if (!enableDump) {
    writeDumpDir = null;
  } else {
    clearDirectory(writeDumpDir);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: RpcProgramNfs3.java
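
As a hypothetical companion to Example 1, the sketch below shows how the two dump-related settings the constructor reads might be supplied. The key names "dfs.nfs3.dump.dir" and "dfs.nfs3.enableDump" are taken verbatim from the code above; everything else is illustrative.

import org.apache.hadoop.conf.Configuration;

public class DumpConfigSketch {
  public static void main(String[] args) {
    Configuration config = new Configuration();
    config.set("dfs.nfs3.dump.dir", "/data/.hdfs-nfs"); // custom dump directory
    config.setBoolean("dfs.nfs3.enableDump", false);    // false makes writeDumpDir null
  }
}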

Example 2: WriteManager

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  this.config = config;
  streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
      Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
    LOG.info("Reset stream timeout to minimum value " +
        Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
    streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
  }
  maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
      Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
  LOG.info("Maximum open streams is " + maxStreams);
  this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: WriteManager.java

Example 3: WriteManager

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  this.config = config;
  streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
      Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
    LOG.info("Reset stream timeout to minimum value "
        + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
    streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
  }
  maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
      Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
  LOG.info("Maximum open streams is "+ maxStreams);
  this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 17, Source: WriteManager.java

Example 4: WriteManager

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  
  streamTimeout = config.getLong("dfs.nfs3.stream.timeout",
      DEFAULT_STREAM_TIMEOUT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < MINIMIUM_STREAM_TIMEOUT) {
    LOG.info("Reset stream timeout to minimum value "
        + MINIMIUM_STREAM_TIMEOUT + "ms.");
    streamTimeout = MINIMIUM_STREAM_TIMEOUT;
  }
  
  this.streamMonitor = new StreamMonitor();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 15, Source: WriteManager.java
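
The constructors in Examples 2-4 all clamp the configured stream timeout to a minimum value. Below is a hypothetical sketch of setting that timeout, using the literal key "dfs.nfs3.stream.timeout" from Example 4; the chosen value and surrounding code are illustrative only.

import org.apache.hadoop.conf.Configuration;

public class StreamTimeoutSketch {
  public static void main(String[] args) {
    Configuration config = new Configuration();
    // Values below the minimum are silently raised by the WriteManager constructor.
    config.setLong("dfs.nfs3.stream.timeout", 10 * 1000L); // 10 seconds, in ms
  }
}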

Example 5: getFileAttr

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
/**
 * If the file is in cache, update the size based on the cached data size
 */
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle,
    IdUserGroup iug) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if (attr != null) {
    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: WriteManager.java

Example 6: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdUserGroup iug) {
  /**
   * Some 32-bit Linux clients have problems with 64-bit fileIds: such a
   * client appears to keep only the lower 32 bits of the fileId and treat
   * them as a signed int. When bit 31 is set, the client considers the id
   * invalid.
   */
  return new Nfs3FileAttributes(fs.isDir(), fs.getChildrenNum(), fs
      .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 13, Source: Nfs3Utils.java
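
The Javadoc in Example 6 describes 32-bit clients truncating a 64-bit fileId to a signed int. Here is a self-contained illustration of that truncation (not part of the Hadoop source):

public class FileIdTruncationSketch {
  public static void main(String[] args) {
    long fileId = 0x80000000L;        // bit 31 set
    int seenByClient = (int) fileId;  // a 32-bit client keeps only the low 32 bits
    System.out.println(seenByClient); // prints -2147483648: "invalid" to the client
  }
}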

Example 7: OpenFileCtx

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath, DFSClient client, IdUserGroup iug) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  // We use ReverseComparatorOnMin as the map's comparator, so the data
  // with the largest offsets is dumped first, while the last element is
  // retrieved as the next one to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);
  
  pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
  
  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  asyncWriteBackStartOffset = 0;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try {
    assert (nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {
    // getPos() can throw; the position check is best-effort only
  }
  dumpThread = null;
  this.client = client;
  this.iug = iug;
}
 
Developer: hopshadoop, Project: hops, Lines: 33, Source: OpenFileCtx.java
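
OffsetRange.ReverseComparatorOnMin in Example 7 is internal to the Hadoop source. The sketch below reproduces the ordering idea only, with plain Long offsets, to show why firstKey() yields the write to dump first and lastKey() the next one to flush to HDFS:

import java.util.concurrent.ConcurrentSkipListMap;

public class ReverseOrderSketch {
  public static void main(String[] args) {
    ConcurrentSkipListMap<Long, String> pendingWrites =
        new ConcurrentSkipListMap<>((a, b) -> Long.compare(b, a)); // descending order
    pendingWrites.put(0L, "write@0");
    pendingWrites.put(4096L, "write@4096");
    pendingWrites.put(8192L, "write@8192");
    System.out.println(pendingWrites.firstKey()); // 8192: largest offset, dumped first
    System.out.println(pendingWrites.lastKey());  // 0: smallest offset, written back next
  }
}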

Example 8: getFileAttr

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
/**
 * If the file is in cache, update the size based on the cached data size
 */
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle,
    IdUserGroup iug) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if (attr != null) {
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: WriteManager.java

Example 9: RpcProgramNfs3

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public RpcProgramNfs3(Configuration config) throws IOException {
  super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
          Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.VERSION);

  config.set(FsPermission.UMASK_LABEL, "000");
  iug = new IdUserGroup();
  
  exports = NfsExports.getInstance(config);
  writeManager = new WriteManager(iug, config);
  clientCache = new DFSClientCache(config);
  superUserClient = new DFSClient(NameNode.getAddress(config), config);
  replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  bufferSize = config
      .getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  
  writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
      Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
  boolean enableDump = config.getBoolean(Nfs3Constant.ENABLE_FILE_DUMP_KEY,
      Nfs3Constant.ENABLE_FILE_DUMP_DEFAULT);
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY, DFS_NFS_USER_NAME_KEY);

  if (!enableDump) {
    writeDumpDir = null;
  } else {
    clearDirectory(writeDumpDir);
  }

  rpcCallCache = new RpcCallCache("NFS3", 256);
}
 
Developer: hopshadoop, Project: hops, Lines: 36, Source: RpcProgramNfs3.java

Example 10: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdUserGroup iug) {
  /**
   * Some 32-bit Linux clients have problems with 64-bit fileIds: such a
   * client appears to keep only the lower 32 bits of the fileId and treat
   * them as a signed int. When bit 31 is set, the client considers the id
   * invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  
  return new Nfs3FileAttributes(fileType, fs.getChildrenNum(),
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
}
 
Developer: hopshadoop, Project: hops, Lines: 16, Source: Nfs3Utils.java

Example 11: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write has been added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      checkDump();
      
      // In testing, we noticed that some Linux clients send a batch
      // (e.g., 1MB) of reordered writes and won't send more writes until
      // they get responses for the previous batch, so send the response
      // immediately for unstable non-sequential writes.
      if (request.getStableHow() == WriteStableHow.UNSTABLE) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("UNSTABLE write request, send response for offset: "
              + writeCtx.getOffset());
        }
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils
            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
                xid, new VerifierNone()), xid);
        writeCtx.setReplied(true);
      }
    }
  }
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 40, Source: OpenFileCtx.java

Example 12: WriteManager

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  this.config = config;
  
  streamTimeout = config.getLong("dfs.nfs3.stream.timeout",
      DEFAULT_STREAM_TIMEOUT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < MINIMIUM_STREAM_TIMEOUT) {
    LOG.info("Reset stream timeout to minimum value "
        + MINIMIUM_STREAM_TIMEOUT + "ms.");
    streamTimeout = MINIMIUM_STREAM_TIMEOUT;
  }
  
  this.streamMonitor = new StreamMonitor();
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 16, Source: WriteManager.java

Example 13: RpcProgramNfs3

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public RpcProgramNfs3(Configuration config) throws IOException {
  super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.VERSION);
 
  config.set(FsPermission.UMASK_LABEL, "000");
  iug = new IdUserGroup();
  
  exports = NfsExports.getInstance(config);
  writeManager = new WriteManager(iug, config);
  clientCache = new DFSClientCache(config);
  superUserClient = new DFSClient(NameNode.getAddress(config), config);
  replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  bufferSize = config.getInt(
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  
  writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
      Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
  boolean enableDump = config.getBoolean(Nfs3Constant.ENABLE_FILE_DUMP_KEY,
      Nfs3Constant.ENABLE_FILE_DUMP_DEFAULT);
  if (!enableDump) {
    writeDumpDir = null;
  } else {
    clearDirectory(writeDumpDir);
  }

  rpcCallCache = new RpcCallCache("NFS3", 256);
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 32, Source: RpcProgramNfs3.java

Example 14: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdUserGroup iug) {
  /**
   * Some 32-bit Linux clients have problems with 64-bit fileIds: such a
   * client appears to keep only the lower 32 bits of the fileId and treat
   * them as a signed int. When bit 31 is set, the client considers the id
   * invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  
  return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs
      .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 16, Source: Nfs3Utils.java

Example 15: OpenFileCtx

import org.apache.hadoop.nfs.nfs3.IdUserGroup; // import the dependent package/class
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath, DFSClient client, IdUserGroup iug) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  // We use ReverseComparatorOnMin as the map's comparator, so the data
  // with the largest offsets is dumped first, while the last element is
  // retrieved as the next one to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);
  
  pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
  
  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  asyncWriteBackStartOffset = 0;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try {
    assert (nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {
    // getPos() can throw; the position check is best-effort only
  }
  dumpThread = null;
  this.client = client;
  this.iug = iug;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 32, Source: OpenFileCtx.java


Note: The org.apache.hadoop.nfs.nfs3.IdUserGroup examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets come from open-source projects, and copyright remains with their original authors; consult each project's license before using or redistributing the code. Do not republish without permission.