

Java Nfs3FileAttributes Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes. If you have been wondering what Nfs3FileAttributes does, how to use it, or what real-world code that uses it looks like, the selected examples below should help.


The Nfs3FileAttributes class belongs to the org.apache.hadoop.nfs.nfs3 package. Fifteen code examples of the class are shown below, sorted by popularity by default.

Example 1: getFileAttr

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
    String fileName) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);

  if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
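    // For a file that is still open for write, the size reported by HDFS
    // can lag behind the bytes already received; prefer the in-memory
    // next write offset tracked by the open-file context.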
    OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
        .getFileId()));

    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: WriteManager.java

Example 2: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /*
   * Some 32-bit Linux clients have a problem with the 64-bit fileId: the
   * 32-bit client appears to take only the lower 32 bits of the fileId and
   * treat them as a signed int. When the 32nd bit is set, the client
   * considers the id invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
      .getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Developer: naver | Project: hadoop | Lines: 19 | Source: Nfs3Utils.java
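The 32-bit fileId caveat in the comment above is easy to demonstrate. Below is a minimal, self-contained sketch (not from Hadoop; class name and values are illustrative) of what a client sees when it truncates a 64-bit fileId whose 32nd bit is set down to a signed 32-bit int:

public class FileIdTruncation {
  public static void main(String[] args) {
    long fileId = 1L << 31;        // the 32nd bit (counting from 1) is set
    int truncated = (int) fileId;  // what a 32-bit client effectively keeps
    System.out.println(truncated); // prints -2147483648: negative, hence "invalid"
  }
}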

Example 3: getAccessRightsForUserGroup

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static int getAccessRightsForUserGroup(int uid, int gid,
    int[] auxGids, Nfs3FileAttributes attr) {
  int mode = attr.getMode();
  if (uid == attr.getUid()) {
    return getAccessRights(mode >> 6, attr.getType());
  }
  if (gid == attr.getGid()) {
    return getAccessRights(mode >> 3, attr.getType());
  }
  // Check for membership in auxiliary groups
  if (auxGids != null) {
    for (int auxGid : auxGids) {
      if (attr.getGid() == auxGid) {
        return getAccessRights(mode >> 3, attr.getType());
      }
    }
  }
  return getAccessRights(mode, attr.getType());
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: Nfs3Utils.java
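The bit shifts above follow the standard Unix rwx layout: bits 8..6 hold the owner triple, bits 5..3 the group triple, and bits 2..0 everyone else. A minimal sketch of the same arithmetic (plain Java, no Hadoop dependencies; the octal mode is just an illustration):

public class ModeBits {
  public static void main(String[] args) {
    int mode = 0754;                                   // rwxr-xr-- as an octal literal
    System.out.println(Integer.toBinaryString(mode));  // 111101100
    System.out.println((mode >> 6) & 7);               // 7 -> owner: rwx
    System.out.println((mode >> 3) & 7);               // 5 -> group: r-x
    System.out.println(mode & 7);                      // 4 -> other: r--
  }
}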

Example 4: deserialize

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static PATHCONF3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean(); // attributes-follow flag; discarded because attributes are read unconditionally below
  Nfs3FileAttributes objPostOpAttr = Nfs3FileAttributes.deserialize(xdr);
  int linkMax = 0;
  int nameMax = 0;
  boolean noTrunc = false;
  boolean chownRestricted = false;
  boolean caseInsensitive = false;
  boolean casePreserving = false;

  if (status == Nfs3Status.NFS3_OK) {
    linkMax = xdr.readInt();
    nameMax = xdr.readInt();
    noTrunc = xdr.readBoolean();
    chownRestricted = xdr.readBoolean();
    caseInsensitive = xdr.readBoolean();
    casePreserving = xdr.readBoolean();
  }
  return new PATHCONF3Response(status, objPostOpAttr, linkMax, nameMax,
      noTrunc, chownRestricted, caseInsensitive, casePreserving);
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: PATHCONF3Response.java

Example 5: deserialize

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static READDIR3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean();
  Nfs3FileAttributes postOpDirAttr = Nfs3FileAttributes.deserialize(xdr);
  long cookieVerf = 0;
  ArrayList<Entry3> entries = new ArrayList<Entry3>();
  DirList3 dirList = null;

  if (status == Nfs3Status.NFS3_OK) {
    cookieVerf = xdr.readHyper();
    while (xdr.readBoolean()) {
      Entry3 e = Entry3.deserialzie(xdr);
      entries.add(e);
    }
    boolean eof = xdr.readBoolean();
    Entry3[] allEntries = new Entry3[entries.size()];
    entries.toArray(allEntries);
    dirList = new DirList3(allEntries, eof);
  }
  return new READDIR3Response(status, postOpDirAttr, cookieVerf, dirList);
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: READDIR3Response.java
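The while (xdr.readBoolean()) loop decodes an XDR linked list: each entry is preceded by a four-byte boolean marker (1 means another entry follows, 0 ends the list). The sketch below mimics that wire pattern with plain java.nio instead of the Hadoop XDR class (class name and values are illustrative):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class XdrListPattern {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(64);
    for (int value : new int[] {10, 20, 30}) {
      buf.putInt(1);       // "entry follows" marker
      buf.putInt(value);   // the entry itself
    }
    buf.putInt(0);         // end-of-list marker
    buf.flip();

    List<Integer> entries = new ArrayList<>();
    while (buf.getInt() != 0) {   // same shape as while (xdr.readBoolean())
      entries.add(buf.getInt());
    }
    System.out.println(entries);  // [10, 20, 30]
  }
}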

Example 6: deserialize

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static FSSTAT3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean();
  Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
  long tbytes = 0;
  long fbytes = 0;
  long abytes = 0;
  long tfiles = 0;
  long ffiles = 0;
  long afiles = 0;
  int invarsec = 0;

  if (status == Nfs3Status.NFS3_OK) {
    tbytes = xdr.readHyper();
    fbytes = xdr.readHyper();
    abytes = xdr.readHyper();
    tfiles = xdr.readHyper();
    ffiles = xdr.readHyper();
    afiles = xdr.readHyper();
    invarsec = xdr.readInt();
  }
  return new FSSTAT3Response(status, postOpAttr, tbytes, fbytes, abytes,
      tfiles, ffiles, afiles, invarsec);
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: FSSTAT3Response.java

Example 7: FSINFO3Response

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public FSINFO3Response(int status, Nfs3FileAttributes postOpAttr, int rtmax,
    int rtpref, int rtmult, int wtmax, int wtpref, int wtmult, int dtpref,
    long maxFileSize, NfsTime timeDelta, int properties) {
  super(status);
  this.postOpAttr = postOpAttr;
  this.rtmax = rtmax;
  this.rtpref = rtpref;
  this.rtmult = rtmult;
  this.wtmax = wtmax;
  this.wtpref = wtpref;
  this.wtmult = wtmult;
  this.dtpref = dtpref;
  this.maxFileSize = maxFileSize;
  this.timeDelta = timeDelta;
  this.properties = properties;
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: FSINFO3Response.java

Example 8: deserialize

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static READDIRPLUS3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean();
  Nfs3FileAttributes postOpDirAttr = Nfs3FileAttributes.deserialize(xdr);
  long cookieVerf = 0;
  ArrayList<EntryPlus3> entries = new ArrayList<EntryPlus3>();
  DirListPlus3 dirList = null;

  if (status == Nfs3Status.NFS3_OK) {
    cookieVerf = xdr.readHyper();
    while (xdr.readBoolean()) {
      EntryPlus3 e = EntryPlus3.deseralize(xdr);
      entries.add(e);
    }
    boolean eof = xdr.readBoolean();
    EntryPlus3[] allEntries = new EntryPlus3[entries.size()];
    entries.toArray(allEntries);
    dirList = new DirListPlus3(allEntries, eof);
  }
  return new READDIRPLUS3Response(status, postOpDirAttr, cookieVerf, dirList);
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: READDIRPLUS3Response.java

Example 9: deserialize

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static READ3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean();
  Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
  int count = 0;
  boolean eof = false;
  byte[] data = new byte[0];

  if (status == Nfs3Status.NFS3_OK) {
    count = xdr.readInt();
    eof = xdr.readBoolean();
    int len = xdr.readInt();
    assert (len == count);
    data = xdr.readFixedOpaque(count);
  }

  return new READ3Response(status, postOpAttr, count, eof,
      ByteBuffer.wrap(data));
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: READ3Response.java

Example 10: CommitCtx

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
CommitCtx(long offset, Channel channel, int xid,
    Nfs3FileAttributes preOpAttr) {
  this.offset = offset;
  this.channel = channel;
  this.xid = xid;
  this.preOpAttr = preOpAttr;
  this.startTime = System.nanoTime();
}
 
Developer: naver | Project: hadoop | Lines: 9 | Source: OpenFileCtx.java

Example 11: OpenFileCtx

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath, DFSClient client, IdMappingServiceProvider iug,
    boolean aixCompatMode, NfsConfiguration config) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  this.aixCompatMode = aixCompatMode;
  // We use the ReverseComparatorOnMin as the comparator of the map. In this
  // way, we first dump the data with larger offset. In the meanwhile, we
  // retrieve the last element to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);
  
  pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
  
  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  asyncWriteBackStartOffset = 0;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;  
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try {
    assert(nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {
    // best-effort position sanity check; ignore failures from getPos()
  }
  dumpThread = null;
  this.client = client;
  this.iug = iug;
  this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD,
      NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT);
}
 
Developer: naver | Project: hadoop | Lines: 36 | Source: OpenFileCtx.java
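The comparator trick in this constructor is worth isolating: with a reversed comparator, firstEntry() of the skip list returns the largest key (the write to dump first) while lastEntry() returns the smallest (the next write to flush to HDFS). A minimal sketch using plain Long offsets instead of OffsetRange (illustrative only):

import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListMap;

public class ReverseOrderSkipList {
  public static void main(String[] args) {
    ConcurrentSkipListMap<Long, String> pendingWrites =
        new ConcurrentSkipListMap<>(Comparator.<Long>reverseOrder());
    pendingWrites.put(0L, "write@0");
    pendingWrites.put(4096L, "write@4096");
    pendingWrites.put(1024L, "write@1024");

    System.out.println(pendingWrites.firstEntry()); // 4096=write@4096 (largest offset first)
    System.out.println(pendingWrites.lastEntry());  // 0=write@0 (smallest offset last)
  }
}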

Example 12: checkCommit

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
/**
 * Check the commit status for the given offset.
 * @param dfsClient the DFSClient instance
 * @param commitOffset the offset to commit
 * @param channel the channel used to return the response
 * @param xid the xid of the commit request
 * @param preOpAttr the pre-operation attributes
 * @param fromRead whether the commit is triggered by a read request
 * @return one of: COMMIT_FINISHED, COMMIT_WAIT,
 * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
 */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
  if (!fromRead) {
    Preconditions.checkState(channel != null && preOpAttr != null);
    // Keep stream active
    updateLastAccessTime();
  }
  Preconditions.checkState(commitOffset >= 0);

  COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
      preOpAttr, fromRead);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got commit status: " + ret.name());
  }
  // Do the sync outside the lock
  if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
      || ret == COMMIT_STATUS.COMMIT_FINISHED) {
    try {
      // Sync file data and length
      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
      // Nothing to do for metadata since attr related change is pass-through
    } catch (ClosedChannelException cce) {
      if (pendingWrites.isEmpty()) {
        ret = COMMIT_STATUS.COMMIT_FINISHED;
      } else {
        ret = COMMIT_STATUS.COMMIT_ERROR;
      }
    } catch (IOException e) {
      LOG.error("Got stream error during data sync: " + e);
      // Do nothing. Stream will be closed eventually by StreamMonitor.
      // status = Nfs3Status.NFS3ERR_IO;
      ret = COMMIT_STATUS.COMMIT_ERROR;
    }
  }
  return ret;
}
 
Developer: naver | Project: hadoop | Lines: 48 | Source: OpenFileCtx.java
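The EnumSet.of(SyncFlag.UPDATE_LENGTH) detail matters: it asks HDFS to persist the data and also update the file length on the NameNode, so clients that poll attributes see the new size. A hedged usage sketch (the path is a placeholder, and the cast assumes the default FileSystem is HDFS-backed):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncUpdateLength {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes HDFS is the default FS
    FileSystem fs = FileSystem.get(conf);
    try (HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/tmp/hsync-demo"))) {
      out.write("some bytes".getBytes("UTF-8"));
      // Persist the data and make the new length visible on the NameNode
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}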

Example 13: handleSpecialWait

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
private COMMIT_STATUS handleSpecialWait(boolean fromRead, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
  if (!fromRead) {
    // let client retry the same request, add pending commit to sync later
    CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
    pendingCommits.put(commitOffset, commitCtx);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("return COMMIT_SPECIAL_WAIT");
  }
  return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
}
 
Developer: naver | Project: hadoop | Lines: 13 | Source: OpenFileCtx.java

Example 14: createWccData

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
public static WccData createWccData(final WccAttr preOpAttr,
    DFSClient dfsClient, final String fileIdPath,
    final IdMappingServiceProvider iug)
    throws IOException {
  Nfs3FileAttributes postOpDirAttr = getFileAttr(dfsClient, fileIdPath, iug);
  return new WccData(preOpAttr, postOpDirAttr);
}
 
Developer: naver | Project: hadoop | Lines: 8 | Source: Nfs3Utils.java

Example 15: testCheckCommitAixCompatMode

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; // import the required package/class
@Test
public void testCheckCommitAixCompatMode() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);

  NfsConfiguration conf = new NfsConfiguration();
  conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  // Enable AIX compatibility mode.
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
  
  // Test fall-through to pendingWrites check in the event that commitOffset
  // is greater than the number of bytes we've so far flushed.
  Mockito.when(fos.getPos()).thenReturn((long) 2);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED);
  
  // Test the case when we actually have received more bytes than we're trying
  // to commit.
  ctx.getPendingWritesForTest().put(new OffsetRange(0, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  Mockito.when(fos.getPos()).thenReturn((long) 10);
  ctx.setNextOffsetForTest((long)10);
  status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
}
 
Developer: naver | Project: hadoop | Lines: 28 | Source: TestWrites.java


Note: The org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original developers, and copyright remains with the original authors. Consult the corresponding project's License before distributing or reusing the code; do not repost without permission.