

Java CacheEntryWithPayload.getPayload Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload.getPayload. If you are wondering how CacheEntryWithPayload.getPayload is used in practice and what it is for, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload.


The following 15 code examples demonstrate the CacheEntryWithPayload.getPayload method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
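All of the examples below follow the same retry-cache idiom: call RetryCache.waitForCompletion to check whether an earlier attempt of the same request already finished, return the cached result via getPayload if it did, and otherwise perform the operation and record its outcome with RetryCache.setState. The minimal sketch below distills that shared pattern; the class RetryCacheSketch, the method idempotentOperation, and doExpensiveWork are hypothetical placeholders, and only the RetryCache/CacheEntryWithPayload calls mirror the API used in the examples.

import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;

public class RetryCacheSketch {
  // Assumed to be created and configured elsewhere, as in the examples below.
  private final RetryCache retryCache;

  public RetryCacheSketch(RetryCache retryCache) {
    this.retryCache = retryCache;
  }

  public String idempotentOperation(String input) {
    // Ask the cache whether a previous attempt of this call already completed.
    CacheEntryWithPayload cacheEntry =
        RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      // Retried request: return the stored result instead of redoing the work.
      return (String) cacheEntry.getPayload();
    }
    String result = null;
    try {
      result = doExpensiveWork(input); // hypothetical stand-in for the real operation
      return result;
    } finally {
      // Record success/failure together with the payload for future retries.
      RetryCache.setState(cacheEntry, result != null, result);
    }
  }

  private String doExpensiveWork(String input) {
    return input.toUpperCase();
  }
}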

Example 1: echo

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
/**
 * A server method implemented using {@link RetryCache}.
 * 
 * @param input is returned back in echo, if {@code success} is true.
 * @param failureOutput returned on failure, if {@code success} is false.
 * @param methodTime time taken by the operation. By passing smaller/larger
 *          value one can simulate an operation that takes short/long time.
 * @param success whether this operation completes successfully or not
 * @return return the input parameter {@code input}, if {@code success} is
 *         true, else return {@code failureOutput}.
 */
int echo(int input, int failureOutput, long methodTime, boolean success)
    throws InterruptedException {
  CacheEntryWithPayload entry = RetryCache.waitForCompletion(retryCache,
      null);
  if (entry != null && entry.isSuccess()) {
    System.out.println("retryCount incremented " + retryCount.get());
    retryCount.incrementAndGet();
    return (Integer) entry.getPayload();
  }
  try {
    operationCount.incrementAndGet();
    if (methodTime > 0) {
      Thread.sleep(methodTime);
    }
  } finally {
    RetryCache.setState(entry, success, input);
  }
  return success ? input : failureOutput;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 31, Source file: TestRetryCache.java

Example 2: startCheckpoint

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // NamenodeProtocol
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
    throws IOException {
  checkNNStartup();
  namesystem.checkSuperuserPrivilege();
  verifyRequest(registration);
  if(!nn.isRole(NamenodeRole.NAMENODE))
    throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");

  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
    null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (NamenodeCommand) cacheEntry.getPayload();
  }
  NamenodeCommand ret = null;
  try {
    ret = namesystem.startCheckpoint(registration, nn.setRegistration());
  } finally {
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: NameNodeRpcServer.java

Example 3: createSnapshot

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override
public String createSnapshot(String snapshotRoot, String snapshotName)
    throws IOException {
  checkNNStartup();
  if (!checkPathLength(snapshotRoot)) {
    throw new IOException("createSnapshot: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
    null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (String) cacheEntry.getPayload();
  }

  metrics.incrCreateSnapshotOps();
  String ret = null;
  try {
    ret = namesystem.createSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: NameNodeRpcServer.java

Example 4: addCacheDirective

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public long addCacheDirective(
    CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
    (retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }

  boolean success = false;
  long ret = 0;
  try {
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: NameNodeRpcServer.java

Example 5: createSnapshot

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override
public String createSnapshot(String snapshotRoot, String snapshotName)
    throws IOException {
  checkNNStartup();
  if (!checkPathLength(snapshotRoot)) {
    throw new IOException("createSnapshot: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (String) cacheEntry.getPayload();
  }

  metrics.incrCreateSnapshotOps();
  String ret = null;
  try {
    ret = namesystem.createSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 26, Source file: NameNodeRpcServer.java

Example 6: addCacheDirective

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public long addCacheDirective(
    CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
    (retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }

  boolean success = false;
  long ret = 0;
  try {
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 22, Source file: NameNodeRpcServer.java

Example 7: appendFile

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
/**
 * Append to an existing file in the namespace.
 */
LocatedBlock appendFile(String src, String holder, String clientMachine)
    throws AccessControlException, SafeModeException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, IOException {
  LocatedBlock lb = null;
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LocatedBlock) cacheEntry.getPayload();
  }
    
  boolean success = false;
  try {
    lb = appendFileInt(src, holder, clientMachine, cacheEntry != null);
    success = true;
    return lb;
  } catch (AccessControlException e) {
    logAuditEvent(false, "append", src);
    throw e;
  } finally {
    RetryCache.setState(cacheEntry, success, lb);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 27, Source file: FSNamesystem.java

Example 8: startCheckpoint

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
NamenodeCommand startCheckpoint(NamenodeRegistration backupNode,
    NamenodeRegistration activeNamenode) throws IOException {
  checkOperation(OperationCategory.CHECKPOINT);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (NamenodeCommand) cacheEntry.getPayload();
  }
  writeLock();
  NamenodeCommand cmd = null;
  try {
    checkOperation(OperationCategory.CHECKPOINT);
    checkNameNodeSafeMode("Checkpoint not started");
    
    LOG.info("Start checkpoint for " + backupNode.getAddress());
    cmd = getFSImage().startCheckpoint(backupNode, activeNamenode);
    getEditLog().logSync();
    return cmd;
  } finally {
    writeUnlock();
    RetryCache.setState(cacheEntry, cmd != null, cmd);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 24, Source file: FSNamesystem.java

Example 9: startFile

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
/**
 * Create a new file entry in the namespace.
 * 
 * For description of parameters and exceptions thrown see
 * {@link ClientProtocol#create()}, except it returns valid file status upon
 * success
 * 
 * For retryCache handling details see -
 * {@link #getFileStatus(boolean, CacheEntryWithPayload)}
 * 
 */
HdfsFileStatus startFile(String src, PermissionStatus permissions,
    String holder, String clientMachine, EnumSet<CreateFlag> flag,
    boolean createParent, short replication, long blockSize)
    throws AccessControlException, SafeModeException,
    FileAlreadyExistsException, UnresolvedLinkException,
    FileNotFoundException, ParentNotDirectoryException, IOException {
  HdfsFileStatus status = null;
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }
  
  try {
    status = startFileInt(src, permissions, holder, clientMachine, flag,
        createParent, replication, blockSize, cacheEntry != null);
  } catch (AccessControlException e) {
    logAuditEvent(false, "create", src);
    throw e;
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }
  return status;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 36, Source file: FSNamesystem.java

Example 10: startCheckpoint

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
NamenodeCommand startCheckpoint(NamenodeRegistration backupNode,
    NamenodeRegistration activeNamenode) throws IOException {
  checkOperation(OperationCategory.CHECKPOINT);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (NamenodeCommand) cacheEntry.getPayload();
  }
  writeLock();
  NamenodeCommand cmd = null;
  try {
    checkOperation(OperationCategory.CHECKPOINT);

    if (isInSafeMode()) {
      throw new SafeModeException("Checkpoint not started", safeMode);
    }
    LOG.info("Start checkpoint for " + backupNode.getAddress());
    cmd = getFSImage().startCheckpoint(backupNode, activeNamenode);
    getEditLog().logSync();
    return cmd;
  } finally {
    writeUnlock();
    RetryCache.setState(cacheEntry, cmd != null, cmd);
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 26, Source file: FSNamesystem.java

Example 11: create

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }

  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }

  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(getRemoteUser()
        .getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }

  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 38, Source file: NameNodeRpcServer.java

Example 12: append

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: NameNodeRpcServer.java

Example 13: create

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }

  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(getRemoteUser()
        .getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }

  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 38, Source file: NameNodeRpcServer.java

Example 14: append

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 29, Source file: NameNodeRpcServer.java

Example 15: startFile

import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; // import the class on which this method depends
/**
 * Create a new file entry in the namespace.
 * 
 * For description of parameters and exceptions thrown see
 * {@link ClientProtocol#create}, except it returns valid file status upon
 * success
 */
HdfsFileStatus startFile(String src, PermissionStatus permissions,
    String holder, String clientMachine, EnumSet<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws AccessControlException, SafeModeException,
    FileAlreadyExistsException, UnresolvedLinkException,
    FileNotFoundException, ParentNotDirectoryException, IOException {
  HdfsFileStatus status = null;
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }
  
  try {
    status = startFileInt(src, permissions, holder, clientMachine, flag,
        createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } catch (AccessControlException e) {
    logAuditEvent(false, "create", src);
    throw e;
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }
  return status;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 34, Source file: FSNamesystem.java


Note: The org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload.getPayload examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. When distributing or using this code, please refer to the License of the corresponding project. Reproduction without permission is prohibited.