This article collects typical usage examples of the Java class org.apache.hadoop.ipc.RemoteException. If you are wondering what RemoteException is for, how to use it, or where to find usage examples, the curated snippets below should help. RemoteException belongs to the org.apache.hadoop.ipc package; 15 code examples are shown below, sorted by popularity by default.
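Before the individual examples, here is a minimal sketch of the core pattern most of them share: an RPC call throws a RemoteException, and the client either inspects the wrapped class name or unwraps it back into the concrete exception the server originally threw. The SomeRpcProtocol interface and method names below are hypothetical stand-ins, not taken from any of the examples.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class RemoteExceptionPattern {
  /** Hypothetical RPC interface standing in for ClientProtocol and friends. */
  interface SomeRpcProtocol {
    void doSomething() throws IOException;
  }

  void callRpc(SomeRpcProtocol proxy) throws IOException {
    try {
      proxy.doSomething();
    } catch (RemoteException re) {
      // If the server-side class is one of those listed, re-throw it as the
      // matching local exception; otherwise the RemoteException itself is thrown.
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class);
    }
  }
}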
Example 1: shouldRetry

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
    boolean isIdempotentOrAtMostOnce) throws Exception {
  RetryPolicy policy = null;
  // A RemoteException marks a server-side error, so skip the local
  // exception-to-policy lookup and fall through to the default policy.
  if (e instanceof RemoteException) {
    // do nothing
  } else {
    policy = exceptionToPolicyMap.get(e.getClass());
  }
  if (policy == null) {
    policy = defaultPolicy;
  }
  return policy.shouldRetry(
      e, retries, failovers, isIdempotentOrAtMostOnce);
}
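For context, a hedged sketch of how a policy like this might be wired into a client proxy, mirroring the RetryProxy usage in Example 3 below. The MyProtocol interface and the wrap helper are assumptions; retryOtherThanRemoteException and the other policies come from Hadoop's RetryPolicies.

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryWiringSketch {
  /** Hypothetical RPC interface; any retriable interface would do. */
  interface MyProtocol {
    void ping() throws IOException;
  }

  static MyProtocol wrap(MyProtocol rawProxy) {
    // Retry plain IOExceptions with a fixed sleep; as Example 1 shows, a
    // RemoteException skips this map and falls through to the default
    // fail-fast policy, because it marks a deliberate server-side error.
    Map<Class<? extends Exception>, RetryPolicy> policyMap =
        Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
            IOException.class,
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                3, 1, TimeUnit.SECONDS));
    RetryPolicy policy = RetryPolicies.retryOtherThanRemoteException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, policyMap);
    return (MyProtocol) RetryProxy.create(MyProtocol.class, rawProxy, policy);
  }
}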
Example 2: call

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
        HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
    fs.mkdirs(tmpDir);
    HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
    final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    out.flush();
    return out;
  } catch (RemoteException e) {
    // Another process already holds the lock file: report that with null
    // instead of failing.
    if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      return null;
    } else {
      throw e;
    }
  }
}
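The getClassName() comparison above works even when the server-side exception class is not needed on the client classpath. An equivalent check written with unwrapRemoteException might look like this minimal sketch (the helper name is an assumption):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class LockCheckSketch {
  /**
   * Equivalent to the getClassName() test in Example 2: unwrap the
   * RemoteException and check the concrete type instead of the class name.
   */
  static boolean alreadyBeingCreated(RemoteException re) {
    // unwrapRemoteException returns the wrapped exception if its class is
    // listed, or the RemoteException itself if it is not.
    IOException unwrapped =
        re.unwrapRemoteException(AlreadyBeingCreatedException.class);
    return unwrapped instanceof AlreadyBeingCreatedException;
  }
}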
Example 3: testRetryOtherThanRemoteException

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Test
public void testRetryOtherThanRemoteException() throws Throwable {
  Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
          IOException.class, RETRY_FOREVER);
  UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryOtherThanRemoteException(TRY_ONCE_THEN_FAIL,
              exceptionToPolicyMap));
  // Should retry on a local IOException.
  unreliable.failsOnceWithIOException();
  try {
    // No retry on RemoteException: it fails on the first attempt.
    unreliable.failsOnceWithRemoteException();
    fail("Should fail");
  } catch (RemoteException e) {
    // expected
  }
}
Example 4: testAllEditsDirFailOnWrite

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, true);
  invalidateEditsDirAtIndex(1, true, true);
  // The NN has not terminated (no ExitException thrown).
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + "should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "No journals available to flush. " +
        "Unsynced transactions: 1", re);
  }
}
Example 5: setXAttrs

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

public void setXAttrs(Map<String, Map<String, byte[]>> xAttrsToSetRaw,
    EnumSet<XAttrSetFlag> flag) throws IOException {
  checkOpen();
  try {
    // Convert the raw name/value pairs into XAttr objects, per source path.
    Map<String, List<XAttr>> xAttrsToSet = new HashMap<>();
    for (Map.Entry<String, Map<String, byte[]>> entry : xAttrsToSetRaw.entrySet()) {
      List<XAttr> list = new ArrayList<>();
      for (Map.Entry<String, byte[]> attr : entry.getValue().entrySet()) {
        list.add(XAttrHelper.buildXAttr(attr.getKey(), attr.getValue()));
      }
      xAttrsToSet.put(entry.getKey(), list);
    }
    namenode.setXAttrs(xAttrsToSet, flag);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        NSQuotaExceededException.class,
        SafeModeException.class,
        SnapshotAccessControlException.class,
        UnresolvedPathException.class);
  }
}
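Because of the unwrapRemoteException call above, callers can catch the concrete exception types directly instead of matching on class names. A minimal hypothetical caller (the XAttrClient interface and its simplified signature are assumptions for the sketch):

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.security.AccessControlException;

public class UnwrapCallerSketch {
  /** Hypothetical handle exposing a setXAttrs-style method like the one above. */
  interface XAttrClient {
    void setXAttrs() throws IOException; // simplified signature for the sketch
  }

  static void trySetXAttrs(XAttrClient client) throws IOException {
    try {
      client.setXAttrs();
    } catch (AccessControlException e) {
      // The unwrap turned the server-side RemoteException back into an
      // AccessControlException, so it can be handled like a local error.
      System.err.println("permission denied: " + e.getMessage());
    } catch (FileNotFoundException e) {
      System.err.println("path does not exist: " + e.getMessage());
    }
  }
}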
Example 6: sendRequest

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Override
public RaftClientReply sendRequest(RaftClientRequest request)
    throws IOException {
  final RaftPeerId serverId = request.getServerId();
  final CombinedClientProtocolClientSideTranslatorPB proxy =
      getProxies().getProxy(serverId);
  try {
    // Dispatch on the concrete request type.
    if (request instanceof ReinitializeRequest) {
      return proxy.reinitialize((ReinitializeRequest) request);
    } else if (request instanceof SetConfigurationRequest) {
      return proxy.setConfiguration((SetConfigurationRequest) request);
    } else if (request instanceof ServerInformatonRequest) {
      return proxy.getInfo((ServerInformatonRequest) request);
    } else {
      return proxy.submitClientRequest(request);
    }
  } catch (RemoteException e) {
    throw e.unwrapRemoteException(
        StateMachineException.class,
        ReconfigurationTimeoutException.class,
        ReconfigurationInProgressException.class,
        RaftException.class,
        LeaderNotReadyException.class,
        GroupMismatchException.class);
  }
}
Example 7: listPaths

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Used by readdir and readdirplus to get dirents. It retries the listing
 * from the beginning if the startAfter cookie can no longer be found.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted.
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Example 8: createSymlink

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Creates a symbolic link.
 *
 * @see ClientProtocol#createSymlink(String, String, FsPermission, boolean)
 */
public void createSymlink(String target, String link, boolean createParent)
    throws IOException {
  TraceScope scope = getPathTraceScope("createSymlink", target);
  try {
    FsPermission dirPerm =
        FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
    namenode.createSymlink(target, link, dirPerm, createParent);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 9: reportTo

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
    DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };
  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    // A RemoteException means the namenode received and rejected the
    // report, so it is only logged rather than retried.
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block: " + block, re);
  } catch (IOException e) {
    // A local transport failure is retriable: rethrow so the action is
    // re-queued by the BPServiceActor.
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: ");
  }
}
Example 10: rename

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
  try {
    namenode.rename2(src, dst, options);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        DSQuotaExceededException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 11: delete

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Delete file or directory.
 * Deletes the contents of the directory if it is non-empty and recursive
 * is set to true.
 *
 * @see ClientProtocol#delete(String, boolean)
 */
public boolean delete(String src, boolean recursive) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("delete", src);
  try {
    return namenode.delete(src, recursive);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 12: setPermission

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Set permissions on a file or directory.
 * @param src path name.
 * @param permission the permission to set
 *
 * @see ClientProtocol#setPermission(String, FsPermission)
 */
public void setPermission(String src, FsPermission permission)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("setPermission", src);
  try {
    namenode.setPermission(src, permission);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 13: setOwner

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Set file or directory owner.
 * @param src path name.
 * @param username user id.
 * @param groupname user group.
 *
 * @see ClientProtocol#setOwner(String, String, String)
 */
public void setOwner(String src, String username, String groupname)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("setOwner", src);
  try {
    namenode.setOwner(src, username, groupname);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 14: testAllEditsDirsFailOnFlush

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

@Test
public void testAllEditsDirsFailOnFlush() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, false);
  invalidateEditsDirAtIndex(1, true, false);
  // The NN has not terminated (no ExitException thrown).
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + "should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage. " +
        "Unsynced transactions: 1", re);
  }
}
Example 15: testBrokenLogger

import org.apache.hadoop.ipc.RemoteException; // import the dependent package/class

/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}