This article collects and summarizes typical usages of the Java class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException. If you have been wondering what NotReplicatedYetException is for or how to use it, the curated class code examples below may help.
The NotReplicatedYetException class belongs to the org.apache.hadoop.hdfs.server.namenode package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
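Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: the NameNode reports NotReplicatedYetException wrapped in a RemoteException, and the client inspects the wrapped class name and retries addBlock with exponential back-off. The RetrySketch class, the BlockAllocator interface, and the retryAddBlock helper below are hypothetical names introduced only for illustration; they are not part of the Hadoop API.

import java.io.IOException;

public class RetrySketch {
  /** Hypothetical stand-in for the NameNode call that may refuse to allocate the next block. */
  interface BlockAllocator {
    String addBlock(String src) throws IOException;
  }

  /** Class name the HDFS client compares against when deciding whether to retry. */
  static final String NOT_REPLICATED_YET =
      "org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException";

  /** Retry addBlock with exponential back-off while the previous block is still under-replicated. */
  static String retryAddBlock(BlockAllocator namenode, String src, int maxRetries)
      throws IOException, InterruptedException {
    long sleepMs = 400; // initial delay, matching the value used in several examples below
    for (int attempt = 0; ; attempt++) {
      try {
        return namenode.addBlock(src);
      } catch (IOException e) {
        // The real client calls RemoteException#getClassName(); this sketch matches the message instead.
        boolean notReplicatedYet =
            String.valueOf(e.getMessage()).contains(NOT_REPLICATED_YET);
        if (!notReplicatedYet || attempt >= maxRetries) {
          throw e; // unrelated error, or retries exhausted
        }
        Thread.sleep(sleepMs);
        sleepMs *= 2; // exponential back-off, as in the locateFollowingBlock examples
      }
    }
  }
}

The same back-off loop appears, with minor variations, in Examples 6, 8, 11, 12 and 14 below.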
Example 1: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
String[] favoredNodes)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException {
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
.setSrc(src).setClientName(clientName).setFileId(fileId);
if (previous != null)
req.setPrevious(PBHelper.convert(previous));
if (excludeNodes != null)
req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
if (favoredNodes != null) {
req.addAllFavoredNodes(Arrays.asList(favoredNodes));
}
try {
return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
Example 2: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException {
AddBlockRequestProto.Builder req =
AddBlockRequestProto.newBuilder().setSrc(src).setClientName(clientName);
if (previous != null) {
req.setPrevious(PBHelper.convert(previous));
}
if (excludeNodes != null) {
req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
}
try {
return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
Example 3: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
public LocatedBlock addBlock(String src, String clientName)
throws IOException
{
num_calls++;
if (num_calls > num_calls_allowed) {
throw new IOException("addBlock called more times than "
+ RETRY_CONFIG
+ " allows.");
} else {
throw new RemoteException(NotReplicatedYetException.class.getName(),
ADD_BLOCK_EXCEPTION);
}
}
Example 4: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
public LocatedBlock addBlock(final String src, final String clientName,
final ExtendedBlock previous, final DatanodeInfo[] excludeNodes)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException {
ClientActionHandler handler = new ClientActionHandler() {
@Override
public Object doAction(ClientProtocol namenode)
throws RemoteException, IOException {
return namenode.addBlock(src, clientName, previous, excludeNodes);
}
};
return (LocatedBlock) doClientActionWithRetry(handler, "addBlock");
}
Example 5: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
public LocatedBlock addBlock(String src, String clientName,
DatanodeInfo[] excludedNode)
throws IOException {
num_calls++;
if (num_calls > num_calls_allowed) {
throw new IOException("addBlock called more times than "
+ RETRY_CONFIG
+ " allows.");
} else {
throw new RemoteException(NotReplicatedYetException.class.getName(),
ADD_BLOCK_EXCEPTION);
}
}
Example 6: locateFollowingBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes) throws IOException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 400;
while (true) {
long localstart = Time.monotonicNow();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName,
block, excludedNodes, fileId, favoredNodes);
} catch (RemoteException e) {
IOException ue =
e.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
UnresolvedPathException.class);
if (ue != e) {
throw ue; // no need to retry these exceptions
}
if (NotReplicatedYetException.class.getName().
equals(e.getClassName())) {
if (retries == 0) {
throw e;
} else {
--retries;
DFSClient.LOG.info("Exception while adding a block", e);
long elapsed = Time.monotonicNow() - localstart;
if (elapsed > 5000) {
DFSClient.LOG.info("Waiting for replication for "
+ (elapsed / 1000) + " seconds");
}
try {
DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
+ " retries left " + retries);
Thread.sleep(sleeptime);
sleeptime *= 2;
} catch (InterruptedException ie) {
DFSClient.LOG.warn("Caught exception ", ie);
}
}
} else {
throw e;
}
}
}
}
}
Example 7: testNotYetReplicatedErrors
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation)
throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
if(retryCount > maxRetries + 1) // First call was not a retry
throw new IOException("Retried too many times: " + retryCount);
else
throw new RemoteException(NotReplicatedYetException.class.getName(),
exceptionMsg);
}
};
when(mockNN.addBlock(anyString(),
anyString(),
any(ExtendedBlock.class),
any(DatanodeInfo[].class),
anyLong(), any(String[].class))).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
os.write(20); // write one random byte
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
e.getMessage().equals(exceptionMsg));
}
}
Example 8: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
static LocatedBlock addBlock(DatanodeInfo[] excludedNodes,
DFSClient dfsClient, String src, ExtendedBlock prevBlock, long fileId,
String[] favoredNodes) throws IOException {
final DfsClientConf conf = dfsClient.getConf();
int retries = conf.getNumBlockWriteLocateFollowingRetry();
long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
long localstart = Time.monotonicNow();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName, prevBlock,
excludedNodes, fileId, favoredNodes);
} catch (RemoteException e) {
IOException ue = e.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class);
if (ue != e) {
throw ue; // no need to retry these exceptions
}
if (NotReplicatedYetException.class.getName()
.equals(e.getClassName())) {
if (retries == 0) {
throw e;
} else {
--retries;
LOG.info("Exception while adding a block", e);
long elapsed = Time.monotonicNow() - localstart;
if (elapsed > 5000) {
LOG.info("Waiting for replication for " + (elapsed / 1000)
+ " seconds");
}
try {
LOG.warn("NotReplicatedYetException sleeping " + src
+ " retries left " + retries);
Thread.sleep(sleeptime);
sleeptime *= 2;
} catch (InterruptedException ie) {
LOG.warn("Caught exception", ie);
}
}
} else {
throw e;
}
}
}
}
Example 9: testNotYetReplicatedErrors
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation)
throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
if(retryCount > maxRetries + 1) // First call was not a retry
throw new IOException("Retried too many times: " + retryCount);
else
throw new RemoteException(NotReplicatedYetException.class.getName(),
exceptionMsg);
}
};
when(mockNN.addBlock(anyString(),
anyString(),
any(ExtendedBlock.class),
any(DatanodeInfo[].class),
anyLong(), any(String[].class))).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
os.write(20); // write one random byte
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
e.getMessage().equals(exceptionMsg));
}
}
Example 10: addBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
@Override
public LocatedBlock addBlock(String src, String clientName, ExtendedBlock previous,
    DatanodeInfo[] excludeNodes, long fileId, String[] favoredNodes)
    throws AccessControlException, FileNotFoundException, NotReplicatedYetException,
    SafeModeException, UnresolvedLinkException, IOException {
  RouteInfo routeInfo = router.route(src);
  return routeInfo.upstream.addBlock(routeInfo.realPath, clientName, previous,
      excludeNodes, fileId, favoredNodes);
}
Example 11: locateFollowingBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
private LocatedBlock locateFollowingBlock(long start,
DatanodeInfo[] excludedNodes) throws IOException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 400;
while (true) {
long localstart = Time.now();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName,
block, excludedNodes, fileId, favoredNodes);
} catch (RemoteException e) {
IOException ue =
e.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
UnresolvedPathException.class);
if (ue != e) {
throw ue; // no need to retry these exceptions
}
if (NotReplicatedYetException.class.getName().
equals(e.getClassName())) {
if (retries == 0) {
throw e;
} else {
--retries;
DFSClient.LOG.info("Exception while adding a block", e);
if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Waiting for replication for "
+ (Time.now() - localstart) / 1000
+ " seconds");
}
try {
DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
+ " retries left " + retries);
Thread.sleep(sleeptime);
sleeptime *= 2;
} catch (InterruptedException ie) {
DFSClient.LOG.warn("Caught exception ", ie);
}
}
} else {
throw e;
}
}
}
}
}
Example 12: locateFollowingBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
private LocatedBlock locateFollowingBlock(long start,
DatanodeInfo[] excludedNodes)
throws IOException, UnresolvedLinkException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 400;
while (true) {
long localstart = Time.now();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName,
block, excludedNodes, fileId, favoredNodes);
} catch (RemoteException e) {
IOException ue =
e.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
UnresolvedPathException.class);
if (ue != e) {
throw ue; // no need to retry these exceptions
}
if (NotReplicatedYetException.class.getName().
equals(e.getClassName())) {
if (retries == 0) {
throw e;
} else {
--retries;
DFSClient.LOG.info("Exception while adding a block", e);
if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Waiting for replication for "
+ (Time.now() - localstart) / 1000
+ " seconds");
}
try {
DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
+ " retries left " + retries);
Thread.sleep(sleeptime);
sleeptime *= 2;
} catch (InterruptedException ie) {
  // interrupt during back-off is swallowed; the loop retries immediately
}
}
} else {
throw e;
}
}
}
}
}
Example 13: testNotYetReplicatedErrors
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation)
throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
if(retryCount > maxRetries + 1) // First call was not a retry
throw new IOException("Retried too many times: " + retryCount);
else
throw new RemoteException(NotReplicatedYetException.class.getName(),
exceptionMsg);
}
};
when(mockNN.addBlock(anyString(),
anyString(),
any(ExtendedBlock.class),
any(DatanodeInfo[].class),
anyLong(), any(String[].class))).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
os.write(20); // write one random byte
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
e.getMessage().equals(exceptionMsg));
}
}
Example 14: locateFollowingBlock
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
private LocatedBlock locateFollowingBlock(long start,
DatanodeInfo[] excludedNodes)
throws IOException, UnresolvedLinkException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 1000; //HOP default value was 400
while (true) {
long localstart = Time.now();
while (true) {
try {
return dfsClient
.addBlock(src, dfsClient.clientName, block, excludedNodes);
} catch (RemoteException e) {
IOException ue =
e.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
UnresolvedPathException.class);
if (ue != e) {
throw ue; // no need to retry these exceptions
}
if (NotReplicatedYetException.class.getName().
equals(e.getClassName())) {
if (retries == 0) {
throw e;
} else {
--retries;
DFSClient.LOG.debug("Exception while adding a block", e);
if (Time.now() - localstart > 5000) {
DFSClient.LOG.debug("Waiting for replication for " +
(Time.now() - localstart) / 1000 + " seconds");
}
try {
Thread.sleep(sleeptime);
sleeptime *= 2;
} catch (InterruptedException ie) {
  // interrupt during back-off is swallowed; the loop retries immediately
}
}
} else {
throw e;
}
}
}
}
}
Example 15: testNotYetReplicatedErrors
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; // import the dependent package/class
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings("serial")
@Test
public void testNotYetReplicatedErrors() throws IOException {
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
conf.setInt(
DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
if (retryCount > maxRetries + 1) // First call was not a retry
{
throw new IOException("Retried too many times: " + retryCount);
} else {
throw new RemoteException(NotReplicatedYetException.class.getName(),
exceptionMsg);
}
}
};
when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class),
any(DatanodeInfo[].class))).thenAnswer(answer);
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
os.write(20); // write one random byte
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
e.getMessage().equals(exceptionMsg));
}
}