This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.updateReplicaUnderRecovery. If you are wondering what FsDatasetSpi.updateReplicaUnderRecovery does and how to use it, the curated examples below should help; you can also explore the enclosing class, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi, further.
The following shows 4 code examples of FsDatasetSpi.updateReplicaUnderRecovery, sorted by popularity.
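For context, all four examples call the method through the FsDatasetSpi interface. Its exact declaration varies across Hadoop releases, so the sketch below is an approximation inferred from the calls in the examples rather than the authoritative interface: the newer overload (Examples 1 and 2) additionally takes the block ID to assign to the recovered replica, while the older overload (Examples 3 and 4) does not.

// Approximate declarations on FsDatasetSpi, inferred from the example calls below.
// Newer releases (Examples 1 and 2): the caller also passes the new block ID
// for the recovered replica; the return value is used as a storage ID.
String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
    long newBlockId, long newLength) throws IOException;

// Older releases (Examples 3 and 4): no new block ID parameter.
String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
    long newLength) throws IOException;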
Example 1: testUpdateReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));
    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);
    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 2: testUpdateReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));
    //check replica
    final Replica replica =
        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    //check meta data before update
    cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
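Examples 1 and 2 run the same test against the four-argument overload and differ only in how the replica is inspected: Example 1 reaches into the dataset with the static helpers FsDatasetTestUtil.fetchReplicaInfo and FsDatasetImpl.checkReplicaFiles, while Example 2 goes through the cluster's FsDatasetTestUtils abstraction (fetchReplica / checkStoredReplica), which keeps the test decoupled from the concrete FsDatasetImpl.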
Example 3: testUpdateReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));
    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);
    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 4: testUpdateReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    //create a file
    DistributedFileSystem dfs =
        (DistributedFileSystem) cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    //get block info
    final LocatedBlock locatedblock =
        getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),
            filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    //get DataNode and FSDataset objects
    final DataNode datanode =
        cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset =
        DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset
        .initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
    //check replica
    final ReplicaInfo replica =
        FsDatasetTestUtil.fetchReplicaInfo(fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);
    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp =
          new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(),
              rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail();
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
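Examples 3 and 4 come from releases in which updateReplicaUnderRecovery took only three arguments, with no new block ID to assign to the recovered replica; apart from that and minor line-wrapping differences, their test logic is identical to Examples 1 and 2.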