This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType. If you are wondering what DatanodeReportType is for and how it is used in practice, the curated code examples below should help.
The DatanodeReportType class belongs to the org.apache.hadoop.hdfs.protocol.HdfsConstants package. 15 code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
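
Before the examples, here is a minimal, self-contained sketch of how the enum is typically consumed from client code. The NameNode URI is a placeholder, and the cast assumes the URI points at an HDFS file system:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "hdfs://localhost:8020" is a placeholder; point it at your NameNode.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // LIVE, DEAD, DECOMMISSIONING and ALL select which datanodes are reported.
    for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
      System.out.println(dn.getHostName());
    }
    fs.close();
  }
}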
Example 1: doGet
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Handle fsck request */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws IOException {
  @SuppressWarnings("unchecked")
  final Map<String,String[]> pmap = request.getParameterMap();
  final PrintWriter out = response.getWriter();
  final InetAddress remoteAddress =
      InetAddress.getByName(request.getRemoteAddr());
  final ServletContext context = getServletContext();
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
        final FSNamesystem namesystem = nn.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final int totalDatanodes =
            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
        new NamenodeFsck(conf, nn,
            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
            totalDatanodes, remoteAddress).fsck();
        return null;
      }
    });
  } catch (InterruptedException e) {
    response.sendError(400, e.getMessage());
  }
}
Example 2: datanodeReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
DatanodeInfo[] datanodeReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = new DatanodeInfo(results.get(i));
    }
    return arr;
  } finally {
    readUnlock();
  }
}
Example 3: getDatanodeStorageReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
Example 4: assertReports
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
// CMP is a Comparator<StorageReport> defined elsewhere in the test class that
// orders reports by storage ID; findDatanode looks a DataNode up by its UUID.
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  for (int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);
      assertEquals(expected.length, computed.length);
      for (int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
            computed[j].getStorage().getStorageID());
      }
    }
  }
}
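
A sketch of how this helper might be driven from a MiniDFSCluster-based test; the cluster size and abbreviated setup here are illustrative assumptions:

// Illustrative test driver for assertReports (cluster size is an assumption).
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
try {
  cluster.waitActive();
  final DFSClient client = cluster.getFileSystem().getClient();
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  assertReports(4, DatanodeReportType.ALL, client, cluster.getDataNodes(), bpid);
  assertReports(4, DatanodeReportType.LIVE, client, cluster.getDataNodes(), bpid);
} finally {
  cluster.shutdown();
}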
Example 5: waitFirstBRCompleted
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Wait until the given namenode gets first block reports from all the datanodes */
public void waitFirstBRCompleted(int nnIndex, int timeout) throws
    IOException, TimeoutException, InterruptedException {
  if (namenodes.size() == 0 || getNN(nnIndex) == null || getNN(nnIndex).nameNode == null) {
    return;
  }
  final FSNamesystem ns = getNamesystem(nnIndex);
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<DatanodeDescriptor> nodes =
          dm.getDatanodeListForReport(DatanodeReportType.LIVE);
      for (DatanodeDescriptor node : nodes) {
        if (!node.checkBlockReportReceived()) {
          return false;
        }
      }
      return true;
    }
  }, 100, timeout);
}
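
A hypothetical call site, assuming cluster is the MiniDFSCluster instance this method belongs to:

// Wait up to 10 seconds for namenode 0 to receive every datanode's first block report.
cluster.waitFirstBRCompleted(0, 10000);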
Example 6: getNumberOfDataDirsPerHost
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
  HashMap<String, Integer> disksPerHost = new HashMap<>();
  try {
    @SuppressWarnings("resource")
    DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());
    DatanodeStorageReport[] datanodeStorageReports =
        dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);
    for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
      disksPerHost.put(
          datanodeStorageReport.getDatanodeInfo().getHostName(),
          datanodeStorageReport.getStorageReports().length);
    }
  } catch (IOException e) {
    LOG.warn("Number of data directories (disks) per node could not be collected (requires higher privileges).");
  }
  return disksPerHost;
}
Example 7: datanodeReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
  try {
    return namenode.getDatanodeReport(type);
  } finally {
    scope.close();
  }
}
Example 8: getDatanodeStorageReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
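
A sketch of consuming the storage report to aggregate free space per datanode; client is assumed to be an open DFSClient, as in the two examples above:

// Sum the remaining capacity of every storage volume on each live datanode.
DatanodeStorageReport[] reports =
    client.getDatanodeStorageReport(DatanodeReportType.LIVE);
for (DatanodeStorageReport r : reports) {
  long remaining = 0;
  for (StorageReport s : r.getStorageReports()) {
    remaining += s.getRemaining();  // bytes still free on this volume
  }
  System.out.println(r.getDatanodeInfo().getHostName() + ": " + remaining + " bytes free");
}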
Example 9: getNumberOfDatanodes
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
int getNumberOfDatanodes(DatanodeReportType type) {
  readLock();
  try {
    return getBlockManager().getDatanodeManager().getDatanodeListForReport(
        type).size();
  } finally {
    readUnlock();
  }
}
Example 10: getDatanodeReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override // ClientProtocol
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
    throws IOException {
  checkNNStartup();
  DatanodeInfo results[] = namesystem.datanodeReport(type);
  return results;
}
Example 11: getDatanodeStorageReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkNNStartup();
  final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
  return reports;
}
Example 12: getDecommissioningNodes
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** @return list of datanodes where decommissioning is in progress. */
public List<DatanodeDescriptor> getDecommissioningNodes() {
  // There is no need to take the namesystem reader lock, as
  // getDatanodeListForReport will synchronize on datanodeMap.
  // A decommissioning DN may be "alive" or "dead".
  return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
}
Example 13: fetchDatanodes
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live,
    final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
  if (live == null && dead == null) {
    throw new HadoopIllegalArgumentException("Both live and dead lists are null");
  }
  // There is no need to take the namesystem reader lock, as
  // getDatanodeListForReport will synchronize on datanodeMap.
  final List<DatanodeDescriptor> results =
      getDatanodeListForReport(DatanodeReportType.ALL);
  for (DatanodeDescriptor node : results) {
    if (isDatanodeDead(node)) {
      if (dead != null) {
        dead.add(node);
      }
    } else {
      if (live != null) {
        live.add(node);
      }
    }
  }
  if (removeDecommissionNode) {
    if (live != null) {
      removeDecomNodeFromList(live);
    }
    if (dead != null) {
      removeDecomNodeFromList(dead);
    }
  }
}
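
An illustrative call site, assuming datanodeManager is a DatanodeManager instance; passing null for one list skips collecting it:

final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
// true: strip decommissioned nodes from both result lists.
datanodeManager.fetchDatanodes(live, dead, true);
System.out.println(live.size() + " live, " + dead.size() + " dead");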
Example 14: convert
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public static DatanodeReportTypeProto convert(DatanodeReportType t) {
  switch (t) {
  case ALL: return DatanodeReportTypeProto.ALL;
  case LIVE: return DatanodeReportTypeProto.LIVE;
  case DEAD: return DatanodeReportTypeProto.DEAD;
  case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
  default:
    throw new IllegalArgumentException("Unexpected data type report:" + t);
  }
}
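
The inverse mapping (protobuf enum back to DatanodeReportType) follows the same pattern; a minimal sketch reconstructed from the values above:

// Mirror-image converter (sketch): protobuf enum to DatanodeReportType.
public static DatanodeReportType convert(DatanodeReportTypeProto t) {
  switch (t) {
  case ALL: return DatanodeReportType.ALL;
  case LIVE: return DatanodeReportType.LIVE;
  case DEAD: return DatanodeReportType.DEAD;
  case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
  default:
    throw new IllegalArgumentException("Unexpected data type report:" + t);
  }
}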
Example 15: getDatanodeReport
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
    throws IOException {
  GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto
      .newBuilder()
      .setType(PBHelper.convert(type)).build();
  try {
    return PBHelper.convert(
        rpcProxy.getDatanodeReport(null, req).getDiList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}