This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport. If you are wondering what the DatanodeStorageReport class is for or how to use it, the curated code examples below may help.
The DatanodeStorageReport class belongs to the org.apache.hadoop.hdfs.server.protocol package. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: getDatanodeStorageReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);

    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
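For context: the method returns one DatanodeStorageReport per reported datanode, pairing the node's DatanodeInfo with its per-volume StorageReport array. A minimal consumer sketch (hypothetical; assumes a reports array obtained as above):

// Hypothetical consumer: print the remaining space of every storage volume.
for (DatanodeStorageReport report : reports) {
  System.out.println("Datanode: " + report.getDatanodeInfo().getHostName());
  for (StorageReport s : report.getStorageReports()) {
    System.out.println("  " + s.getStorage().getStorageID()
        + " remaining=" + s.getRemaining() + " bytes");
  }
}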
Example 2: init
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
/** Get live datanode storage reports and then build the network topology. */
public List<DatanodeStorageReport> init() throws IOException {
  final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
  final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>();
  // create network topology and classify utilization collections:
  // over-utilized, above-average, below-average and under-utilized.
  for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
    final DatanodeInfo datanode = r.getDatanodeInfo();
    if (shouldIgnore(datanode)) {
      continue;
    }
    trimmed.add(r);
    cluster.add(datanode);
  }
  return trimmed;
}
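The shouldIgnore helper is defined elsewhere in the Balancer's Dispatcher. A rough sketch of such a filter, under the assumption that decommissioning state and an exclusion list are the criteria (the excludedNodes set here is invented for illustration):

// Hypothetical filter: skip nodes that should not take part in balancing.
private boolean shouldIgnore(DatanodeInfo dn) {
  // decommissioned or decommissioning nodes are not balancing candidates
  if (dn.isDecommissioned() || dn.isDecommissionInProgress()) {
    return true;
  }
  // excludedNodes is an assumed Set<String> of host names to skip
  return excludedNodes.contains(dn.getHostName());
}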
Example 3: assertReports
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for (int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());

    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for (int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
            computed[j].getStorage().getStorageID());
      }
    }
  }
}
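The CMP comparator is defined elsewhere in the test. Since the assertions compare the two arrays position by position on storage ID, a comparator that orders StorageReports by storage ID would fit; a sketch of such a comparator (an assumption, not the verbatim test code):

// Assumed comparator: order storage reports by storage ID so the
// computed and expected arrays can be compared index by index.
static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
  @Override
  public int compare(StorageReport left, StorageReport right) {
    return left.getStorage().getStorageID().compareTo(
        right.getStorage().getStorageID());
  }
};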
Example 4: compareTotalPoolUsage
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
/**
 * Compare the total blockpool usage on each datanode to ensure that nothing
 * was balanced.
 *
 * @param preReports storage reports from before the balancer run
 * @param postReports storage reports from after the balancer run
 */
private static void compareTotalPoolUsage(DatanodeStorageReport[] preReports,
    DatanodeStorageReport[] postReports) {
  Assert.assertNotNull(preReports);
  Assert.assertNotNull(postReports);
  Assert.assertEquals(preReports.length, postReports.length);
  for (DatanodeStorageReport preReport : preReports) {
    String dnUuid = preReport.getDatanodeInfo().getDatanodeUuid();
    for (DatanodeStorageReport postReport : postReports) {
      if (postReport.getDatanodeInfo().getDatanodeUuid().equals(dnUuid)) {
        Assert.assertEquals(getTotalPoolUsage(preReport),
            getTotalPoolUsage(postReport));
        LOG.info("Comparison of datanode pool usage pre/post balancer run. "
            + "PrePoolUsage: " + getTotalPoolUsage(preReport)
            + ", PostPoolUsage: " + getTotalPoolUsage(postReport));
        break;
      }
    }
  }
}
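getTotalPoolUsage is not shown here. Given the method's purpose, a plausible implementation simply sums the block pool usage across a node's storage reports (a sketch, not the verbatim test helper):

// Assumed helper: total block-pool bytes used across all storages of a node.
private static long getTotalPoolUsage(DatanodeStorageReport report) {
  long usage = 0L;
  for (StorageReport r : report.getStorageReports()) {
    usage += r.getBlockPoolUsed();
  }
  return usage;
}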
Example 5: getNumberOfDataDirsPerHost
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
  HashMap<String, Integer> disksPerHost = new HashMap<>();
  try {
    @SuppressWarnings("resource")
    DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());
    DatanodeStorageReport[] datanodeStorageReports =
        dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);
    for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
      disksPerHost.put(
          datanodeStorageReport.getDatanodeInfo().getHostName(),
          datanodeStorageReport.getStorageReports().length);
    }
  } catch (IOException e) {
    LOG.warn("Number of data directories (disks) per node could not be collected "
        + "(requires higher privileges).");
  }
  return disksPerHost;
}
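This works because each StorageReport corresponds to one storage volume (one configured data directory) on the datanode, so the length of the report array approximates the disk count per host. A hypothetical call site:

// Hypothetical usage: log the number of data directories on each host.
HashMap<String, Integer> disks = getNumberOfDataDirsPerHost();
for (Map.Entry<String, Integer> e : disks.entrySet()) {
  System.out.println(e.getKey() + " has " + e.getValue() + " data dir(s)");
}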
Example 6: getDatanodeStorageReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
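This is the DFSClient entry point that several of the other examples go through. A minimal standalone sketch of calling it against a running cluster (the NameNode URI is a placeholder; error handling omitted):

// Hypothetical standalone usage of DFSClient#getDatanodeStorageReport.
Configuration conf = new Configuration();
DFSClient client = new DFSClient(URI.create("hdfs://namenode:8020"), conf);
try {
  DatanodeStorageReport[] live =
      client.getDatanodeStorageReport(DatanodeReportType.LIVE);
  System.out.println("Live datanodes reporting: " + live.length);
} finally {
  client.close();
}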
Example 7: init
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
void init() throws IOException {
  initStoragePolicies();

  final List<DatanodeStorageReport> reports = dispatcher.init();
  for (DatanodeStorageReport r : reports) {
    final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
    for (StorageType t : StorageType.getMovableTypes()) {
      final Source source = dn.addSource(t, Long.MAX_VALUE, dispatcher);
      final long maxRemaining = getMaxRemaining(r, t);
      final StorageGroup target = maxRemaining > 0L ? dn.addTarget(t,
          maxRemaining) : null;
      storages.add(source, target);
    }
  }
}
Example 8: getMaxRemaining
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
  long max = 0L;
  for (StorageReport r : report.getStorageReports()) {
    if (r.getStorage().getStorageType() == t) {
      if (r.getRemaining() > max) {
        max = r.getRemaining();
      }
    }
  }
  return max;
}
Example 9: getDatanodeStorageReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkNNStartup();
  final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
  return reports;
}
Example 10: getCapacity
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
  long capacity = 0L;
  for (StorageReport r : report.getStorageReports()) {
    if (r.getStorage().getStorageType() == t) {
      capacity += r.getCapacity();
    }
  }
  return capacity;
}
Example 11: getRemaining
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
private static long getRemaining(DatanodeStorageReport report, StorageType t) {
  long remaining = 0L;
  for (StorageReport r : report.getStorageReports()) {
    if (r.getStorage().getStorageType() == t) {
      remaining += r.getRemaining();
    }
  }
  return remaining;
}
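getCapacity and getRemaining aggregate over all volumes of one storage type on a node. A small sketch combining the two into a percent-remaining figure (the helper name is invented for illustration):

// Hypothetical helper built on the two methods above: percentage of
// capacity still free for one storage type on one datanode.
private static double getPercentRemaining(DatanodeStorageReport report,
    StorageType t) {
  final long capacity = getCapacity(report, t);
  return capacity == 0L ? 0.0 : getRemaining(report, t) * 100.0 / capacity;
}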
Example 12: accumulateSpaces
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  for (StorageReport s : r.getStorageReports()) {
    final StorageType t = s.getStorage().getStorageType();
    totalCapacities.add(t, s.getCapacity());
    totalUsedSpaces.add(t, s.getDfsUsed());
  }
}
Example 13: getUtilization
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  long capacity = 0L;
  long dfsUsed = 0L;
  for (StorageReport s : r.getStorageReports()) {
    if (s.getStorage().getStorageType() == t) {
      capacity += s.getCapacity();
      dfsUsed += s.getDfsUsed();
    }
  }
  return capacity == 0L ? null : dfsUsed * 100.0 / capacity;
}
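Note the boxed Double return type: null signals that the datanode has no storage of the requested type at all, which is distinct from 0% utilization. A caller is expected to guard accordingly; a hypothetical example:

// Hypothetical caller: skip nodes that have no storage of this type.
Double utilization = getUtilization(report, StorageType.SSD);
if (utilization != null && utilization > 90.0) {
  System.out.println("SSD tier over 90% utilized on "
      + report.getDatanodeInfo().getHostName());
}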
Example 14: convertDatanodeStorageReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
public static DatanodeStorageReportProto convertDatanodeStorageReport(
    DatanodeStorageReport report) {
  return DatanodeStorageReportProto.newBuilder()
      .setDatanodeInfo(convert(report.getDatanodeInfo()))
      .addAllStorageReports(convertStorageReports(report.getStorageReports()))
      .build();
}
Example 15: convertDatanodeStorageReports
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; // import the required package/class
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
    DatanodeStorageReport[] reports) {
  final List<DatanodeStorageReportProto> protos
      = new ArrayList<DatanodeStorageReportProto>(reports.length);
  for (int i = 0; i < reports.length; i++) {
    protos.add(convertDatanodeStorageReport(reports[i]));
  }
  return protos;
}
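These two PBHelper methods serialize reports for the wire; the receiving side performs the mirror conversion. A sketch of what the proto-to-Java direction could look like, under the assumptions that the proto getters follow standard protobuf-java naming and that convert helpers for DatanodeInfo and StorageReport lists exist alongside the methods above:

// Assumed inverse conversion: rebuild a DatanodeStorageReport from its proto.
public static DatanodeStorageReport convertDatanodeStorageReport(
    DatanodeStorageReportProto proto) {
  return new DatanodeStorageReport(
      convert(proto.getDatanodeInfo()), // DatanodeInfoProto -> DatanodeInfo
      convertStorageReports(proto.getStorageReportsList())); // protos -> StorageReport[]
}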