

Java DatanodeReportType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType. If you are wondering what DatanodeReportType is for, how to use it, or where to find working examples, the curated snippets below should help.


DatanodeReportType belongs to the org.apache.hadoop.hdfs.protocol.HdfsConstants package. The following 15 code examples show the class in use, ordered by popularity.
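
Before the examples, here is a minimal, self-contained sketch of how the enum is typically consumed from client code. This is an illustration rather than one of the collected examples: the cast to DistributedFileSystem and the default Configuration are assumptions about the deployment, and the enum is assumed to have the four values (ALL, LIVE, DEAD, DECOMMISSIONING) that Example 14 below enumerates.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportDemo {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the classpath configuration points at an HDFS cluster.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // getDataNodeStats(DatanodeReportType) is the public client entry point
      // that reaches the DFSClient/NameNode code paths shown in the examples below.
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
      for (DatanodeInfo dn : live) {
        System.out.println(dn.getHostName() + " capacity=" + dn.getCapacity());
      }
    }
  }
}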

Example 1: doGet

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Handle fsck request */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws IOException {
  @SuppressWarnings("unchecked")
  final Map<String,String[]> pmap = request.getParameterMap();
  final PrintWriter out = response.getWriter();
  final InetAddress remoteAddress = 
    InetAddress.getByName(request.getRemoteAddr());
  final ServletContext context = getServletContext();    
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);

  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
        
        final FSNamesystem namesystem = nn.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final int totalDatanodes = 
            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
        new NamenodeFsck(conf, nn,
            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
            totalDatanodes, remoteAddress).fsck();
        
        return null;
      }
    });
  } catch (InterruptedException e) {
    response.sendError(400, e.getMessage());
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: FsckServlet.java

Example 2: datanodeReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
DatanodeInfo[] datanodeReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i=0; i<arr.length; i++) {
      arr[i] = new DatanodeInfo(results.get(i));
    }
    return arr;
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: FSNamesystem.java

Example 3: getDatanodeStorageReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);

    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FSNamesystem.java

Example 4: assertReports

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  
  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDatanodeReport.java

Example 5: waitFirstBRCompleted

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Wait until the given namenode gets first block reports from all the datanodes */
public void waitFirstBRCompleted(int nnIndex, int timeout) throws
    IOException, TimeoutException, InterruptedException {
  if (namenodes.size() == 0 || getNN(nnIndex) == null || getNN(nnIndex).nameNode == null) {
    return;
  }

  final FSNamesystem ns = getNamesystem(nnIndex);
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<DatanodeDescriptor> nodes = dm.getDatanodeListForReport
          (DatanodeReportType.LIVE);
      for (DatanodeDescriptor node : nodes) {
        if (!node.checkBlockReportReceived()) {
          return false;
        }
      }
      return true;
    }
  }, 100, timeout);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: MiniDFSCluster.java

Example 6: getNumberOfDataDirsPerHost

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
  HashMap<String, Integer> disksPerHost = new HashMap<>();

  try {
    @SuppressWarnings("resource")
    DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());

    DatanodeStorageReport[] datanodeStorageReports =
        dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);

    for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
      disksPerHost.put(
          datanodeStorageReport.getDatanodeInfo().getHostName(),
          datanodeStorageReport.getStorageReports().length);
    }
  } catch (IOException e) {
    LOG.warn("Number of data directories (disks) per node could not be collected (requires higher privileges).");
  }

  return disksPerHost;
}
 
Developer: cerndb, Project: hdfs-metadata, Lines: 22, Source: DistributedFileSystemMetadata.java

Example 7: datanodeReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
  try {
    return namenode.getDatanodeReport(type);
  } finally {
    scope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: DFSClient.java

Example 8: getDatanodeStorageReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: DFSClient.java

Example 9: getNumberOfDatanodes

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
int getNumberOfDatanodes(DatanodeReportType type) {
  readLock();
  try {
    return getBlockManager().getDatanodeManager().getDatanodeListForReport(
        type).size(); 
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: FSNamesystem.java

Example 10: getDatanodeReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override // ClientProtocol
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
  checkNNStartup();
  DatanodeInfo[] results = namesystem.datanodeReport(type);
  return results;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: NameNodeRpcServer.java

Example 11: getDatanodeStorageReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkNNStartup();
  final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
  return reports;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: NameNodeRpcServer.java

Example 12: getDecommissioningNodes

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** @return list of datanodes where decommissioning is in progress. */
public List<DatanodeDescriptor> getDecommissioningNodes() {
  // There is no need to take namesystem reader lock as
  // getDatanodeListForReport will synchronize on datanodeMap
  // A decommissioning DN may be "alive" or "dead".
  return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: DatanodeManager.java

Example 13: fetchDatanodes

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live, 
    final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
  if (live == null && dead == null) {
    throw new HadoopIllegalArgumentException("Both live and dead lists are null");
  }

  // There is no need to take namesystem reader lock as
  // getDatanodeListForReport will synchronize on datanodeMap
  final List<DatanodeDescriptor> results =
      getDatanodeListForReport(DatanodeReportType.ALL);
  for(DatanodeDescriptor node : results) {
    if (isDatanodeDead(node)) {
      if (dead != null) {
        dead.add(node);
      }
    } else {
      if (live != null) {
        live.add(node);
      }
    }
  }
  
  if (removeDecommissionNode) {
    if (live != null) {
      removeDecomNodeFromList(live);
    }
    if (dead != null) {
      removeDecomNodeFromList(dead);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DatanodeManager.java

Example 14: convert

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
public static DatanodeReportTypeProto
  convert(DatanodeReportType t) {
  switch (t) {
  case ALL: return DatanodeReportTypeProto.ALL;
  case LIVE: return DatanodeReportTypeProto.LIVE;
  case DEAD: return DatanodeReportTypeProto.DEAD;
  case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
  default: 
    throw new IllegalArgumentException("Unexpected data type report: " + t);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: PBHelper.java

Example 15: getDatanodeReport

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; // import the required package/class
@Override
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
    throws IOException {
  GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto
      .newBuilder()
      .setType(PBHelper.convert(type)).build();
  try {
    return PBHelper.convert(
        rpcProxy.getDatanodeReport(null, req).getDiList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: ClientNamenodeProtocolTranslatorPB.java


Note: The org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects; copyright remains with the original authors. Please consult each project's License before distributing or reusing the code, and do not republish without permission.