

Java DFSTestUtil.resetLastUpdatesWithOffset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.resetLastUpdatesWithOffset. If you have been wondering what DFSTestUtil.resetLastUpdatesWithOffset does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


Four code examples of the DFSTestUtil.resetLastUpdatesWithOffset method are shown below, sorted by popularity by default.
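For orientation before the examples, here is a minimal sketch of what the helper does, modeled on the Hadoop 2.7-era DFSTestUtil source (treat the exact body as an assumption and verify against your Hadoop version). A negative offset backdates a datanode's recorded heartbeat; an offset of 0 makes it look freshly heartbeated.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.util.Time;

// Sketch (assumption, based on Hadoop 2.7-era sources): shift the datanode's
// recorded heartbeat timestamps by `offset` milliseconds relative to now.
public static void resetLastUpdatesWithOffset(DatanodeInfo dn, long offset) {
  dn.setLastUpdate(Time.now() + offset);
  dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
}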

Example 1: testChooseTargetWithStaleNodes

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
  // Set dataNodes[0] as stale
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], -(staleInterval + 1));
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // dataNodes[0] is stale, so chooseTarget should pick dataNodes[1],
  // which is on the same rack as dataNodes[0] (the writer)
  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertEquals(storages[1], targets[0]);

  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(targets.length, 1);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  
  // reset
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], 0);
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TestReplicationPolicy.java
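Why the -(staleInterval + 1) offset works: staleness is decided by comparing the node's monotonic last-update time against the configured interval, so backdating the heartbeat by just over staleInterval flips the node to stale, and an offset of 0 plus another heartbeatCheck() flips it back. A hedged sketch of the predicate, modeled on DatanodeInfo.isStale in Hadoop 2.7-era sources (the exact comparison is an assumption):

// Sketch (assumption): a node counts as stale once its last heartbeat
// is at least `staleInterval` milliseconds in the past.
public boolean isStale(long staleInterval) {
  return (Time.monotonicNow() - getLastUpdateMonotonic()) >= staleInterval;
}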

Example 2: testChooseTargetWithHalfStaleNodes

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * In this test case, we mark 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale.
 * When the number of replicas is less than or equal to 3, only healthy
 * datanodes should be returned by the chooseTarget method; when the number
 * of replicas is 4, a stale node has to be included.
 * 
 * @throws Exception
 */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
  for (int i = 0; i < 3; i++) {
    DFSTestUtil
        .resetLastUpdatesWithOffset(dataNodes[i], -(staleInterval + 1));
  }
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();

  DatanodeStorageInfo[] targets = chooseTarget(0);
  assertEquals(targets.length, 0);

  // Since we have 6 datanodes total, stale nodes should
  // not be returned until we ask for more than 3 targets
  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));

  targets = chooseTarget(2);
  assertEquals(targets.length, 2);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));

  targets = chooseTarget(3);
  assertEquals(targets.length, 3);
  assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));

  targets = chooseTarget(4);
  assertEquals(targets.length, 4);
  assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));

  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 51, Source file: TestReplicationPolicy.java
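The containsWithinRange helper is local to TestReplicationPolicy and not shown in this listing; the test uses two mirror overloads (a storage checked against a descriptor range, and a descriptor checked against a storage range). A hedged reconstruction of the first variant (names and inclusive bounds are assumptions):

// Sketch (assumption): true if `target` resolves to one of
// nodes[startIndex..endIndex], inclusive.
private boolean containsWithinRange(DatanodeStorageInfo target,
    DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
  for (int i = startIndex; i <= endIndex; i++) {
    if (nodes[i].equals(target.getDatanodeDescriptor())) {
      return true;
    }
  }
  return false;
}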

Example 3: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test that chooseReplicaToDelete selects replicas based on
 * block locality and free space.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
  final Map<String, List<DatanodeStorageInfo>> rackMap
      = new HashMap<String, List<DatanodeStorageInfo>>();
  
  dataNodes[0].setRemaining(4*1024*1024);
  replicaList.add(storages[0]);
  
  dataNodes[1].setRemaining(3*1024*1024);
  replicaList.add(storages[1]);
  
  dataNodes[2].setRemaining(2*1024*1024);
  replicaList.add(storages[2]);
  
  dataNodes[5].setRemaining(1*1024*1024);
  replicaList.add(storages[5]);
  
  // Refresh the last update time for all the datanodes
  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }
  
  List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
  List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
  replicator.splitNodesWithRack(replicaList, rackMap, first, second);
  // storages[0] and storages[1] are in the first set, as their rack has two
  // replica nodes, while storages[2] and storages[5] are in the second set.
  assertEquals(2, first.size());
  assertEquals(2, second.size());
  List<StorageType> excessTypes = new ArrayList<StorageType>();
  {
    // test returning null
    excessTypes.add(StorageType.SSD);
    assertNull(replicator.chooseReplicaToDelete(
        null, null, (short)3, first, second, excessTypes));
  }
  excessTypes.add(StorageType.DEFAULT);
  DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
      null, null, (short)3, first, second, excessTypes);
  // Within the first set, storages[1] has the least free space
  assertEquals(chosen, storages[1]);

  replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
  assertEquals(0, first.size());
  assertEquals(3, second.size());
  // Within the second set, storages[5] has the least free space
  excessTypes.add(StorageType.DEFAULT);
  chosen = replicator.chooseReplicaToDelete(
      null, null, (short)2, first, second, excessTypes);
  assertEquals(chosen, storages[5]);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 57, Source file: TestReplicationPolicy.java
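A note on the reset loop near the top of this test: in the Hadoop 2.7-era BlockPlacementPolicyDefault (stated here as an assumption about that code base), chooseReplicaToDelete first prefers the replica whose datanode has the oldest heartbeat when one lags well behind the others, and only then falls back to the node with the least remaining space. Refreshing every datanode's last-update time with an offset of 0 neutralizes the heartbeat criterion, so the assertions above deterministically exercise the free-space tie-breaker.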

Example 4: testInitializeBlockRecovery

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testInitializeBlockRecovery() throws Exception {
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();

  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
      (short) 3,
      BlockUCState.UNDER_CONSTRUCTION,
      new DatanodeStorageInfo[] {s1, s2, s3});

  // Recovery attempt #1.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #2.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #3.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #4.
  // Reset everything and again pick the DN with the most recent heartbeat.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestBlockInfoUnderConstruction.java
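This test leans on how initializeBlockRecovery picks the primary datanode for recovery; in the Hadoop 2.7-era BlockInfoContiguousUnderConstruction (an assumption about that code base), it is the expected replica with the most recent heartbeat among those not yet tried as primary. That explains the sequence: attempt #1 picks dd2 (-1s, freshest), attempt #2 skips dd2 and picks dd1 (-2s), attempt #3 picks the remaining dd3, and attempt #4, once every replica has been tried and the primary flags are cleared, again picks the freshest node, dd3 (offset 0).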


Note: The org.apache.hadoop.hdfs.DFSTestUtil.resetLastUpdatesWithOffset examples in this article were compiled by 纯净天空 from Github/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Do not reproduce without permission.