This article collects typical usage examples of the Java class org.apache.hadoop.net.NetworkTopology. If you are wondering what exactly the NetworkTopology class does, how to use it, or where to find examples of it in use, the curated code examples below may help.
The NetworkTopology class belongs to the org.apache.hadoop.net package. Twelve code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
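Before diving into the examples, here is a minimal self-contained sketch (not taken from this page; the host and rack names are invented) of what the class models: a tree of racks and hosts, built from NodeBase entries, that can answer rack-membership and distance queries.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class TopologyDemo {
  public static void main(String[] args) {
    NetworkTopology cluster = new NetworkTopology();
    // NodeBase(name, location): the location is the rack path.
    Node h1 = new NodeBase("host1", "/rack1");
    Node h2 = new NodeBase("host2", "/rack1");
    Node h3 = new NodeBase("host3", NetworkTopology.DEFAULT_RACK); // "/default-rack"
    cluster.add(h1);
    cluster.add(h2);
    cluster.add(h3);
    System.out.println(cluster.getNumOfRacks());      // 2
    System.out.println(cluster.isOnSameRack(h1, h2)); // true
    System.out.println(cluster.getDistance(h1, h2));  // 2 (same rack)
    System.out.println(cluster.getDistance(h1, h3));  // 4 (different racks)
  }
}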
Example 1: OneBlockInfo
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);
  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i],
          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }
  // The topology paths have the host name included as the last
  // component. Strip it.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
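The two NodeBase calls above do opposite jobs: one builds a full topology path from a host plus a rack, the other parses a full path and strips the trailing host component. A quick illustration (the values in comments are what the Hadoop API produces):

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class NodeBaseDemo {
  public static void main(String[] args) {
    // Build a full topology path from a host name and a rack location:
    String path = new NodeBase("host1", NetworkTopology.DEFAULT_RACK).toString();
    System.out.println(path); // /default-rack/host1
    // Parse a full path and keep only the network location:
    System.out.println(new NodeBase(path).getNetworkLocation()); // /default-rack
  }
}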
Example 2: testCaching
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
@Test
public void testCaching() {
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr = InetAddress.getByName("host1");
    MyResolver.resolvedHost1 = iaddr.getHostAddress();
  } catch (UnknownHostException e) {
    // Ignore if not found
  }
  Node node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
}
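The MyResolver helper referenced above is not reproduced on this page. A minimal sketch of what such a DNSToSwitchMapping could look like (hypothetical; only the interface, the resolvedHost1 field, and the expected "/rack1" answer are implied by the test):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;

public class MyResolver implements DNSToSwitchMapping {
  static String resolvedHost1 = "host1";

  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<>(names.size());
    for (String name : names) {
      // Answer "/rack1" for the known host, the default rack otherwise.
      racks.add(name.equals(resolvedHost1) ? "/rack1"
                                           : NetworkTopology.DEFAULT_RACK);
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() { }

  @Override
  public void reloadCachedMappings(List<String> names) { }
}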
Example 3: validateLabelsRequests
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
private void validateLabelsRequests(ResourceRequest resourceRequest,
    boolean isReduce) {
  switch (resourceRequest.getResourceName()) {
  case "map":
  case "reduce":
  case NetworkTopology.DEFAULT_RACK:
    Assert.assertNull(resourceRequest.getNodeLabelExpression());
    break;
  case "*":
    Assert.assertEquals(isReduce ? "ReduceNodes" : "MapNodes",
        resourceRequest.getNodeLabelExpression());
    break;
  default:
    Assert.fail("Invalid resource location "
        + resourceRequest.getResourceName());
  }
}
Example 4: createReq
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
private ContainerRequestEvent
    createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
        boolean earlierFailedAttempt, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId,
            containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
Example 5: chooseTarget
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
boolean chooseTarget(DBlock db, Source source,
    List<StorageType> targetTypes, Matcher matcher) {
  final NetworkTopology cluster = dispatcher.getCluster();
  for (StorageType t : targetTypes) {
    for (StorageGroup target : storages.getTargetStorages(t)) {
      if (matcher.match(cluster, source.getDatanodeInfo(),
          target.getDatanodeInfo())) {
        final PendingMove pm = source.addPendingMove(db, target);
        if (pm != null) {
          dispatcher.executePendingMove(pm);
          return true;
        }
      }
    }
  }
  return false;
}
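The Matcher parameter decides which source/target pairs are eligible. For reference, the balancer's Matcher interface is shaped roughly like the sketch below (only the same-rack case is shown; it leans on NetworkTopology.isOnSameRack):

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;

public interface Matcher {
  boolean match(NetworkTopology cluster, Node left, Node right);

  // Accept a pair only when both nodes sit on the same rack.
  Matcher SAME_RACK = new Matcher() {
    @Override
    public boolean match(NetworkTopology cluster, Node left, Node right) {
      return cluster.isOnSameRack(left, right);
    }
  };
}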
Example 6: Dispatcher
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);
  this.cluster = NetworkTopology.getInstance(conf);
  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0 ? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;
  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
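Note the use of NetworkTopology.getInstance(conf) rather than a hard-coded constructor: the factory consults the net.topology.impl key, so a cluster can swap in another topology implementation. A small sketch, assuming the node-group implementation as the replacement:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;

public class TopologyFactoryDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Request the four-level rack/nodegroup/host topology instead of the default.
    conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class, NetworkTopology.class);
    NetworkTopology cluster = NetworkTopology.getInstance(conf);
    System.out.println(cluster.getClass().getName());
  }
}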
Example 7: chooseRemoteRack
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
/**
 * {@inheritDoc}
 */
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();
  final String rackLocation = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // randomly choose from remote racks
    chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // fall back to the local rack
    chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
        rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
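This override comes from a node-group-aware placement policy, where a network location carries two levels such as /rack1/nodegroup1. getFirstHalf and getLastHalf split the location at its last path separator, as this quick illustration shows:

import org.apache.hadoop.net.NetworkTopology;

public class HalfDemo {
  public static void main(String[] args) {
    String location = "/rack1/nodegroup1";
    System.out.println(NetworkTopology.getFirstHalf(location)); // /rack1
    System.out.println(NetworkTopology.getLastHalf(location));  // /nodegroup1
  }
}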
Example 8: checkTargetsOnDifferentNodeGroup
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
/**
 * Scan the targets list: all targets should be on different NodeGroups.
 * Return false if two targets are found on the same NodeGroup.
 */
private static boolean checkTargetsOnDifferentNodeGroup(
    DatanodeStorageInfo[] targets) {
  if (targets.length == 0)
    return true;
  Set<String> targetSet = new HashSet<String>();
  for (DatanodeStorageInfo storage : targets) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    String nodeGroup = NetworkTopology.getLastHalf(node.getNetworkLocation());
    if (targetSet.contains(nodeGroup)) {
      return false;
    } else {
      targetSet.add(nodeGroup);
    }
  }
  return true;
}
Example 9: coreResolve
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
private static Node coreResolve(String hostName) {
  List<String> tmpList = new ArrayList<String>(1);
  tmpList.add(hostName);
  List<String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    rName = NetworkTopology.DEFAULT_RACK;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Couldn't resolve " + hostName + ". Falling back to "
          + NetworkTopology.DEFAULT_RACK);
    }
  } else {
    rName = rNameList.get(0);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resolved " + hostName + " to " + rName);
    }
  }
  return new NodeBase(hostName, rName);
}
Example 10: initialize
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
@Override
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap,
    Host2NodesMap host2datanodeMap) {
  this.considerLoad = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  this.considerLoadFactor = conf.getDouble(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
  this.stats = stats;
  this.clusterMap = clusterMap;
  this.host2datanodeMap = host2datanodeMap;
  this.heartbeatInterval = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;
  this.tolerateHeartbeatMultiplier = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
      DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT);
  this.staleInterval = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
}
Example 11: BlockPlacementPolicies
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
public BlockPlacementPolicies(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap,
    Host2NodesMap host2datanodeMap) {
  final Class<? extends BlockPlacementPolicy> replicatorClass = conf
      .getClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
          DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT,
          BlockPlacementPolicy.class);
  replicationPolicy = ReflectionUtils.newInstance(replicatorClass, conf);
  replicationPolicy.initialize(conf, stats, clusterMap, host2datanodeMap);
  final Class<? extends BlockPlacementPolicy> blockPlacementECClass =
      conf.getClass(DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY,
          DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT,
          BlockPlacementPolicy.class);
  ecPolicy = ReflectionUtils.newInstance(blockPlacementECClass, conf);
  ecPolicy.initialize(conf, stats, clusterMap, host2datanodeMap);
}
Example 12: initialize
import org.apache.hadoop.net.NetworkTopology; // import the required package/class
@Override
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) {
  super.initialize(conf, stats, clusterMap, host2datanodeMap);
  float balancedPreferencePercent = conf.getFloat(
      DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
  LOG.info("Available space block placement policy initialized: "
      + DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
      + " = " + balancedPreferencePercent);
  if (balancedPreferencePercent > 1.0) {
    LOG.warn("The value of "
        + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
        + " is greater than 1.0 but should be in the range 0.0 - 1.0");
  }
  if (balancedPreferencePercent < 0.5) {
    LOG.warn("The value of "
        + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
        + " is less than 0.5 so datanodes with more used percent will"
        + " receive more block allocations.");
  }
  balancedPreference = (int) (100 * balancedPreferencePercent);
}
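The configured fraction ends up as an integer percentage in balancedPreference. In the rest of the policy (not shown on this page) it acts, roughly, as the probability of preferring the emptier of two randomly chosen candidate datanodes. A hypothetical helper capturing just that idea:

import java.util.Random;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

public class BalancedPreferenceDemo {
  // Hypothetical helper, not the actual Hadoop implementation: with
  // probability balancedPreference/100 return the node with more
  // remaining space, otherwise the other one.
  static DatanodeDescriptor pick(Random rand, int balancedPreference,
      DatanodeDescriptor a, DatanodeDescriptor b) {
    DatanodeDescriptor moreFree = a.getRemaining() >= b.getRemaining() ? a : b;
    DatanodeDescriptor lessFree = (moreFree == a) ? b : a;
    return rand.nextInt(100) < balancedPreference ? moreFree : lessFree;
  }
}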