本文整理汇总了Java中org.apache.hadoop.net.DNSToSwitchMapping类的典型用法代码示例。如果您正苦于以下问题:Java DNSToSwitchMapping类的具体用法?Java DNSToSwitchMapping怎么用?Java DNSToSwitchMapping使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
DNSToSwitchMapping类属于org.apache.hadoop.net包,在下文中一共展示了DNSToSwitchMapping类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: init
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * One-time initialization: instantiate the configured DNSToSwitchMapping
 * implementation and wrap it with a caching layer unless it caches already.
 * Subsequent calls are no-ops.
 */
public synchronized static void init(Configuration conf) {
  // Only the first caller performs initialization.
  if (initCalled) {
    return;
  }
  initCalled = true;
  Class<? extends DNSToSwitchMapping> mappingClass =
      conf.getClass(
          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class,
          DNSToSwitchMapping.class);
  try {
    DNSToSwitchMapping mapping =
        ReflectionUtils.newInstance(mappingClass, conf);
    // Wrap the configured class with the Cached implementation to save on
    // repetitive lookups — but avoid double caching when the configured
    // implementation is already a CachedDNSToSwitchMapping.
    if (mapping instanceof CachedDNSToSwitchMapping) {
      dnsToSwitchMapping = mapping;
    } else {
      dnsToSwitchMapping = new CachedDNSToSwitchMapping(mapping);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
示例2: testCaching
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test
public void testCaching() {
  // Configure RackResolver with the deterministic test resolver.
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    // Prime the resolver with host1's address so the lookup can match.
    MyResolver.resolvedHost1 =
        InetAddress.getByName("host1").getHostAddress();
  } catch (UnknownHostException ignored) {
    // Best effort: proceed even when "host1" does not resolve locally.
  }
  // Resolving the same host twice must return /rack1 both times
  // (the second hit exercises the cache).
  Assert.assertEquals("/rack1",
      RackResolver.resolve("host1").getNetworkLocation());
  Assert.assertEquals("/rack1",
      RackResolver.resolve("host1").getNetworkLocation());
  // Unknown hosts fall back to the default rack.
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK,
      RackResolver.resolve(invalidHost).getNetworkLocation());
}
示例3: testFillInRacks
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test
public void testFillInRacks() {
  // MyResolver resolves every host to /rack1 deterministically.
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  client.init(conf);

  // Ask for two hosts plus an explicit rack; the client should also fill
  // in the rack resolved from the hosts (/rack1) and the ANY request.
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          new String[] {"/rack2"}, Priority.newInstance(1));
  client.addContainerRequest(request);

  for (String location : new String[] {"host1", "host2", "/rack1", "/rack2",
      ResourceRequest.ANY}) {
    verifyResourceRequest(client, request, location, true);
  }
}
示例4: testDifferentLocalityRelaxationSamePriority
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  // A non-relaxed request at priority 1 ...
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host1", "host2"}, null, Priority.newInstance(1), false));
  // ... followed by a relaxed request at the same priority: mixing
  // relaxation settings at one priority must be rejected.
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host3"}, null, Priority.newInstance(1), true));
}
示例5: testLocalityRelaxationDifferentLevels
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test (expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  // A non-relaxed host-level request at priority 1 ...
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host1", "host2"}, null, Priority.newInstance(1), false));
  // ... followed by a relaxed rack-level request at the same priority:
  // conflicting relaxation across levels must be rejected.
  client.addContainerRequest(new ContainerRequest(capability, null,
      new String[] {"rack1"}, Priority.newInstance(1), true));
}
示例6: beforeAllTests
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@BeforeClass
public static void beforeAllTests() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class,
      DNSToSwitchMapping.class);
  loadBalancer = new MockBalancer();
  loadBalancer.setConf(conf);

  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getServerName()).thenReturn(master);
  loadBalancer.setMasterServices(services);

  // Set up the rack topology: servers 0-4 -> rack1, 5-9 -> rack2,
  // everything from 10 up -> rack3 (5 machines per rack).
  rackManager = Mockito.mock(RackManager.class);
  for (int i = 0; i < NUM_SERVERS; i++) {
    servers[i] = ServerName.valueOf("foo" + i + ":1234", -1);
    final String rack;
    if (i < 5) {
      rack = "rack1";
    } else if (i < 10) {
      rack = "rack2";
    } else {
      rack = "rack3";
    }
    Mockito.when(rackManager.getRack(servers[i])).thenReturn(rack);
  }
}
示例7: testFillInRacks
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test
public void testFillInRacks() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  // MyResolver maps every host to /rack1 deterministically.
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  // Request two hosts and one explicit rack; the client must also add the
  // rack derived from the hosts (/rack1) and the off-switch ANY request.
  ContainerRequest request = new ContainerRequest(
      Resource.newInstance(1024, 1),
      new String[] {"host1", "host2"},
      new String[] {"/rack2"},
      Priority.newInstance(1));
  client.addContainerRequest(request);

  verifyResourceRequest(client, request, "host1", true);
  verifyResourceRequest(client, request, "host2", true);
  verifyResourceRequest(client, request, "/rack1", true);
  verifyResourceRequest(client, request, "/rack2", true);
  verifyResourceRequest(client, request, ResourceRequest.ANY, true);
}
示例8: testDifferentLocalityRelaxationSamePriority
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1);
  // First, a request at priority 1 with locality relaxation disabled ...
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host1", "host2"}, null, Priority.newInstance(1), false));
  // ... then a relaxed request at the same priority: mixing relaxation
  // settings at one priority must throw.
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host3"}, null, Priority.newInstance(1), true));
}
示例9: testLocalityRelaxationDifferentLevels
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@Test (expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1);
  // First, a non-relaxed host-level request at priority 1 ...
  client.addContainerRequest(new ContainerRequest(capability,
      new String[] {"host1", "host2"}, null, Priority.newInstance(1), false));
  // ... then a relaxed rack-level request at the same priority:
  // conflicting relaxation across levels must throw.
  client.addContainerRequest(new ContainerRequest(capability, null,
      new String[] {"rack1"}, Priority.newInstance(1), true));
}
示例10: run
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * Create the job tracker and run it.
 *
 * Builds a JobConf (reusing {@code jc} when supplied), points the local dir
 * at the test build area, forces StaticMapping as the topology resolver,
 * starts the tracker as {@code ugi}, then blocks in offerService().
 * Any failure is logged and flips {@code isActive} to false.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    // Keep MapReduce local state under the test build directory.
    String localPath = System.getProperty("test.build.data",
        "build/test/mapred/local");
    File f = new File(localPath).getAbsoluteFile();
    jc.set("mapred.local.dir", f.getAbsolutePath());
    // Use the in-memory StaticMapping so tests control rack topology.
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    // Timestamp-derived id keeps successive tracker instances distinct.
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getCurrentUser();
    }
    // Start the tracker under the configured user context.
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, id);
      }
    });
    tracker.offerService(); // blocks for the lifetime of the tracker
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
示例11: setUp
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
@BeforeClass
public static void setUp() throws Exception {
  // Bind the JobTracker RPC/HTTP endpoints to ephemeral ports and use
  // StaticMapping so the test can script the rack topology.
  Configuration conf = new Configuration();
  conf.set("mapreduce.jobtracker.address", "localhost:0");
  conf.set("mapreduce.jobtracker.http.address", "0.0.0.0:0");
  conf.setClass("topology.node.switch.mapping.impl", StaticMapping.class,
      DNSToSwitchMapping.class);

  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  String namenodeUri = dfsCluster.getFileSystem().getUri().toString();
  mrCluster = new MiniMRCluster(numSlaves, namenodeUri, 1);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();

  // Set up the topology information, then fake first contact from each
  // task tracker.
  for (int i = 0; i < hosts.length; i++) {
    StaticMapping.addNodeToRack(hosts[i], racks[i]);
  }
  for (String trackerName : trackers) {
    FakeObjectUtilities.establishFirstContact(jt, trackerName);
  }
}
示例12: run
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * Create the job tracker and run it.
 *
 * Configures the local dir and topology resolver, starts a tracker with a
 * timestamp-derived id, then blocks in offerService(). Failures are logged
 * and flip {@code isActive} to false.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    // Keep local MapReduce state under the test build tree.
    jc.set("mapred.local.dir",
        new File("build/test/mapred/local").getAbsolutePath());
    // Use the in-memory StaticMapping so tests control rack topology.
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    String id = new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    tracker = JobTracker.startTracker(jc, id);
    tracker.offerService(); // blocks while the tracker is serving
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
示例13: initialize
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * {@inheritDoc}
 *
 * After the base initialization, records the namesystem and loads the
 * F4-specific settings: load-aware placement flag, parity configuration,
 * and the staging/local directory paths.
 */
public void initialize(Configuration conf,
    FSClusterStats stats,
    NetworkTopology clusterMap,
    HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping,
    FSNamesystem ns) {
  super.initialize(
      conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, ns);
  this.namesystem = ns;
  // Default stripe length; presumably adjusted by initParityConfigs()
  // below — TODO confirm.
  this.stripeLen = 0;
  this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true);
  FSNamesystem.LOG.info("F4: Block placement will consider load: "
      + this.considerLoad);
  initParityConfigs();
  // Staging and local directories used by the F4 placement scheme.
  this.stagingDir = conf.get("dfs.f4.staging", "/staging");
  this.localDir = conf.get("dfs.f4.local", "/local");
}
示例14: getInstance
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * Get an instance of the configured block placement policy based on the
 * value of the configuration parameter {@code dfs.block.replicator.classname}
 * (defaults to {@link BlockPlacementPolicyDefault}).
 *
 * @param conf the configuration to be used
 * @param stats an object that is used to retrieve the load on the cluster
 * @param clusterMap the network topology of the cluster
 * @param hostsReader reader for the configured host lists
 * @param dnsToSwitchMapping resolver from host names to network locations
 * @param namesystem the FSNamesystem
 * @return an initialized instance of BlockPlacementPolicy
 */
public static BlockPlacementPolicy getInstance(Configuration conf,
    FSClusterStats stats,
    NetworkTopology clusterMap,
    HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping,
    FSNamesystem namesystem) {
  Class<? extends BlockPlacementPolicy> replicatorClass =
      conf.getClass("dfs.block.replicator.classname",
          BlockPlacementPolicyDefault.class,
          BlockPlacementPolicy.class);
  // ReflectionUtils.newInstance is generic over the Class argument, so the
  // previous explicit (BlockPlacementPolicy) cast was redundant.
  BlockPlacementPolicy replicator =
      ReflectionUtils.newInstance(replicatorClass, conf);
  replicator.initialize(conf, stats, clusterMap, hostsReader,
      dnsToSwitchMapping, namesystem);
  return replicator;
}
示例15: initialize
import org.apache.hadoop.net.DNSToSwitchMapping; //导入依赖的package包/类
/**
 * {@inheritDoc}
 *
 * After the base initialization, reads the rack/machine window sizes,
 * captures the host reader and DNS mapping, refreshes the host list via
 * {@code hostsUpdated(true)}, and lazily creates the random source.
 */
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap, HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem ns) {
  super.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, ns);
  // Window parameters consulted by the placement logic — exact semantics
  // are defined elsewhere in this policy class.
  this.rackWindow = conf.getInt("dfs.replication.rackwindow", 2);
  this.machineWindow = conf.getInt("dfs.replication.machineWindow", 5);
  this.racks = new ArrayList<String>();
  this.hostsReader = hostsReader;
  this.dnsToSwitchMapping = dnsToSwitchMapping;
  hostsUpdated(true); // populate rack/host state from the reader
  // NOTE(review): unsynchronized lazy init of r (apparently shared) —
  // racy if initialize() can run concurrently; confirm call context.
  if (r == null) {
    r = new Random();
  }
  LOG.info("BlockPlacementPolicyConfigurable initialized");
}