This article collects typical usage examples of the Java method org.apache.hadoop.http.HttpServer.addServlet. If you have been wondering what HttpServer.addServlet does, how to use it, or what real calls look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.http.HttpServer.
The following shows 11 code examples of HttpServer.addServlet, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
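Before diving in, here is a minimal, self-contained sketch of the pattern the examples share: create an HttpServer bound to an ephemeral port, register a servlet with addServlet(name, pathSpec, servletClass), and start the server. This sketch is distilled from the examples below rather than taken from any one of them; AddServletSketch and HelloServlet are hypothetical names introduced for illustration, and it assumes the same old-style HttpServer constructor the examples use.

import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.http.HttpServer;

public class AddServletSketch {
  // Hypothetical servlet, standing in for DelayServlet, JobEndServlet, etc.
  public static class HelloServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
        throws IOException {
      PrintWriter out = resp.getWriter();
      out.print("hello");
      out.close();
    }
  }

  public static void main(String[] args) throws Exception {
    // HttpServer expects the webapp directory <build.webapps>/<name> to exist,
    // which is why the examples below call mkdirs() first.
    new File(System.getProperty("build.webapps", "build/webapps")
        + "/test").mkdirs();
    // name="test", bind all interfaces, port 0, findPort=true:
    // the server probes upward from the given port until one is free.
    HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
    // Dispatch requests matching /hello to HelloServlet.
    server.addServlet("hello", "/hello", HelloServlet.class);
    server.start();
    System.out.println("Listening on http://localhost:" + server.getPort() + "/hello");
    server.stop();
  }
}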
Example 1: setUp
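This JUnit setUp method starts an HttpServer on an ephemeral port, registers three test servlets (delay, jobend, fail), and resets their static counters so each test starts from a clean state.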
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
public void setUp() throws Exception {
  new File(System.getProperty("build.webapps", "build/webapps")
      + "/test").mkdirs();
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("delay", "/delay", DelayServlet.class);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.addServlet("fail", "/fail", FailServlet.class);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");

  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  DelayServlet.calledTimes = 0;
  FailServlet.calledTimes = 0;
}
Example 2: serviceStart
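This serviceStart override from the YARN web application proxy registers WebAppProxyServlet under the proxy path spec, stashes its collaborators as server attributes, and wraps startup failures in a YarnRuntimeException.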
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Override
protected void serviceStart() throws Exception {
  try {
    proxyServer = new HttpServer("proxy", bindAddress, port,
        port == 0, getConfig(), acl);
    proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
    proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
    proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
    proxyServer.start();
  } catch (IOException e) {
    LOG.fatal("Could not start proxy web server", e);
    throw new YarnRuntimeException("Could not start proxy web server", e);
  }
  super.serviceStart();
}
Example 3: testImageTransferTimeout
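This HDFS test registers a /getimage servlet, sets a 2-second read timeout on TransferFsImage, and asserts that fetching from the servlet fails with a SocketTimeoutException.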
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
/**
 * Test to verify the read timeout
 */
@Test(timeout = 5000)
public void testImageTransferTimeout() throws Exception {
  HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    TransferFsImage.timeout = 2000;
    try {
      TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
          null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Read should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    if (testServer != null) {
      testServer.stop();
    }
  }
}
Example 4: testReadURL
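This test serves fake edit-log data from a servlet and verifies that EditLogFileInputStream.fromUrl reads every operation and picks up the length header.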
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Test
public void testReadURL() throws Exception {
  // Start a simple web server which hosts the log data.
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.start();
  try {
    server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class);
    URL url = new URL("http://localhost:" + server.getPort() + "/fakeLog");
    EditLogInputStream elis = EditLogFileInputStream.fromUrl(
        url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
        false);
    // Read the edit log and verify that we got all of the data.
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
        FSImageTestUtil.countEditLogOpTypes(elis);
    assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
    // Check that length header was picked up.
    assertEquals(FAKE_LOG_DATA.length, elis.length());
    elis.close();
  } finally {
    server.stop();
  }
}
Example 5: setUp
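This setUp creates the webapp directory, registers the TaskTracker's MapOutputServlet under /mapOutput with shuffle SSL disabled, and records the server's base URL for the tests.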
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test");
  System.out.println("dir=" + dir.getAbsolutePath());
  if (!dir.exists()) {
    assertTrue(dir.mkdirs());
  }
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class);
  server.setAttribute(JobTracker.SHUFFLE_SSL_ENABLED_KEY, false);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
Example 6: start
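When the capacity scheduler starts under a JobTracker, it initializes its queues and initialization poller, then registers CapacitySchedulerServlet at /scheduler on the JobTracker's info server so scheduler state can be inspected over HTTP.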
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Override
public synchronized void start() throws IOException {
  if (started) return;
  super.start();
  // initialize our queues from the config settings
  if (null == schedConf) {
    schedConf = new CapacitySchedulerConf();
  }
  // Initialize queues
  QueueManager queueManager = taskTrackerManager.getQueueManager();
  Set<String> queueNames = queueManager.getQueues();
  initialize(queueManager, parseQueues(queueNames, schedConf),
      getConf(), schedConf);
  // listen to job changes
  taskTrackerManager.addJobInProgressListener(jobQueuesManager);
  // Start thread for initialization
  if (initializationPoller == null) {
    this.initializationPoller = new JobInitializationPoller(
        jobQueuesManager, schedConf, queueNames, taskTrackerManager);
  }
  initializationPoller.init(queueNames.size(), schedConf);
  initializationPoller.setDaemon(true);
  initializationPoller.start();
  if (taskTrackerManager instanceof JobTracker) {
    JobTracker jobTracker = (JobTracker) taskTrackerManager;
    HttpServer infoServer = jobTracker.infoServer;
    infoServer.setAttribute("scheduler", this);
    infoServer.addServlet("scheduler", "/scheduler",
        CapacitySchedulerServlet.class);
  }
  started = true;
  LOG.info("Capacity scheduler initialized " + queueNames.size() + " queues");
}
Example 7: startHttpServer
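A test helper that starts an HttpServer with just the jobend servlet and resets JobEndServlet's static state before handing the server back.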
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
private static HttpServer startHttpServer() throws Exception {
  new File(System.getProperty(
      "build.webapps", "build/webapps") + "/test").mkdirs();
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.start();
  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  JobEndServlet.baseUrl = "http://localhost:" + server.getPort() + "/";
  JobEndServlet.foundJobState = null;
  return server;
}
Example 8: setUp
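The same shuffle setUp as Example 5, minus the shuffle SSL attribute.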
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test");
  System.out.println("dir=" + dir.getAbsolutePath());
  if (!dir.exists()) {
    assertTrue(dir.mkdirs());
  }
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
Example 9: start
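The dynamic-priority scheduler wires up its allocations, delegate scheduler, and refresh timer, then exposes DynamicPriorityServlet at /scheduler on the JobTracker's info server, mirroring Example 6.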
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Override
public void start() throws IOException {
  Configuration conf = getConf();
  QueueManager queueManager = taskTrackerManager.getQueueManager();
  allocations = new Allocations(conf, queueManager);
  scheduler = ReflectionUtils.newInstance(
      conf.getClass(PrioritySchedulerOptions.DYNAMIC_SCHEDULER_SCHEDULER,
          PriorityScheduler.class, QueueTaskScheduler.class), conf);
  scheduler.setAllocator(allocations);
  scheduler.setConf(conf);
  scheduler.setTaskTrackerManager(taskTrackerManager);
  scheduler.start();
  long interval = conf.getLong(
      PrioritySchedulerOptions.DYNAMIC_SCHEDULER_ALLOC_INTERVAL, 20) * 1000;
  timer.scheduleAtFixedRate(allocations, interval, interval);
  for (String queue : queueManager.getLeafQueueNames()) {
    Object info = queueManager.getSchedulerInfo(queue);
    QueueInfo queueInfo = new QueueInfo(queue, info, allocations);
    queueManager.setSchedulerInfo(queue, queueInfo);
  }
  if (taskTrackerManager instanceof JobTracker) {
    JobTracker jobTracker = (JobTracker) taskTrackerManager;
    HttpServer infoServer = jobTracker.infoServer;
    infoServer.setAttribute("scheduler", this);
    infoServer.addServlet("scheduler", "/scheduler",
        DynamicPriorityServlet.class);
  }
}
Example 10: ProxyJobTracker
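The Corona ProxyJobTracker constructor brings up an RPC server, an info HttpServer with a /proxy servlet, job-history state, metrics, a cache-expiry thread, and a Thrift server.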
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
public ProxyJobTracker(CoronaConf conf) throws IOException {
  this.conf = conf;
  fs = FileSystem.get(conf);
  String infoAddr =
      conf.get("mapred.job.tracker.corona.proxyaddr", "0.0.0.0:0");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindAddress = infoSocAddr.getHostName();
  int port = infoSocAddr.getPort();
  LOCALMACHINE = infoBindAddress;
  startTime = getClock().getTime();

  CoronaConf coronaConf = new CoronaConf(conf);
  InetSocketAddress rpcSockAddr = NetUtils.createSocketAddr(
      coronaConf.getProxyJobTrackerAddress());
  rpcServer = RPC.getServer(
      this,
      rpcSockAddr.getHostName(),
      rpcSockAddr.getPort(),
      conf.getInt("corona.proxy.job.tracker.handler.count", 10),
      false,
      conf);
  rpcServer.start();
  LOG.info("ProxyJobTracker RPC Server up at " +
      rpcServer.getListenerAddress());

  infoServer = new HttpServer("proxyjt", infoBindAddress, port,
      port == 0, conf);
  infoServer.setAttribute("proxy.job.tracker", this);
  infoServer.setAttribute("conf", conf);
  infoServer.addServlet("proxy", "/proxy",
      ProxyJobTrackerServlet.class);

  // initialize history parameters.
  JobConf jobConf = new JobConf(conf);
  boolean historyInitialized = JobHistory.init(
      this, jobConf, this.LOCALMACHINE, this.startTime);
  if (historyInitialized) {
    JobHistory.initDone(jobConf, fs);
    String historyLogDir =
        JobHistory.getCompletedJobHistoryLocation().toString();
    FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
    infoServer.setAttribute("historyLogDir", historyLogDir);
    infoServer.setAttribute("fileSys", historyFS);
  }
  infoServer.start();
  LOCALPORT = infoServer.getPort();

  context = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(context, "proxyjobtracker");
  context.registerUpdater(this);

  expireUnusedFilesInCache = new ExpireUnusedFilesInCache(
      conf, getClock(), new Path(getSystemDir()), fs);

  sessionHistoryManager = new SessionHistoryManager();
  sessionHistoryManager.setConf(conf);

  try {
    String target = conf.getProxyJobTrackerThriftAddress();
    InetSocketAddress addr = NetUtils.createSocketAddr(target);
    LOG.info("Trying to start the Thrift Server at: " + target);
    ServerSocket serverSocket = new ServerSocket(addr.getPort());
    server = new TServerThread(
        TFactoryBasedThreadPoolServer.createNewServer(
            new CoronaProxyJobTrackerService.Processor(this),
            serverSocket,
            5000));
    server.start();
    LOG.info("Thrift server started on: " + target);
  } catch (IOException e) {
    LOG.info("Exception while starting the Thrift Server on CPJT: ", e);
  }
}
Example 11: start
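The fair scheduler's start method initializes its listeners, pool/load/task-selector managers, and background update thread, then registers FairSchedulerServlet at /scheduler on the JobTracker's Jetty server.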
import org.apache.hadoop.http.HttpServer; // import the package/class the method depends on
@Override
public void start() {
  try {
    Configuration conf = getConf();
    this.eagerInitListener = new EagerTaskInitializationListener(conf);
    eagerInitListener.setTaskTrackerManager(taskTrackerManager);
    eagerInitListener.start();
    taskTrackerManager.addJobInProgressListener(eagerInitListener);
    taskTrackerManager.addJobInProgressListener(jobListener);
    poolMgr = new PoolManager(conf);
    loadMgr = (LoadManager) ReflectionUtils.newInstance(
        conf.getClass("mapred.fairscheduler.loadmanager",
            CapBasedLoadManager.class, LoadManager.class), conf);
    loadMgr.setTaskTrackerManager(taskTrackerManager);
    loadMgr.start();
    taskSelector = (TaskSelector) ReflectionUtils.newInstance(
        conf.getClass("mapred.fairscheduler.taskselector",
            DefaultTaskSelector.class, TaskSelector.class), conf);
    taskSelector.setTaskTrackerManager(taskTrackerManager);
    taskSelector.start();
    Class<?> weightAdjClass = conf.getClass(
        "mapred.fairscheduler.weightadjuster", null);
    if (weightAdjClass != null) {
      weightAdjuster = (WeightAdjuster) ReflectionUtils.newInstance(
          weightAdjClass, conf);
    }
    assignMultiple = conf.getBoolean("mapred.fairscheduler.assignmultiple",
        false);
    sizeBasedWeight = conf.getBoolean("mapred.fairscheduler.sizebasedweight",
        false);
    initialized = true;
    running = true;
    lastUpdateTime = clock.getTime();
    // Start a thread to update deficits every UPDATE_INTERVAL
    if (runBackgroundUpdates)
      new UpdateThread().start();
    // Register servlet with JobTracker's Jetty server
    if (taskTrackerManager instanceof JobTracker) {
      JobTracker jobTracker = (JobTracker) taskTrackerManager;
      HttpServer infoServer = jobTracker.infoServer;
      infoServer.setAttribute("scheduler", this);
      infoServer.addServlet("scheduler", "/scheduler",
          FairSchedulerServlet.class);
    }
  } catch (Exception e) {
    // Can't load one of the managers - crash the JobTracker now while it is
    // starting up so that the user notices.
    throw new RuntimeException("Failed to start FairScheduler", e);
  }
  LOG.info("Successfully configured FairScheduler");
}