本文整理汇总了Java中org.apache.hadoop.http.HttpServer.addInternalServlet方法的典型用法代码示例。如果您正苦于以下问题:Java HttpServer.addInternalServlet方法的具体用法?Java HttpServer.addInternalServlet怎么用?Java HttpServer.addInternalServlet使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.http.HttpServer的用法示例。
在下文中一共展示了HttpServer.addInternalServlet方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setupServlets
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Registers the NameNode's internal servlets on the given HTTP server.
 * The trailing boolean on most registrations presumably toggles whether
 * authentication is required for that endpoint — confirm against
 * {@code HttpServer.addInternalServlet}.
 */
private static void setupServlets(HttpServer httpServer, Configuration conf) {
  // Startup progress reporting (no auth flag on this overload).
  httpServer.addInternalServlet("startupProgress",
      StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);

  // Delegation-token management endpoints (flag = true).
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, GetDelegationTokenServlet.class,
      true);
  httpServer.addInternalServlet("renewDelegationToken",
      RenewDelegationTokenServlet.PATH_SPEC, RenewDelegationTokenServlet.class,
      true);
  httpServer.addInternalServlet("cancelDelegationToken",
      CancelDelegationTokenServlet.PATH_SPEC,
      CancelDelegationTokenServlet.class, true);

  // Administrative endpoints (flag = true).
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true);
  httpServer.addInternalServlet("getimage", "/getimage",
      GetImageServlet.class, true);

  // Read-only file-system browsing endpoints (flag = false).
  httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class, false);
  httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class,
      false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
示例2: setupServlets
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Registers the NameNode's internal servlets on the given HTTP server.
 * The trailing boolean presumably controls whether authentication is
 * required for the endpoint — confirm against
 * {@code HttpServer.addInternalServlet}.
 */
private static void setupServlets(HttpServer httpServer, Configuration conf) {
  // Delegation-token management endpoints (flag = true).
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, GetDelegationTokenServlet.class,
      true);
  httpServer.addInternalServlet("renewDelegationToken",
      RenewDelegationTokenServlet.PATH_SPEC, RenewDelegationTokenServlet.class,
      true);
  httpServer.addInternalServlet("cancelDelegationToken",
      CancelDelegationTokenServlet.PATH_SPEC,
      CancelDelegationTokenServlet.class, true);

  // Administrative endpoint (flag = true).
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true);

  // Read-only file-system browsing endpoints (flag = false).
  httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class, false);
  httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class,
      false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
示例3: initialize
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize checkpoint state for the backup node: read checkpoint
 * scheduling parameters from the configuration and register the
 * image-transfer servlet on the backup node's HTTP server.
 *
 * @param conf configuration to read checkpoint settings from
 * @throws IOException declared for callers; nothing here throws it directly
 */
private void initialize(Configuration conf) throws IOException {
  // Create connection to the namenode.
  shouldRun = true;
  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY,
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
  // Pull out exact http address for posting url to avoid ip aliasing issues
  String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
      DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
  // Fix: the original did substring(0, indexOf(":")) unconditionally, which
  // throws StringIndexOutOfBoundsException (index -1) when a misconfigured
  // address omits the ":port" suffix. Fall back to the whole string instead;
  // behavior is unchanged whenever a colon is present.
  int portSep = fullInfoAddr.indexOf(':');
  infoBindAddress = portSep >= 0 ? fullInfoAddr.substring(0, portSep)
      : fullInfoAddr;
  HttpServer httpServer = backupNode.httpServer;
  httpServer.setAttribute("name.system.image", getFSImage());
  httpServer.setAttribute("name.conf", conf);
  // Serve the fsimage to peers over HTTP.
  httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  LOG.info("Checkpoint Period : " + checkpointPeriod + " secs " +
      "(" + checkpointPeriod/60 + " min)");
  LOG.info("Log Size Trigger : " + checkpointSize + " bytes " +
      "(" + checkpointSize/1024 + " KB)");
}
示例4: initHttpServer
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Create and start the TaskTracker's embedded HTTP server and register
 * its servlets (reconfiguration, map-output shuffle, task logs).
 *
 * @param conf job configuration supplying bind address, port and thread count
 * @param useNettyMapOutputs not read in this method; presumably consumed
 *        elsewhere (the "nettyMapOutputHttpPort" attribute is set from a
 *        field instead) — TODO confirm
 * @throws IOException if the server cannot be created or started
 */
protected void initHttpServer(JobConf conf,
boolean useNettyMapOutputs) throws IOException {
// Resolve the HTTP address; getServerAddress presumably reconciles the
// older bindAddress/port keys with the combined address key — confirm.
String infoAddr =
NetUtils.getServerAddress(conf,
"tasktracker.http.bindAddress",
"tasktracker.http.port",
"mapred.task.tracker.http.address");
InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
String httpBindAddress = infoSocAddr.getHostName();
int httpPort = infoSocAddr.getPort();
// findPort is true only when port 0 (ephemeral) was requested.
server = new HttpServer("task", httpBindAddress, httpPort,
httpPort == 0, conf);
workerThreads = conf.getInt("tasktracker.http.threads", 40);
server.setThreads(1, workerThreads);
// let the jsp pages get to the task tracker, config, and other relevant
// objects
FileSystem local = FileSystem.getLocal(conf);
this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
server.setAttribute("task.tracker", this);
server.setAttribute("local.file.system", local);
server.setAttribute("conf", conf);
server.setAttribute("log", LOG);
server.setAttribute("localDirAllocator", localDirAllocator);
server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
// Expose the outer TaskTracker for the reconfiguration servlet path.
server.setAttribute(ReconfigurationServlet.
CONF_SERVLET_RECONFIGURABLE_PREFIX + "/ttconfchange",
TaskTracker.this);
server.setAttribute("nettyMapOutputHttpPort", nettyMapOutputHttpPort);
// Servlet registrations.
server.addInternalServlet("reconfiguration", "/ttconfchange",
ReconfigurationServlet.class);
server.addInternalServlet(
"mapOutput", "/mapOutput", MapOutputServlet.class);
server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
server.start();
// Record the actual port in case an ephemeral one was assigned.
this.httpPort = server.getPort();
checkJettyPort();
}
示例5: start
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Start the JournalNode HTTP server and register the edit-log transfer
 * servlet; enables SPNEGO authentication when security is on.
 *
 * @throws IOException if the web server cannot be created or started
 */
void start() throws IOException {
final InetSocketAddress bindAddr = getAddress(conf);
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
.get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
bindAddr.getHostName()));
int tmpInfoPort = bindAddr.getPort();
// Anonymous subclass: the instance-initializer block below runs right
// after the HttpServer constructor and turns on SPNEGO in secure mode.
// findPort is true only when port 0 (ephemeral) was requested.
httpServer = new HttpServer("journal", bindAddr.getHostName(),
tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
.get(DFS_ADMIN, " "))) {
{
if (UserGroupInformation.isSecurityEnabled()) {
initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFS_JOURNALNODE_KEYTAB_FILE_KEY);
}
}
};
httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
// Edit-log transfer endpoint; the trailing "true" presumably requires
// authentication — confirm against HttpServer.addInternalServlet.
httpServer.addInternalServlet("getJournal", "/getJournal",
GetJournalEditServlet.class, true);
httpServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = httpServer.getPort();
LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
示例6: start
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Start the JournalNode HTTP server and register the edit-log transfer
 * servlet; enables SPNEGO authentication when security is on. Unlike the
 * sibling variant, the keytab key is resolved through
 * DFSUtil.getSpnegoKeytabKey, presumably allowing a SPNEGO-specific
 * keytab override — confirm.
 *
 * @throws IOException if the web server cannot be created or started
 */
void start() throws IOException {
final InetSocketAddress bindAddr = getAddress(conf);
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
.get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
bindAddr.getHostName()));
int tmpInfoPort = bindAddr.getPort();
// Anonymous subclass: the instance-initializer block below runs right
// after the HttpServer constructor and turns on SPNEGO in secure mode.
// findPort is true only when port 0 (ephemeral) was requested.
httpServer = new HttpServer("journal", bindAddr.getHostName(),
tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
.get(DFS_ADMIN, " "))) {
{
if (UserGroupInformation.isSecurityEnabled()) {
initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
}
}
};
httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
// Edit-log transfer endpoint; the trailing "true" presumably requires
// authentication — confirm against HttpServer.addInternalServlet.
httpServer.addInternalServlet("getJournal", "/getJournal",
GetJournalEditServlet.class, true);
httpServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = httpServer.getPort();
LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
示例7: initSecondary
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http. Also wires up the RPC proxy to
 * the remote (primary) namenode and reads checkpoint scheduling settings.
 *
 * @param conf configuration supplying checkpoint and HTTP settings
 * @throws IOException if the RPC proxy or web server cannot be created
 */
void initSecondary(Configuration conf) throws IOException {
  // RPC proxy to the remote namenode; blocks until it is reachable.
  nameNodeAddr = avatarNode.getRemoteNamenodeAddress(conf);
  this.primaryNamenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);
  fsName = avatarNode.getRemoteNamenodeHttpName(conf);

  // Checkpoint scheduling parameters.
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // Web server used for uploading files.
  String httpAddrString = NetUtils.getServerAddress(conf,
      "dfs.secondary.info.bindAddress",
      "dfs.secondary.info.port",
      "dfs.secondary.http.address");
  InetSocketAddress httpSockAddr = NetUtils.createSocketAddr(httpAddrString);
  infoBindAddress = httpSockAddr.getHostName();
  int requestedPort = httpSockAddr.getPort();
  // findPort is true only when an ephemeral port (0) was requested.
  infoServer = new HttpServer("secondary", infoBindAddress, requestedPort,
      requestedPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The actual port may differ from the requested one; publish it.
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " + "("
      + checkpointPeriod / 60 + " min)");
  LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " + "("
      + checkpointSize / 1024 + " KB)");
}
示例8: initSecondary
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http. Also reads checkpoint
 * scheduling settings and registers the outstanding-datanodes servlet on
 * the avatar node's own HTTP server.
 *
 * @param conf configuration supplying checkpoint and HTTP settings
 * @throws IOException if the web server cannot be created or started
 */
void initSecondary(Configuration conf) throws IOException {
fsName = AvatarNode.getRemoteNamenodeHttpName(conf,
avatarNode.getInstanceId());
// Initialize other scheduling parameters from the configuration
checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
// If delayed checkpointing is enabled, schedule the first checkpoint one
// full period (in ms) from now; 0 means "not delayed".
delayedScheduledCheckpointTime = conf.getBoolean("fs.checkpoint.delayed",
false) ? AvatarNode.now() + checkpointPeriod * 1000 : 0;
// initialize the webserver for uploading files.
String infoAddr =
NetUtils.getServerAddress(conf,
"dfs.secondary.info.bindAddress",
"dfs.secondary.info.port",
"dfs.secondary.http.address");
InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
// Binds to the resolved IP rather than the hostname (unlike the other
// initSecondary variant, which uses getHostName()).
String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
int tmpInfoPort = infoSocAddr.getPort();
// findPort is true only when an ephemeral port (0) was requested.
infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
tmpInfoPort == 0, conf);
infoServer.setAttribute("name.system.image", fsImage);
this.infoServer.setAttribute("name.conf", conf);
infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
infoServer.start();
// Expose the avatar node and its outstanding-datanodes report on the
// avatar node's own (separate) HTTP server.
avatarNode.httpServer.setAttribute("avatar.node", avatarNode);
avatarNode.httpServer.addInternalServlet("outstandingnodes",
"/outstandingnodes", OutStandingDatanodesServlet.class);
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
"(" + checkpointPeriod/60 + " min)");
if (delayedScheduledCheckpointTime > 0) {
LOG.warn("Standby: Checkpointing will be delayed by: " + checkpointPeriod + " seconds");
}
LOG.warn("Log Size Trigger :" + checkpointTxnCount + " transactions.");
}
示例9: initialize
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize SecondaryNameNode: JVM metrics, the RPC connection to the
 * namenode, local checkpoint storage, scheduling parameters, and the HTTP
 * server used for image transfer.
 *
 * @param conf configuration to initialize from
 * @throws IOException if storage recovery or web-server startup fails
 */
private void initialize(Configuration conf) throws IOException {
// initiate Java VM metrics
JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getClientProtocolAddress(conf);
this.conf = conf;
this.namenode =
(NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
NamenodeProtocol.versionID, nameNodeAddr, conf);
// register() presumably announces this secondary to the namenode — confirm.
this.namenode.register();
// initialize checkpoint directories
fsName = getInfoServer();
checkpointDirs = getFileStorageDirs(NNStorageConfiguration
.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary"));
checkpointEditsDirs = getFileStorageDirs(NNStorageConfiguration
.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary"));
checkpointImage = new CheckpointStorage(conf);
// Recover (or create) the local checkpoint storage layout.
checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
// Initialize other scheduling parameters from the configuration
checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
// initialize the webserver for uploading files.
String infoAddr =
NetUtils.getServerAddress(conf,
"dfs.secondary.info.bindAddress",
"dfs.secondary.info.port",
"dfs.secondary.http.address");
InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
// Binds to the resolved IP rather than the hostname.
String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
int tmpInfoPort = infoSocAddr.getPort();
// findPort is true only when an ephemeral port (0) was requested.
infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
tmpInfoPort == 0, conf);
infoServer.setAttribute("name.system.image", checkpointImage);
this.infoServer.setAttribute("name.conf", conf);
infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
infoServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
"(" + checkpointPeriod/60 + " min)");
LOG.warn("Log Size Trigger :" + checkpointTxnCount + " transactions ");
}
示例10: initialize
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize SecondaryNameNode: security login, metrics, the namenode
 * proxy, checkpoint storage, and the (optionally SPNEGO-secured) HTTP
 * server used for image transfer.
 *
 * @param conf configuration to initialize from
 * @param commandLineOpts parsed command-line options (supplies shouldFormat())
 * @throws IOException if login, storage recovery or server startup fails
 */
private void initialize(final Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
final InetSocketAddress infoSocAddr = getHttpAddress(conf);
infoBindAddress = infoSocAddr.getHostName();
UserGroupInformation.setConfiguration(conf);
// Log in via keytab before any further activity when security is enabled.
if (UserGroupInformation.isSecurityEnabled()) {
SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
}
// initiate Java VM metrics
DefaultMetricsSystem.initialize("SecondaryNameNode");
JvmMetrics.create("SecondaryNameNode",
conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getServiceAddress(conf, true);
this.conf = conf;
this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
// initialize checkpoint directories
fsName = getInfoServer();
checkpointDirs = FSImage.getCheckpointDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
// Recover or (re)format local checkpoint storage, then drop stale edits.
checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
checkpointImage.deleteTempEdits();
namesystem = new FSNamesystem(conf, checkpointImage, true);
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
// initialize the webserver for uploading files.
int tmpInfoPort = infoSocAddr.getPort();
// Anonymous subclass: the instance-initializer block below runs right
// after the HttpServer constructor and turns on SPNEGO in secure mode.
// findPort is true only when port 0 (ephemeral) was requested.
infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
tmpInfoPort == 0, conf,
new AccessControlList(conf.get(DFS_ADMIN, " "))) {
{
if (UserGroupInformation.isSecurityEnabled()) {
initSpnego(
conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
}
}
};
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
// Image-transfer endpoint; the trailing "true" presumably requires
// authentication — confirm against HttpServer.addInternalServlet.
infoServer.addInternalServlet("getimage", "/getimage",
GetImageServlet.class, true);
infoServer.start();
LOG.info("Web server init done");
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
"(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
示例11: initialize
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize SecondaryNameNode: JVM metrics, the RPC proxy to the active
 * namenode, local checkpoint storage, scheduling parameters, and the HTTP
 * server used to exchange image/edits files.
 *
 * @param conf configuration to initialize from
 * @throws IOException if storage recovery or web-server startup fails
 */
private void initialize(Configuration conf) throws IOException {
  // JVM metrics first so startup itself is instrumented.
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

  // RPC proxy to the namenode; blocks until it is reachable.
  shouldRun = true;
  nameNodeAddr = NameNode.getAddress(conf);
  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  // Checkpoint directories (image + edits) and local storage recovery.
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf);
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Checkpoint scheduling parameters.
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // Web server used for uploading files.
  String httpAddrString = NetUtils.getServerAddress(conf,
      "dfs.secondary.info.bindAddress",
      "dfs.secondary.info.port",
      "dfs.secondary.http.address");
  InetSocketAddress httpSockAddr = NetUtils.createSocketAddr(httpAddrString);
  infoBindAddress = httpSockAddr.getHostName();
  int requestedPort = httpSockAddr.getPort();
  // findPort is true only when an ephemeral port (0) was requested.
  infoServer = new HttpServer("secondary", infoBindAddress, requestedPort,
      requestedPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The actual port may differ from the requested one; publish it.
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " + "("
      + checkpointPeriod / 60 + " min)");
  LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " + "("
      + checkpointSize / 1024 + " KB)");
}
示例12: initialize
import org.apache.hadoop.http.HttpServer; //导入方法依赖的package包/类
/**
 * Initialize SecondaryNameNode: JVM metrics, the RPC connection to the
 * namenode, local checkpoint storage, scheduling parameters, and the HTTP
 * server used for image transfer. (This variant constructs
 * CheckpointStorage with no arguments, unlike sibling versions.)
 *
 * @param conf configuration to initialize from
 * @throws IOException if storage recovery or web-server startup fails
 */
private void initialize(Configuration conf) throws IOException {
// initiate Java VM metrics
JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getAddress(conf);
this.conf = conf;
this.namenode =
(NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
NamenodeProtocol.versionID, nameNodeAddr, conf);
// initialize checkpoint directories
fsName = getInfoServer();
checkpointDirs = FSImage.getCheckpointDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointImage = new CheckpointStorage();
// Recover (or create) the local checkpoint storage layout.
checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
// Initialize other scheduling parameters from the configuration
checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
// initialize the webserver for uploading files.
String infoAddr =
NetUtils.getServerAddress(conf,
"dfs.secondary.info.bindAddress",
"dfs.secondary.info.port",
"dfs.secondary.http.address");
InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
infoBindAddress = infoSocAddr.getHostName();
int tmpInfoPort = infoSocAddr.getPort();
// findPort is true only when an ephemeral port (0) was requested.
infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
tmpInfoPort == 0, conf);
infoServer.setAttribute("name.system.image", checkpointImage);
this.infoServer.setAttribute("name.conf", conf);
infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
infoServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort);
LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
"(" + checkpointPeriod/60 + " min)");
LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " +
"(" + checkpointSize/1024 + " KB)");
}