This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode. If you are wondering what NameNode.createNameNode does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.NameNode.
Five code examples of NameNode.createNameNode are shown below, sorted by popularity by default.
Example 1: restartNameNode
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class this method depends on
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);

  // Explicit args take precedence over the startup option remembered from the
  // previous start; otherwise the saved option is turned back into args.
  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }

  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
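This helper appears to come from MiniDFSCluster, so in practice a test would not call NameNode.createNameNode directly but go through the cluster API. A minimal sketch of that usage, assuming the standard MiniDFSCluster.Builder and a default test data directory (the class name in the sketch is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RestartNameNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Bring up a one-DataNode mini cluster; the builder picks free ports.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Restart NameNode 0 and wait for the cluster to report active again;
      // under the hood this goes through the restartNameNode method shown above.
      cluster.restartNameNode(0, true);
    } finally {
      cluster.shutdown();
    }
  }
}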
Example 2: startNameNode
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class this method depends on
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  hdfsDir = new File(TEST_DATA_DIR, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  DFSTestUtil.formatNameNode(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
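A caller of this helper is responsible for stopping the returned NameNode so the bound ports are released. A minimal sketch of such a caller, assuming JUnit and the helper above (the test name and the assertion are illustrative, NameNode#stop and NameNode#getServiceRpcAddress are the real APIs used):

@Test
public void testStartWithServiceRpc() throws IOException {
  NameNode nn = null;
  try {
    // true: also bind a separate service RPC address for DataNodes and admin tools
    nn = startNameNode(true);
    assertNotNull(nn.getServiceRpcAddress());
  } finally {
    if (nn != null) {
      nn.stop();  // releases the RPC and HTTP ports bound by createNameNode
    }
  }
}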
Example 3: startBackupNode
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class this method depends on
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");

  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode) NameNode.createNameNode(args, conf);
  return bu;
}
Example 4: canStartNameNode
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class this method depends on
/**
 * Check whether the namenode can be started.
 */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[]{}, conf);
  } catch (IOException e) {
    // A BindException means the configured port is still in use, so the
    // NameNode cannot be started; any other IOException is unexpected.
    if (e instanceof java.net.BindException) {
      return false;
    }
    throw e;
  } finally {
    stopNameNode(nn2);
  }
  return true;
}
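The stopNameNode helper used in the finally block is not shown in this snippet. It is presumably just a null-safe shutdown; a minimal sketch of what it might look like (the helper itself is an assumption, NameNode#stop is the real API):

private void stopNameNode(NameNode nn) {
  if (nn != null) {
    nn.stop();  // shut down the RPC/HTTP servers so the port becomes free again
  }
}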
Example 5: createNameNode
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class this method depends on
private void createNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean format, StartupOption operation,
    String clusterId, String nameserviceId,
    String nnId)
    throws IOException {
  // Format and clean out DataNode directories
  if (format) {
    DFSTestUtil.formatNameNode(conf);
  }
  if (operation == StartupOption.UPGRADE) {
    operation.setClusterId(clusterId);
  }

  // Start the NameNode after saving the default file system.
  String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
  String[] args = createArgs(operation);
  NameNode nn = NameNode.createNameNode(args, conf);
  if (operation == StartupOption.RECOVER) {
    return;
  }

  // After the NN has started, set back the bound ports into
  // the conf
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
  if (nn.getHttpAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
  }
  if (nn.getHttpsAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
  }
  DFSUtil.setGenericConf(conf, nameserviceId, nnId,
      DFS_NAMENODE_HTTP_ADDRESS_KEY);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
      operation, new Configuration(conf));

  // Restore the default fs name
  if (originalDefaultFs == null) {
    conf.set(FS_DEFAULT_NAME_KEY, "");
  } else {
    conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
  }
}
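Because this method writes the actually bound (possibly ephemeral) ports back into the configuration, tests can always obtain a usable NameNode address from the cluster afterwards. A minimal sketch of reading those addresses back, assuming the public MiniDFSCluster API (the class name in the sketch is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class BoundPortSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).build();
    try {
      cluster.waitActive();
      // The URI reflects the RPC port that createNameNode wrote back into conf.
      System.out.println("NameNode RPC URI:   " + cluster.getURI(0));
      System.out.println("NameNode HTTP addr: "
          + cluster.getNameNode(0).getHttpAddress());
    } finally {
      cluster.shutdown();
    }
  }
}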