This article collects typical usage examples of the Java method org.apache.hadoop.mapred.MiniMRClientClusterFactory.create: what the method does, how to call it, and what real-world uses look like. The curated examples below should answer those questions; you can also read further about the enclosing class, org.apache.hadoop.mapred.MiniMRClientClusterFactory.
The following shows 15 code examples of MiniMRClientClusterFactory.create, sorted by popularity by default.
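Before the individual examples, here is a minimal sketch of the pattern they all follow: create(caller, numNodeManagers, conf) brings up a mini MapReduce cluster for a JUnit test, and the cluster is released afterwards. This is an illustrative sketch, not taken from any one example below; the class name and field names are assumptions, and the MiniDFSCluster setup mirrors Examples 1 and 4.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterSketchTest { // hypothetical test class

  private static MiniDFSCluster dfsCluster;
  private static MiniMRClientCluster mrCluster;

  @BeforeClass
  public static void startClusters() throws Exception {
    Configuration conf = new Configuration();
    // Bring up a one-DataNode HDFS and point the default FS at it.
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
    // Bring up a one-NodeManager MapReduce cluster; its effective
    // configuration (actual addresses and ports) is exposed via getConfig().
    mrCluster = MiniMRClientClusterFactory.create(MiniClusterSketchTest.class, 1, conf);
  }

  @AfterClass
  public static void stopClusters() throws IOException {
    if (mrCluster != null) {
      mrCluster.stop();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}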
Example 1: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
Example 2: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
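A note on why Example 2 (and Examples 5 and 7) write mrCluster.getConfig() out to core-site.xml: the mini cluster typically binds to ephemeral ports, so client code and containers need the cluster's effective configuration rather than the Configuration that was passed in. Below is a hedged sketch of driving a job against that configuration; it is a fragment that reuses the mrCluster field from Example 2, the job name and paths are illustrative assumptions, and it relies on org.apache.hadoop.mapreduce.Job plus the lib.input/lib.output FileInputFormat/FileOutputFormat classes.

// Minimal sketch: submit a job against the mini cluster using its effective config.
Configuration clusterConf = mrCluster.getConfig();
Job job = Job.getInstance(clusterConf, "minicluster-smoke-test"); // hypothetical job name
FileInputFormat.addInputPath(job, new Path("/tmp/in"));   // illustrative input path
FileOutputFormat.setOutputPath(job, new Path("/tmp/out")); // illustrative output path
boolean succeeded = job.waitForCompletion(true);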
Example 3: initCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);
  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
Example 4: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
Example 5: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH) +
      File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Example 6: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf, false);
  createKeysAsJson("keys.json");
}
Example 7: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf, false);
  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Example 8: initCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf, false);
  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
Example 9: startMrCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
private void startMrCluster() throws IOException {
  Configuration conf = new JobConf();
  FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri());
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
  String addr = MiniYARNCluster.getHostname() + ":0";
  conf.set(YarnConfiguration.RM_ADDRESS, addr);
  conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr);
  m_mrCluster = MiniMRClientClusterFactory.create(
      HadoopTestUtils.class,
      "MR4CTests",
      1, // num node managers
      conf
  );
  // make sure startup is finished
  for (int i = 0; i < 60; i++) {
    String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
    if (newAddr.equals(addr)) {
      s_log.warn("MiniYARNCluster startup not complete");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    } else {
      s_log.info("MiniYARNCluster now available at {}", newAddr);
      return;
    }
  }
  throw new IOException("MiniYARNCluster taking too long to startup");
}
Example 10: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@BeforeClass
public static void setup() throws IOException {
  Properties props = new Properties();
  InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
  props.load(is);
  for (Entry<Object, Object> entry : props.entrySet()) {
    System.setProperty((String) entry.getKey(), (String) entry.getValue());
  }
  Map<String, String> envMap = new HashMap<String, String>();
  envMap.put("JAVA_HOME", System.getProperty("java.home"));
  setEnv(envMap);
  final Configuration conf = new Configuration();
  final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
  testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
  inDir = new Path(testdir, "in");
  outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(testdir) && !fs.delete(testdir, true)) {
    throw new IOException("Could not delete " + testdir);
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir);
  }
  for (int i = 0; i < inFiles.length; i++) {
    inFiles[i] = new Path(inDir, "part_" + i);
    createFile(inFiles[i], conf);
  }
  // create the mini cluster to be used for the tests
  mrCluster = MiniMRClientClusterFactory.create(WordCountToolTest.class, 1, new Configuration());
}
Example 11: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@BeforeClass
public static void setup() throws IOException {
  Properties props = new Properties();
  InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
  props.load(is);
  for (Entry<Object, Object> entry : props.entrySet()) {
    System.setProperty((String) entry.getKey(), (String) entry.getValue());
  }
  Map<String, String> envMap = new HashMap<String, String>();
  envMap.put("JAVA_HOME", System.getProperty("java.home"));
  setEnv(envMap);
  final Configuration conf = new Configuration();
  final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
  testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
  inDir = new Path(testdir, "in");
  outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(testdir) && !fs.delete(testdir, true)) {
    throw new IOException("Could not delete " + testdir);
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir);
  }
  for (int i = 0; i < inFiles.length; i++) {
    inFiles[i] = new Path(inDir, "part_" + i);
    createFile(inFiles[i], conf);
  }
  // create the mini cluster to be used for the tests
  mrCluster = MiniMRClientClusterFactory.create(GrepToolTest.class, 1, new Configuration());
}
Example 12: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
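Example 12 uses a per-test @Before; a matching @After that releases the cluster keeps repeated test runs from leaking mini-cluster processes. A minimal sketch (the cluster field name mirrors Example 12; the method name is an assumption):

@After
public void teardown() throws IOException {
  if (cluster != null) {
    cluster.stop();
    cluster = null;
  }
}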
Example 13: start
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }
  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }
  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
Example 14: start
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster(nnPort, conf, numDataNodes, true, true,
        dfsOpts, null, null);
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }
  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }
  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
Example 15: setUpClass
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import of the package/class this method depends on
/**
 * Start all required mini clusters.
 */
@BeforeClass
public static void setUpClass() throws Exception {
  // Conf dir
  if (!new File(confDir).mkdirs()) {
    fail("Failed to create config directories.");
  }
  // HDFS
  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  final Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
  miniMR = MiniMRClientClusterFactory.create(BaseHiveIT.class, 1, conf);
  writeConfiguration(miniMR.getConfig(), confDir + "/core-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/hdfs-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/mapred-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/yarn-site.xml");
  // Configuration for both HMS and HS2
  METASTORE_PORT = NetworkUtils.getRandomPort();
  HIVE_SERVER_PORT = NetworkUtils.getRandomPort();
  final HiveConf hiveConf = new HiveConf(miniDFS.getConfiguration(0), HiveConf.class);
  hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=target/metastore_db;create=true");
  hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, Utils.format("thrift://{}:{}", HOSTNAME, METASTORE_PORT));
  hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, "localhost");
  hiveConf.set("org.jpox.autoCreateSchema", "true");
  hiveConf.set("datanucleus.schema.autoCreateTables", "true");
  hiveConf.set("hive.metastore.schema.verification", "false");
  hiveConf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, HIVE_SERVER_PORT);
  // Hive metastore
  Callable<Void> metastoreService = () -> {
    try {
      HiveMetaStore.startMetaStore(METASTORE_PORT, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
      while (true);
    } catch (Throwable e) {
      throw new Exception("Error starting metastore", e);
    }
  };
  hiveMetastoreExecutor.submit(metastoreService);
  NetworkUtils.waitForStartUp(HOSTNAME, METASTORE_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
  // HiveServer 2
  hiveServer2 = new HiveServer2();
  hiveServer2.init(hiveConf);
  hiveServer2.start();
  writeConfiguration(hiveServer2.getHiveConf(), confDir + "/hive-site.xml");
  NetworkUtils.waitForStartUp(HOSTNAME, HIVE_SERVER_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
  // JDBC Connection to Hive
  Class.forName(HIVE_JDBC_DRIVER);
  hiveConnection = HiveMetastoreUtil.getHiveConnection(
      getHiveJdbcUrl(),
      HadoopSecurityUtil.getLoginUser(conf),
      Collections.emptyList()
  );
  // And finally we're initialized
  isHiveInitialized = true;
}