本文整理汇总了Java中org.apache.hadoop.hdfs.MiniDFSCluster.Builder.build方法的典型用法代码示例。如果您正苦于以下问题:Java Builder.build方法的具体用法?Java Builder.build怎么用?Java Builder.build使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.MiniDFSCluster.Builder
的用法示例。
在下文中一共展示了Builder.build方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initMiniHACluster
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; //导入方法依赖的package包/类
/**
 * Boots a two-NameNode HA mini DFS cluster on the given IPC ports, verifies
 * both NameNodes start in standby, then promotes the first one to active.
 *
 * @param nn1port IPC port for NameNode "nn1"
 * @param nn2port IPC port for NameNode "nn2"
 * @return a running cluster with NameNode 0 active
 * @throws IOException if the cluster fails to start
 */
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration conf = new Configuration();
  // One nameservice ("ns1") holding both NameNodes of the HA pair.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port)));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  // Both NameNodes come up standby; promote index 0 and confirm the flip.
  NameNode firstNN = cluster.getNameNode(0);
  NameNode secondNN = cluster.getNameNode(1);
  assertTrue(firstNN.isStandbyState());
  assertTrue(secondNN.isStandbyState());
  cluster.transitionToActive(0);
  assertFalse(firstNN.isStandbyState());
  return cluster;
}
示例2: initMiniCluster
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; //导入方法依赖的package包/类
/**
 * Starts a single-NameNode mini DFS cluster for tests.
 *
 * @param port  NameNode RPC port
 * @param numDN number of DataNodes to launch
 * @param map   extra configuration entries applied to the cluster's conf
 * @return the running cluster
 * @throws Exception if startup fails
 */
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  // Route the cluster's on-disk state into a dedicated test directory.
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  map.forEach(hconf::set);
  return new MiniDFSCluster.Builder(hconf)
      .numDataNodes(numDN)
      .nameNodePort(port)
      .build();
}
示例3: initMiniCluster
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; //导入方法依赖的package包/类
/**
 * Starts a single-NameNode mini DFS cluster for tests, additionally lowering
 * the NameNode's minimum block size limit to 1024 bytes so tests may create
 * files with very small blocks.
 *
 * @param port  NameNode RPC port
 * @param numDN number of DataNodes to launch
 * @param map   extra configuration entries applied to the cluster's conf
 * @return the running cluster
 * @throws Exception if startup fails
 */
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  // Route the cluster's on-disk state into a dedicated test directory.
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  map.forEach(hconf::set);
  // Allow tiny blocks; the NameNode's default minimum would reject them.
  hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
  return new MiniDFSCluster.Builder(hconf)
      .numDataNodes(numDN)
      .nameNodePort(port)
      .build();
}
示例4: initMiniCluster
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; //导入方法依赖的package包/类
/**
 * Builds a mini DFS cluster from the supplied configuration on the fixed
 * CLUSTER_PORT and stores it in the enclosing test's {@code cluster} field.
 *
 * @param hconf        configuration for the cluster
 * @param numDataNodes number of DataNodes to launch
 * @throws IOException if startup fails
 */
private void initMiniCluster(Configuration hconf, int numDataNodes)
    throws IOException {
  cluster = new MiniDFSCluster.Builder(hconf)
      .numDataNodes(numDataNodes)
      .nameNodePort(CLUSTER_PORT)
      .build();
}
示例5: testInsertWithHDFSDown
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; //导入方法依赖的package包/类
/**
 * End-to-end test of an HDFS-backed table surviving an HDFS outage: create an
 * hdfsstore-backed table, insert and flush a row, take the NameNode (and later
 * DataNodes) down, restart them, and verify inserts and HDFS-backed queries
 * work again afterwards.
 *
 * NOTE(review): the strict ordering of cluster lifecycle calls, sleeps, and
 * SQL statements below is intentional — do not reorder.
 */
public void testInsertWithHDFSDown() throws Exception {
// Stand up a 2-DataNode mini HDFS cluster on a random free port.
int clusterPort = AvailablePortHelper.getRandomAvailableTCPPort();
System.setProperty("test.build.data", HDFS_DIR);
Configuration hconf = new HdfsConfiguration();
// hconf.set("hadoop.log.dir", "/tmp/hdfs/logs");
hconf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
Builder builder = new MiniDFSCluster.Builder(hconf);
builder.numDataNodes(2);
builder.nameNodePort(clusterPort);
MiniDFSCluster cluster = builder.build();
// Connect to the database over a random JGroups mcast port.
Properties props = new Properties();
int mcastPort = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
props.put("mcast-port", String.valueOf(mcastPort));
Connection conn = TestUtil.getConnection(props);
Statement st = conn.createStatement();
st.execute("create schema emp");
st.execute("set schema emp");
// Connection errors are expected while HDFS is down later in the test.
addExpectedException(ConnectException.class);
// BATCHTIMEINTERVAL 1ms makes queued writes flush to HDFS almost immediately.
st.execute("create hdfsstore myhdfs namenode 'hdfs://localhost:" + clusterPort
+ "' homedir '" + HDFS_DIR + "' BATCHTIMEINTERVAL 1 milliseconds");
GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
st.execute("create table mytab (col1 int primary key) hdfsstore (myhdfs) eviction by criteria (col1 < 1000) evict incoming");
st.execute("insert into mytab values (5)");
//Wait for data to be flushed to hdfs
Thread.sleep(5000);
//query hdfs, which will open a reader
st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n where col1=5");
// Simulate the outage: take the NameNode down while the store holds a reader.
cluster.shutdownNameNodes();
// NOTE(review): the insert-while-down failure check below is disabled;
// presumably it was flaky or the error surfaced differently — confirm before
// re-enabling.
// try {
// st.execute("insert into mytab values (118)");
// fail("expected exception in connecting to unavailable HDFS store");
// } catch (SQLException e) {
// if (!"X0Z30".equals(e.getSQLState())) {
// throw e;
// }
// if (!HDFSIOException.class.equals(e.getCause().getClass())) {
// throw e;
// }
// }
// Bring HDFS back and verify the store recovers transparently.
cluster.restartNameNode();
cluster.restartDataNodes();
//Wait for namenode to leave safe mode
Thread.sleep(10000);
st.execute("insert into mytab values (118)");
//query hdfs to trigger scan
st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n");
GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
// Teardown: drop SQL objects first, then stop HDFS, then the database.
st.execute("drop table mytab");
st.execute("drop hdfsstore myhdfs");
cluster.shutdownDataNodes();
cluster.shutdownNameNodes();
TestUtil.shutDown();
}