This article collects typical usage examples of the Java class org.apache.hadoop.hbase.IntegrationTestingUtility. If you are unsure what IntegrationTestingUtility is for or how it is used in practice, the curated class examples below may help.
The IntegrationTestingUtility class belongs to the org.apache.hadoop.hbase package. Fifteen code examples are shown below, ordered by popularity by default.
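For orientation before the examples, here is a minimal, hedged sketch of the utility's typical lifecycle. The class name ExampleLifecycle is hypothetical, and the exact accessor set (e.g. getAdmin() vs. getHBaseAdmin()) varies across HBase versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;

public class ExampleLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // With this flag set, the utility runs against an existing distributed
    // cluster; without it, a local mini-cluster is started instead.
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
    util.initializeCluster(1); // ensure at least one live region server
    try {
      // ... run test logic via util.getAdmin(), util.getConnection(), etc. ...
    } finally {
      util.restoreCluster(); // stop the mini-cluster, or restore the real one
    }
  }
}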
Example 1: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf, new IntegrationTestReplication(), args);
  System.exit(ret);
}
Example 2: runTestFromCommandLine
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  Admin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
Example 3: runTestFromCommandLine
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  String table = getTablename();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  HBaseAdmin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
Example 4: beforeMethod
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
@Before
public void beforeMethod() throws Exception {
  if (!initialized) {
    LOG.info("Setting up IntegrationTestRSGroup");
    LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
    TEST_UTIL = new IntegrationTestingUtility();
    TEST_UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
        RSGroupBasedLoadBalancer.class.getName());
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        RSGroupAdminEndpoint.class.getName());
    ((IntegrationTestingUtility) TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
    // set shared configs
    admin = TEST_UTIL.getAdmin();
    cluster = TEST_UTIL.getHBaseClusterInterface();
    rsGroupAdmin = new VerifyingRSGroupAdminClient(
        new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration());
    LOG.info("Done initializing cluster");
    initialized = true;
    // the cluster may not be clean, so run the teardown once to clean up
    afterMethod();
  }
}
Example 5: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] actualArgs = args;
  if (args.length > 0 && "-c".equals(args[0])) {
    int argCount = args.length - 2;
    if (argCount < 0) {
      throw new IllegalArgumentException("Missing path for -c parameter");
    }
    // load the resource specified by the second parameter
    conf.addResource(args[1]);
    actualArgs = new String[argCount];
    System.arraycopy(args, 2, actualArgs, 0, argCount);
  }
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf, new ChaosMonkeyRunner(), actualArgs);
  System.exit(ret);
}
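For reference, an invocation of this runner might look like: hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner -c /path/to/override-site.xml <monkey args>. The package name and the remaining arguments depend on the HBase version, so treat this command line as illustrative only; the -c flag simply loads an extra configuration resource before the tool runs.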
Example 6: runTestFromCommandLine
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  try (Connection conn = ConnectionFactory.createConnection(getConf());
      Admin admin = conn.getAdmin()) {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(getTablename());
  return 0;
}
Example 7: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf, new IntegrationTestTableSnapshotInputFormat(), args);
  System.exit(ret);
}
Example 8: validateDeletedPartitionsFile
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false)) {
    return;
  }
  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
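For context, here is a hedged sketch of the setup this check complements: a job that uses TotalOrderPartitioner first writes a partitions file and registers it in the job configuration. The helper name configurePartitionedJob and the partitions path are illustrative; TotalOrderPartitioner.getPartitionFile(conf), used in the assertion above, reads back the same configuration entry that setPartitionFile writes:

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

// Hypothetical helper: configure a job to use a TotalOrderPartitioner
// partitions file. After the job finishes, that file should be deleted,
// which is exactly what validateDeletedPartitionsFile asserts above.
static Job configurePartitionedJob(Configuration conf) throws Exception {
  Job job = Job.getInstance(conf, "example-partitioned-job");
  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(fs.getWorkingDirectory(),
      "partitions_" + UUID.randomUUID());
  TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), partitionsFile);
  job.setPartitionerClass(TotalOrderPartitioner.class);
  return job;
}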
Example 9: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  // not using ToolRunner to avoid unnecessary call to setConf()
  args = new GenericOptionsParser(conf, args).getRemainingArgs();
  int status = new IntegrationTestImportTsv().run(args);
  System.exit(status);
}
Example 10: getTestingUtil
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
protected IntegrationTestingUtility getTestingUtil(Configuration conf) {
  if (this.util == null) {
    if (conf == null) {
      this.util = new IntegrationTestingUtility();
      this.setConf(util.getConfiguration());
    } else {
      this.util = new IntegrationTestingUtility(conf);
    }
  }
  return util;
}
Example 11: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(configuration);
  IntegrationTestZKAndFSPermissions tool = new IntegrationTestZKAndFSPermissions();
  int ret = ToolRunner.run(configuration, tool, args);
  System.exit(ret);
}
Example 12: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf,
      new IntegrationTestTimeBoundedMultiGetRequestsWithRegionReplicas(), args);
  System.exit(ret);
}
Example 13: main
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  util.initializeCluster(1);
  ChaosMonkey monkey = new ChaosMonkey(util);
  int ret = ToolRunner.run(conf, monkey, args);
  System.exit(ret);
}
Example 14: getTestingUtil
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Example 15: commitJob
import org.apache.hadoop.hbase.IntegrationTestingUtility; // import the required package/class
@Override
public void commitJob(JobContext context) throws IOException {
  super.commitJob(context);
  // inherit jar dependencies added to distributed cache loaded by parent job
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  conf.set("mapred.job.classpath.archives",
      context.getConfiguration().get("mapred.job.classpath.archives", ""));
  conf.set("mapreduce.job.cache.archives.visibilities",
      context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));
  // can't use the IntegrationTest instance of util because it hasn't been
  // instantiated on the JVM running this method. Create our own.
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  // this is why we're here: launch a child job. The rest of this should
  // look a lot like TestImportTsv#testMROnTable.
  final String table = format("%s-%s-child", NAME, context.getJobID());
  final String cf = "FAM";
  String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  conf.set(ImportTsv.CREDENTIALS_LOCATION, fileLocation);
  String[] args = {
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
      table
  };
  try {
    util.createTable(table, cf);
    LOG.info("testRunFromOutputCommitter: launching child job.");
    TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
  } catch (Exception e) {
    throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
  } finally {
    if (util.getHBaseAdmin().tableExists(table)) {
      util.deleteTable(table);
    }
  }
}