This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ClassLoaderTestHelper. If you are wondering what ClassLoaderTestHelper is for, or how to use it, the curated code examples below may help.
The ClassLoaderTestHelper class belongs to the org.apache.hadoop.hbase.util package. Six code examples are shown below, sorted by popularity by default.
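As a quick orientation before the examples, the sketch below shows the two ClassLoaderTestHelper methods they exercise: buildJar(dir, className, code), which compiles a Java source string and packages the compiled class into a jar in the given directory, and addJarFilesToJar(targetJar, libPrefix, srcJars...), which nests existing jars under the given prefix inside an outer jar. This is a minimal sketch, not one of the collected examples; the scratch directory, class names, and the "lib/" prefix are made-up illustrations.

import java.io.File;
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper;

public class ClassLoaderTestHelperSketch {
  public static void main(String[] args) throws Exception {
    // Scratch directory and class names are illustrative only.
    String dir = "/tmp/hbase-classloader-test";

    // Compile two trivial source strings and package each compiled class
    // into its own jar under the scratch directory.
    File innerJar1 = ClassLoaderTestHelper.buildJar(dir, "TestClassA",
        "public class TestClassA {}");
    File innerJar2 = ClassLoaderTestHelper.buildJar(dir, "TestClassB",
        "public class TestClassB {}");

    // Nest both jars under a "lib/" directory inside an outer jar, mirroring
    // how the loadingClassFromLibDirInJar examples below build their outer.jar.
    File outerJar = new File(dir, "outer.jar");
    ClassLoaderTestHelper.addJarFilesToJar(outerJar, "lib/", innerJar1, innerJar2);
    System.out.println("Built " + outerJar.getAbsolutePath());
  }
}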
Example 1: buildCoprocessorJar
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
static File buildCoprocessorJar(String className) throws Exception {
  String code =
      "import org.apache.hadoop.hbase.coprocessor.*;" +
      "public class " + className + " implements RegionCoprocessor {}";
  return ClassLoaderTestHelper.buildJar(
      TEST_UTIL.getDataTestDir().toString(), className, code);
}
Example 2: buildCoprocessorJar (a variant for the older coprocessor API, where the generated class extends BaseRegionObserver instead of implementing RegionCoprocessor)
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
static File buildCoprocessorJar(String className) throws Exception {
  String code = "import org.apache.hadoop.hbase.coprocessor.*;" +
      "public class " + className + " extends BaseRegionObserver {}";
  return ClassLoaderTestHelper.buildJar(
      TEST_UTIL.getDataTestDir().toString(), className, code);
}
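To sanity-check what buildCoprocessorJar produced, the jar's entries can be listed with the JDK's standard JarFile API. This sketch is not part of the collected examples; the jar path in main() is a placeholder for the File returned by buildCoprocessorJar, and because the generated source has no package declaration the class file is expected at the root of the jar.

import java.io.File;
import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class JarContentsCheck {
  // Prints every entry in the given jar. For a coprocessor class compiled from
  // a source string without a package declaration, a "<className>.class" entry
  // should show up at the root of the jar.
  static void listEntries(File jar) throws Exception {
    try (JarFile jarFile = new JarFile(jar)) {
      Enumeration<JarEntry> entries = jarFile.entries();
      while (entries.hasMoreElements()) {
        System.out.println(entries.nextElement().getName());
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // Placeholder path; in a test you would pass the File returned by
    // buildCoprocessorJar(className) instead.
    listEntries(new File("/tmp/hbase-classloader-test/MyCoprocessor.jar"));
  }
}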
Example 3: loadingClassFromLibDirInJar (Examples 3 through 6 are near-identical variants of the same test helper, apparently from different HBase versions; they differ mainly in the admin handle used, Admin vs. HBaseAdmin, and the region type used for verification, Region vs. HRegion.)
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(
      outerJarFile, libPrefix, innerJarFile1, innerJarFile2);
  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
      outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
  // create a table that references the coprocessors
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
  // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
      "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (Region region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
Example 4: loadingClassFromLibDirInJar
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(
      outerJarFile, libPrefix, innerJarFile1, innerJarFile2);
  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
      outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
  // create a table that references the coprocessors
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
  // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
      "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region :
      hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
Example 5: loadingClassFromLibDirInJar
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(
      outerJarFile, libPrefix, innerJarFile1, innerJarFile2);
  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
      outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
  // create a table that references the coprocessors
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.addFamily(new HColumnDescriptor("test"));
  // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
      "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region :
      hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionNameAsString().startsWith(tableName)) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
Example 6: loadingClassFromLibDirInJar
import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; // import the dependency package/class
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(
      outerJarFile, libPrefix, innerJarFile1, innerJarFile2);
  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
      outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
  // create a table that references the coprocessors
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
  // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
      "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
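For context on how the helper above is typically driven, the sketch below shows a plausible JUnit 4 entry point that calls loadingClassFromLibDirInJar with a lib-directory prefix. The test name, the "lib/" prefix, and the stub method are assumptions made so the sketch compiles on its own; they are not taken from the collected examples.

import org.junit.Test;

public class ClassLoadingFromLibDirSketch {

  // Hypothetical driver: with this prefix the nested coprocessor jars end up
  // under a "lib/" directory inside outer.jar, and the region coprocessor host
  // is expected to resolve cpName1 and cpName2 from there.
  @Test
  public void testClassLoadingFromLibDirInJar() throws Exception {
    loadingClassFromLibDirInJar("lib/");
  }

  // Stub so this sketch is self-contained; in the real test class this is the
  // loadingClassFromLibDirInJar(String libPrefix) method shown above.
  void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  }
}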