This article collects typical usage examples of the Java class org.apache.kylin.metadata.realization.IRealizationConstants. If you are wondering what IRealizationConstants does or how to use it, the curated code examples below should help.
IRealizationConstants belongs to the org.apache.kylin.metadata.realization package. The following 14 code examples show the class in use; by default they are ordered by popularity.
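For orientation, every example below reads or writes HTable-level metadata tags that Kylin stores on its HBase tables. The sketch below lists the constants referenced in these examples; the field names match the usages shown in this article, but the literal values are illustrative assumptions and may differ from the actual Kylin source.
// Hypothetical sketch of the constants referenced in the examples below.
// The field names match the usages in this article; the values are
// illustrative assumptions, not copied from the Kylin code base.
public class IRealizationConstants {
    // HTable-level metadata tags written when Kylin creates a table
    public static final String HTableTag = "KYLIN_HOST";              // metadata URL prefix of the owning Kylin instance (assumed value)
    public static final String HTableOwner = "OWNER";                 // team operating the Kylin service (assumed value)
    public static final String HTableUser = "USER";                   // cube owner (assumed value)
    public static final String HTableCreationTime = "CREATION_TIME";  // creation timestamp (assumed value)
    public static final String HTableSegmentTag = "SEGMENT";          // cube segment stored in the table (assumed value)
    public static final String HTableGitTag = "GIT_COMMIT";           // git commit of the build that deployed coprocessors (assumed value)

    // Prefix for generated HTable names of inverted-index storage (assumed value)
    public static final String IIHbaseStorageLocationPrefix = "KYLIN_II_";
}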
Example 1: check
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
public void check(List<String> segFullNameList) {
    issueExistHTables = Lists.newArrayList();
    inconsistentHTables = Lists.newArrayList();
    for (String segFullName : segFullNameList) {
        String[] sepNameList = segFullName.split(",");
        try {
            HTableDescriptor hTableDescriptor = hbaseAdmin.getTableDescriptor(TableName.valueOf(sepNameList[0]));
            String host = hTableDescriptor.getValue(IRealizationConstants.HTableTag);
            if (!dstCfg.getMetadataUrlPrefix().equalsIgnoreCase(host)) {
                inconsistentHTables.add(segFullName);
            }
        } catch (IOException e) {
            issueExistHTables.add(segFullName);
            continue;
        }
    }
}
Example 2: filterByGitCommit
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private static List<String> filterByGitCommit(Admin hbaseAdmin, List<String> tableNames) throws IOException {
    List<String> result = Lists.newLinkedList();
    List<String> filteredList = Lists.newLinkedList();
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (StringUtils.isEmpty(commitInfo)) {
        return tableNames;
    }
    logger.info("Commit Information: " + commitInfo);
    for (String tableName : tableNames) {
        HTableDescriptor tableDesc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
        String gitTag = tableDesc.getValue(IRealizationConstants.HTableGitTag);
        if (commitInfo.equals(gitTag)) {
            filteredList.add(tableName);
        } else {
            result.add(tableName);
        }
    }
    logger.info("Filtered tables don't need to deploy coprocessors: " + filteredList);
    return result;
}
Example 3: show
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private static void show() throws IOException {
    Map<String, List<String>> envs = Maps.newHashMap();
    // get all kylin hbase tables
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin hbaseAdmin = conn.getAdmin();
    String tableNamePrefix = kylinConfig.getHBaseTableNamePrefix();
    HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables(tableNamePrefix + ".*");
    for (HTableDescriptor desc : tableDescriptors) {
        String host = desc.getValue(IRealizationConstants.HTableTag);
        if (StringUtils.isEmpty(host)) {
            add("unknown", desc.getNameAsString(), envs);
        } else {
            add(host, desc.getNameAsString(), envs);
        }
    }
    for (Map.Entry<String, List<String>> entry : envs.entrySet()) {
        System.out.println(entry.getKey() + " has htable count: " + entry.getValue().size());
    }
    hbaseAdmin.close();
}
Example 4: clean
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private void clean() throws IOException {
    Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl());
    Admin hbaseAdmin = conn.getAdmin();
    for (HTableDescriptor descriptor : hbaseAdmin.listTables()) {
        String name = descriptor.getNameAsString().toLowerCase();
        if (name.startsWith("kylin") || name.startsWith("_kylin")) {
            String x = descriptor.getValue(IRealizationConstants.HTableTag);
            System.out.println("table name " + descriptor.getNameAsString() + " host: " + x);
            System.out.println(descriptor);
            System.out.println();
            descriptor.setValue(IRealizationConstants.HTableOwner, "[email protected]");
            hbaseAdmin.modifyTable(TableName.valueOf(descriptor.getNameAsString()), descriptor);
        }
    }
    hbaseAdmin.close();
}
Example 5: dropHBaseTable
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private void dropHBaseTable(ExecutableContext context) throws IOException {
    List<String> oldTables = getOldHTables();
    if (oldTables != null && oldTables.size() > 0) {
        String metadataUrlPrefix = KylinConfig.getInstanceFromEnv().getMetadataUrlPrefix();
        Admin admin = null;
        try {
            Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl());
            admin = conn.getAdmin();
            for (String table : oldTables) {
                if (admin.tableExists(TableName.valueOf(table))) {
                    HTableDescriptor tableDescriptor = admin.getTableDescriptor(TableName.valueOf(table));
                    String host = tableDescriptor.getValue(IRealizationConstants.HTableTag);
                    if (metadataUrlPrefix.equalsIgnoreCase(host)) {
                        if (admin.isTableEnabled(TableName.valueOf(table))) {
                            admin.disableTable(TableName.valueOf(table));
                        }
                        admin.deleteTable(TableName.valueOf(table));
                        logger.debug("Dropped HBase table " + table);
                        output.append("Dropped HBase table " + table + " \n");
                    } else {
                        logger.debug("Skipped HBase table " + table);
                        output.append("Skipped HBase table " + table + " \n");
                    }
                }
            }
        } finally {
            IOUtils.closeQuietly(admin);
        }
    }
}
Example 6: updateHtable
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private void updateHtable(String tableName) throws IOException {
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
    if (oldHostValue.equals(desc.getValue(IRealizationConstants.HTableTag))) {
        desc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
        hbaseAdmin.disableTable(TableName.valueOf(tableName));
        hbaseAdmin.modifyTable(TableName.valueOf(tableName), desc);
        hbaseAdmin.enableTable(TableName.valueOf(tableName));
        updatedResources.add(tableName);
    }
}
Example 7: undo
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private static void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());
    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        String tableName = (String) opt.params[0];
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];
        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            hdfsFS.rename(new Path(srcPath), new Path(dstPath));
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    }
}
Example 8: generateStorageLocation
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private String generateStorageLocation() {
    String namePrefix = IRealizationConstants.IIHbaseStorageLocationPrefix;
    String tableName = "";
    do {
        StringBuffer sb = new StringBuffer();
        sb.append(namePrefix);
        for (int i = 0; i < HBASE_TABLE_LENGTH; i++) {
            int idx = (int) (Math.random() * ALPHA_NUM.length());
            sb.append(ALPHA_NUM.charAt(idx));
        }
        tableName = sb.toString();
    } while (this.usedStorageLocation.contains(tableName));
    return tableName;
}
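The naming scheme in Example 8 is easy to reproduce in isolation. The standalone sketch below mimics it; the prefix, alphabet, and suffix length are hypothetical stand-ins for IRealizationConstants.IIHbaseStorageLocationPrefix, ALPHA_NUM, and HBASE_TABLE_LENGTH, which are defined elsewhere in the enclosing Kylin class.
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

// Standalone sketch of the naming scheme used by generateStorageLocation().
// The prefix, alphabet, and suffix length below are illustrative assumptions.
public class StorageLocationDemo {
    private static final String PREFIX = "KYLIN_II_"; // stand-in for IRealizationConstants.IIHbaseStorageLocationPrefix
    private static final String ALPHA_NUM = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    private static final int SUFFIX_LENGTH = 10;      // stand-in for HBASE_TABLE_LENGTH
    private static final Random RANDOM = new Random();

    private final Set<String> usedStorageLocation = new HashSet<>();

    public String generateStorageLocation() {
        String tableName;
        do {
            StringBuilder sb = new StringBuilder(PREFIX);
            for (int i = 0; i < SUFFIX_LENGTH; i++) {
                sb.append(ALPHA_NUM.charAt(RANDOM.nextInt(ALPHA_NUM.length())));
            }
            tableName = sb.toString();
        } while (!usedStorageLocation.add(tableName)); // retry until the generated name is unused
        return tableName;
    }

    public static void main(String[] args) {
        System.out.println(new StorageLocationDemo().generateStorageLocation());
    }
}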
Example 9: undo
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());
    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        String tableName = (String) opt.params[0];
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        String item = (String) opt.params[0];
        if (item.startsWith(ACL_PREFIX) && doAclCopy) {
            logger.info("Remove acl record");
            dstStore.deleteResource(item);
        }
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];
        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            renameHDFSPath(srcPath, dstPath);
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    case PURGE_AND_DISABLE: {
        logger.info("Undo for PURGE_AND_DISABLE is not supported");
        break;
    }
    default: {
        //do nothing
        break;
    }
    }
}
Example 10: createHTable
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
public static void createHTable(CubeSegment cubeSegment, byte[][] splitKeys) throws IOException {
    String tableName = cubeSegment.getStorageLocationIdentifier();
    CubeInstance cubeInstance = cubeSegment.getCubeInstance();
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    KylinConfig kylinConfig = cubeDesc.getConfig();
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(cubeSegment.getStorageLocationIdentifier()));
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    tableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));
    if (!StringUtils.isEmpty(kylinConfig.getKylinOwner())) {
        //HTableOwner is the team that provides kylin service
        tableDesc.setValue(IRealizationConstants.HTableOwner, kylinConfig.getKylinOwner());
    }
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        tableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }
    //HTableUser is the cube owner, which will be the "user"
    tableDesc.setValue(IRealizationConstants.HTableUser, cubeInstance.getOwner());
    tableDesc.setValue(IRealizationConstants.HTableSegmentTag, cubeSegment.toString());
    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin admin = conn.getAdmin();
    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }
        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = createColumnFamily(kylinConfig, cfDesc.getName(), cfDesc.isMemoryHungry());
            tableDesc.addFamily(cf);
        }
        if (admin.tableExists(TableName.valueOf(tableName))) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }
        DeployCoprocessorCLI.deployCoprocessor(tableDesc);
        admin.createTable(tableDesc, splitKeys);
        Preconditions.checkArgument(admin.isTableAvailable(TableName.valueOf(tableName)), "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }
}
Example 11: undo
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
private static void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());
    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        TableName tableName = TableName.valueOf((String) opt.params[0]);
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(tableName);
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];
        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            renameHDFSPath(srcPath, dstPath);
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    case COPY_ACL: {
        String cubeId = (String) opt.params[0];
        String modelId = (String) opt.params[1];
        Table destAclHtable = null;
        try {
            destAclHtable = HBaseConnection.get(dstConfig.getStorageUrl()).getTable(TableName.valueOf(dstConfig.getMetadataUrlPrefix() + ACL_TABLE_NAME));
            destAclHtable.delete(new Delete(Bytes.toBytes(cubeId)));
            destAclHtable.delete(new Delete(Bytes.toBytes(modelId)));
        } finally {
            IOUtils.closeQuietly(destAclHtable);
        }
        break;
    }
    case PURGE_AND_DISABLE: {
        logger.info("Undo for PURGE_AND_DISABLE is not supported");
        break;
    }
    default: {
        //do nothing
        break;
    }
    }
}
Example 12: resetCoprocessor
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
public static boolean resetCoprocessor(String tableName, Admin hbaseAdmin, Path hdfsCoprocessorJar) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
    //when the table has migrated from dev env to test(prod) env, the dev server
    //should not reset the coprocessor of the table.
    String host = desc.getValue(IRealizationConstants.HTableTag);
    if (!host.equalsIgnoreCase(kylinConfig.getMetadataUrlPrefix())) {
        logger.warn("This server doesn't own this table: " + tableName);
        return false;
    }
    logger.info("reset coprocessor on " + tableName);
    logger.info("Disable " + tableName);
    hbaseAdmin.disableTable(TableName.valueOf(tableName));
    while (desc.hasCoprocessor(CubeObserverClassOld2)) {
        desc.removeCoprocessor(CubeObserverClassOld2);
    }
    while (desc.hasCoprocessor(CubeEndpointClass)) {
        desc.removeCoprocessor(CubeEndpointClass);
    }
    while (desc.hasCoprocessor(IIEndpointClass)) {
        desc.removeCoprocessor(IIEndpointClass);
    }
    // remove legacy coprocessor from v1.x
    while (desc.hasCoprocessor(CubeObserverClassOld)) {
        desc.removeCoprocessor(CubeObserverClassOld);
    }
    while (desc.hasCoprocessor(IIEndpointClassOld)) {
        desc.removeCoprocessor(IIEndpointClassOld);
    }
    addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
    // update commit tags
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        desc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }
    hbaseAdmin.modifyTable(TableName.valueOf(tableName), desc);
    logger.info("Enable " + tableName);
    hbaseAdmin.enableTable(TableName.valueOf(tableName));
    return true;
}
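Example 12 resets a single table; in practice it would be driven by a loop over all candidate Kylin tables. The minimal sketch below is a hypothetical driver, assuming hbaseAdmin and hdfsCoprocessorJar are prepared as in Example 12 and tableNames is a list of Kylin HTables (for instance, the output of Example 2's filterByGitCommit).
// Hypothetical driver for resetCoprocessor(); hbaseAdmin and hdfsCoprocessorJar
// are assumed to be initialized elsewhere, as in Example 12.
private static void resetCoprocessorOnTables(List<String> tableNames, Admin hbaseAdmin, Path hdfsCoprocessorJar) {
    List<String> processed = Lists.newArrayList();
    List<String> skippedOrFailed = Lists.newArrayList();
    for (String tableName : tableNames) {
        try {
            if (resetCoprocessor(tableName, hbaseAdmin, hdfsCoprocessorJar)) {
                processed.add(tableName);
            } else {
                skippedOrFailed.add(tableName); // table owned by another Kylin instance
            }
        } catch (IOException e) {
            logger.error("Failed to reset coprocessor on " + tableName, e);
            skippedOrFailed.add(tableName);
        }
    }
    logger.info("Reset coprocessor done. Processed: " + processed + ", skipped or failed: " + skippedOrFailed);
}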
Example 13: run
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OPTION_CUBE_NAME);
    options.addOption(OPTION_PARTITION_FILE_PATH);
    options.addOption(OPTION_HTABLE_NAME);
    parseOptions(options, args);
    Path partitionFilePath = new Path(getOptionValue(OPTION_PARTITION_FILE_PATH));
    String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    CubeInstance cube = cubeMgr.getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();
    String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
    // https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());
    Configuration conf = HBaseConfiguration.create(getConf());
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }
        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = new HColumnDescriptor(cfDesc.getName());
            cf.setMaxVersions(1);
            if (LZOSupportnessChecker.getSupportness()) {
                logger.info("hbase will use lzo to compress data");
                cf.setCompressionType(Algorithm.LZO);
            } else {
                logger.info("hbase will not use lzo to compress data");
            }
            cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            cf.setInMemory(false);
            cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
            tableDesc.addFamily(cf);
        }
        byte[][] splitKeys = getSplits(conf, partitionFilePath);
        if (admin.tableExists(tableName)) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }
        DeployCoprocessorCLI.deployCoprocessor(tableDesc);
        admin.createTable(tableDesc, splitKeys);
        logger.info("create hbase table " + tableName + " done.");
        return 0;
    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        logger.error(e.getLocalizedMessage(), e);
        return 2;
    } finally {
        admin.close();
    }
}
Example 14: run
import org.apache.kylin.metadata.realization.IRealizationConstants; // import the required package/class
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    try {
        options.addOption(OPTION_II_NAME);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);
        String tableName = getOptionValue(OPTION_HTABLE_NAME);
        String iiName = getOptionValue(OPTION_II_NAME);
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        IIManager iiManager = IIManager.getInstance(config);
        IIInstance ii = iiManager.getII(iiName);
        int sharding = ii.getDescriptor().getSharding();
        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor cf = new HColumnDescriptor(IIDesc.HBASE_FAMILY);
        cf.setMaxVersions(1);
        //cf.setCompressionType(Algorithm.LZO);
        cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        tableDesc.addFamily(cf);
        tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());
        Configuration conf = HBaseConfiguration.create(getConf());
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }
        DeployCoprocessorCLI.deployCoprocessor(tableDesc);
        // drop the table first
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }
        // create table
        byte[][] splitKeys = getSplits(sharding);
        if (splitKeys.length == 0)
            splitKeys = null;
        admin.createTable(tableDesc, splitKeys);
        if (splitKeys != null) {
            for (int i = 0; i < splitKeys.length; i++) {
                System.out.println("split key " + i + ": " + BytesUtil.toHex(splitKeys[i]));
            }
        }
        System.out.println("create hbase table " + tableName + " done.");
        admin.close();
        return 0;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}