This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HColumnDescriptor.setScope. If you are wondering what HColumnDescriptor.setScope does, how to call it, or what real-world uses look like, the curated code samples here should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HColumnDescriptor.
Three code examples of the HColumnDescriptor.setScope method are shown below, sorted by popularity by default.
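Before the examples, a minimal sketch of what the method controls: setScope sets a column family's replication scope, where HConstants.REPLICATION_SCOPE_LOCAL (0, the default) keeps edits on the local cluster and HConstants.REPLICATION_SCOPE_GLOBAL (1) ships them to configured replication peers. The sketch below is illustrative only and is not taken from the examples that follow; the table name "demo_table", the family "cf", and the class name SetScopeSketch are placeholders, and it assumes an HBase 1.x client on the classpath plus a cluster reachable through the default configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SetScopeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo_table"));
      HColumnDescriptor fam = new HColumnDescriptor("cf");
      // Mark the family for cross-cluster replication; without this call the
      // scope stays REPLICATION_SCOPE_LOCAL and peers never see its edits.
      fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
      table.addFamily(fam);
      admin.createTable(table);
    }
  }
}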
Example 1: setUpBeforeClass
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniCluster();
  admin = new ReplicationAdmin(conf1);

  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2182);
  utility2 = new HBaseTestingUtility(conf2);
  utility2.startMiniCluster();

  ReplicationPeerConfig config = new ReplicationPeerConfig();
  config.setClusterKey(utility2.getClusterKey());
  admin.addPeer(peerId, config, null);

  HTableDescriptor table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  utility1.getHBaseAdmin().createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  utility1.waitUntilAllRegionsAssigned(tableName);
}
Example 2: setTableRep
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the package/class the method depends on
/**
 * Sets the table's replication switch if it is not already in the requested state.
 * @param tableName name of the table
 * @param isRepEnabled whether replication should be enabled or disabled
 * @throws IOException if a remote or network exception occurs
 */
private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
  Admin admin = null;
  try {
    admin = this.connection.getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    if (isTableRepEnabled(htd) ^ isRepEnabled) {
      boolean isOnlineSchemaUpdateEnabled =
          this.connection.getConfiguration()
              .getBoolean("hbase.online.schema.update.enable", true);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.disableTable(tableName);
      }
      for (HColumnDescriptor hcd : htd.getFamilies()) {
        hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL
            : HConstants.REPLICATION_SCOPE_LOCAL);
      }
      admin.modifyTable(tableName, htd);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.enableTable(tableName);
      }
    }
  } finally {
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        LOG.warn("Failed to close admin connection.");
        LOG.debug("Details on failure to close admin connection.", e);
      }
    }
  }
}
Example 3: setUpBeforeClass
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  replicationAdmin.addPeer("2", utility2.getClusterKey());
  LOG.info("Setup second Zk");

  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
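A hypothetical follow-up to Example 3 (not part of the original test) that shows why REPLICATION_SCOPE_GLOBAL matters: write a cell on the source cluster and poll the peer until replication delivers it. The fragment would sit inside a test method declared with throws Exception, and it assumes the usual client imports (Put, Get, Result, Bytes) plus placeholder ROW and QUALIFIER byte[] constants that the original class may not define.

Put put = new Put(ROW);
put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("value"));
htable1.put(put);

Get get = new Get(ROW);
for (int i = 0; i < 30; i++) {
  Result result = htable2.get(get);
  if (!result.isEmpty()) {
    break;              // the edit reached the peer cluster via replication
  }
  Thread.sleep(1000);   // replication is asynchronous, so retry
}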