This page collects typical usage examples of the Java method org.apache.hadoop.hbase.client.replication.ReplicationAdmin.close. If you are wondering exactly how ReplicationAdmin.close is used, how to call it, or what real code that uses it looks like, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.replication.ReplicationAdmin.
Below are 8 code examples of the ReplicationAdmin.close method, sorted by popularity by default.
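Before the examples, here is a minimal, self-contained sketch of the pattern they all share (the class name ReplicationAdminCloseSketch is made up for illustration and does not come from any of the indexed projects): construct a ReplicationAdmin from a Configuration, do the replication work, and guarantee that close() runs in a finally block so the underlying cluster connection is released even if the work throws.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class ReplicationAdminCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Creating a ReplicationAdmin acquires a connection to the cluster,
    // so it must always be paired with close().
    ReplicationAdmin admin = new ReplicationAdmin(conf);
    try {
      // Work with replication peers here; listPeerConfigs() is the same
      // read-only call Example 6 below uses.
      System.out.println("Known peers: " + admin.listPeerConfigs().keySet());
    } finally {
      admin.close(); // releases the connection even if the block above threw
    }
  }
}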
Example 1: setupRegionReplicaReplication
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
/**
 * Create replication peer for replicating to region replicas if needed.
 * @param conf configuration to use
 * @throws IOException
 */
public static void setupRegionReplicaReplication(Configuration conf) throws IOException {
  if (!isRegionReplicaReplicationEnabled(conf)) {
    return;
  }
  ReplicationAdmin repAdmin = new ReplicationAdmin(conf);
  try {
    if (repAdmin.getPeerConfig(REGION_REPLICA_REPLICATION_PEER) == null) {
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
      peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf));
      peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName());
      repAdmin.addPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, null);
    }
  } catch (ReplicationException ex) {
    throw new IOException(ex);
  } finally {
    repAdmin.close();
  }
}
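Example 1 only creates the peer; the tests in Examples 2, 3 and 6 remove peers again inline. As a rough sketch of a matching teardown helper, assuming the same imports as Example 1 (the method name tearDownRegionReplicaReplication is hypothetical; the peer id "region_replica_replication" is the one used in Examples 2 and 3), the same try/catch/finally shape keeps close() on every exit path:

public static void tearDownRegionReplicaReplication(Configuration conf) throws IOException {
  ReplicationAdmin repAdmin = new ReplicationAdmin(conf);
  try {
    // Only attempt removal if the peer actually exists, mirroring Example 1's null check.
    if (repAdmin.getPeerConfig("region_replica_replication") != null) {
      repAdmin.removePeer("region_replica_replication");
    }
  } catch (ReplicationException ex) {
    throw new IOException(ex);
  } finally {
    repAdmin.close(); // release the admin whether or not removal succeeded
  }
}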
Example 2: testRegionReplicaReplicationPeerIsCreated
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
@Test
public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException {
  // create a table with region replicas. Check whether the replication peer is created
  // and replication started.
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  String peerId = "region_replica_replication";
  if (admin.getPeerConfig(peerId) != null) {
    admin.removePeer(peerId);
  }

  HTableDescriptor htd = HTU.createTableDescriptor(
    "testReplicationPeerIsCreated_no_region_replicas");
  HTU.getHBaseAdmin().createTable(htd);

  ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
  assertNull(peerConfig);

  htd = HTU.createTableDescriptor("testReplicationPeerIsCreated");
  htd.setRegionReplication(2);
  HTU.getHBaseAdmin().createTable(htd);

  // assert peer configuration is correct
  peerConfig = admin.getPeerConfig(peerId);
  assertNotNull(peerConfig);
  assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(
    HTU.getConfiguration()));
  assertEquals(peerConfig.getReplicationEndpointImpl(),
    RegionReplicaReplicationEndpoint.class.getName());
  admin.close();
}
Example 3: testRegionReplicaReplicationPeerIsCreatedForModifyTable
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
@Test (timeout=240000)
public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exception {
  // modify a table by adding region replicas. Check whether the replication peer is created
  // and replication started.
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  String peerId = "region_replica_replication";
  if (admin.getPeerConfig(peerId) != null) {
    admin.removePeer(peerId);
  }

  HTableDescriptor htd
    = HTU.createTableDescriptor("testRegionReplicaReplicationPeerIsCreatedForModifyTable");
  HTU.getHBaseAdmin().createTable(htd);

  // assert that replication peer is not created yet
  ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
  assertNull(peerConfig);

  HTU.getHBaseAdmin().disableTable(htd.getTableName());
  htd.setRegionReplication(2);
  HTU.getHBaseAdmin().modifyTable(htd.getTableName(), htd);
  HTU.getHBaseAdmin().enableTable(htd.getTableName());

  // assert peer configuration is correct
  peerConfig = admin.getPeerConfig(peerId);
  assertNotNull(peerConfig);
  assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(
    HTU.getConfiguration()));
  assertEquals(peerConfig.getReplicationEndpointImpl(),
    RegionReplicaReplicationEndpoint.class.getName());
  admin.close();
}
Example 4: setupReplication
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
private void setupReplication() throws Exception {
  ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
  ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

  HBaseAdmin ha = new HBaseAdmin(conf1);
  ha.createTable(t1_syncupSource);
  ha.createTable(t2_syncupSource);
  ha.close();

  ha = new HBaseAdmin(conf2);
  ha.createTable(t1_syncupTarget);
  ha.createTable(t2_syncupTarget);
  ha.close();

  // Get HTable from Master
  ht1Source = new HTable(conf1, t1_su);
  ht1Source.setWriteBufferSize(1024);
  ht2Source = new HTable(conf1, t2_su);
  ht2Source.setWriteBufferSize(1024);

  // Get HTable from Peer1
  ht1TargetAtPeer1 = new HTable(conf2, t1_su);
  ht1TargetAtPeer1.setWriteBufferSize(1024);
  ht2TargetAtPeer1 = new HTable(conf2, t2_su);
  ht2TargetAtPeer1.setWriteBufferSize(1024);

  /**
   * set M-S : Master: utility1 Slave1: utility2
   */
  admin1.addPeer("1", utility2.getClusterKey());

  admin1.close();
  admin2.close();
}
Example 5: setupReplication
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
protected void setupReplication() throws Exception {
  ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
  ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

  Admin ha = utility1.getAdmin();
  ha.createTable(t1_syncupSource);
  ha.createTable(t2_syncupSource);
  ha.close();

  ha = utility2.getAdmin();
  ha.createTable(t1_syncupTarget);
  ha.createTable(t2_syncupTarget);
  ha.close();

  Connection connection1 = ConnectionFactory.createConnection(utility1.getConfiguration());
  Connection connection2 = ConnectionFactory.createConnection(utility2.getConfiguration());

  // Get HTable from Master
  ht1Source = connection1.getTable(t1_su);
  ht2Source = connection1.getTable(t2_su);

  // Get HTable from Peer1
  ht1TargetAtPeer1 = connection2.getTable(t1_su);
  ht2TargetAtPeer1 = connection2.getTable(t2_su);

  /**
   * set M-S : Master: utility1 Slave1: utility2
   */
  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey(utility2.getClusterKey());
  admin1.addPeer("1", rpc, null);

  admin1.close();
  admin2.close();
}
Example 6: setUpBeforeTest
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
@Before
public void setUpBeforeTest() throws Exception {
  if (!firstTest) {
    // Delete /ngdata from zookeeper
    System.out.println(">>> Deleting /ngdata node from ZooKeeper");
    cleanZooKeeper("localhost:" + hbaseTestUtil.getZkCluster().getClientPort(), "/ngdata");

    // Delete all hbase tables
    System.out.println(">>> Deleting all HBase tables");
    Admin admin = connection.getAdmin();
    for (HTableDescriptor table : admin.listTables()) {
      admin.disableTable(table.getTableName());
      admin.deleteTable(table.getTableName());
    }
    admin.close();

    // Delete all replication peers
    System.out.println(">>> Deleting all replication peers from HBase");
    ReplicationAdmin replAdmin = new ReplicationAdmin(conf);
    for (String peerId : replAdmin.listPeerConfigs().keySet()) {
      replAdmin.removePeer(peerId);
    }
    replAdmin.close();
    SepTestUtil.waitOnAllReplicationPeersStopped();

    // Clear Solr indexes
    System.out.println(">>> Clearing Solr indexes");
    collection1.deleteByQuery("*:*");
    collection1.commit();
    collection2.deleteByQuery("*:*");
    collection2.commit();
  } else {
    firstTest = false;
  }

  main = new Main();
  main.startServices(conf);
}
Example 7: testRegionReplicaReplicationIgnoresDisabledTables
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable)
    throws Exception {
  // Tests that edits from a disabled or dropped table are handled correctly by skipping those
  // entries, and that further edits after the ones from the dropped/disabled table can be
  // replicated without problems.
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationIgnoresDisabledTables"
    + dropTable);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);
  HTU.getHBaseAdmin().createTable(htd);

  TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  // both tables are created, now pause replication
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // now that the replication is disabled, write to the table to be dropped, then drop the table.
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  AtomicLong skippedEdits = new AtomicLong();
  RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
    mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
  when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
  RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
    new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
      (ClusterConnection) connection,
      Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();

  Entry entry = new Entry(
    new WALKey(encodedRegionName, toBeDisabledTable, 1),
    new WALEdit());

  HTU.getHBaseAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getHBaseAdmin().deleteTable(toBeDisabledTable);
  }

  sinkWriter.append(toBeDisabledTable, encodedRegionName,
    HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));

  assertEquals(2, skippedEdits.get());

  try {
    // load some data to the to-be-dropped table
    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    // now enable the replication
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    admin.close();
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
Example 8: testRegionReplicaReplicationIgnoresDisabledTables
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; // import the package/class the method depends on
public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable)
    throws Exception {
  // Tests that edits from a disabled or dropped table are handled correctly by skipping those
  // entries, and that further edits after the ones from the dropped/disabled table can be
  // replicated without problems.
  final TableName tableName = TableName.valueOf(name.getMethodName() + dropTable);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);
  HTU.getAdmin().createTable(htd);

  TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);

  // both tables are created, now pause replication
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // now that the replication is disabled, write to the table to be dropped, then drop the table.
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  AtomicLong skippedEdits = new AtomicLong();
  RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
    mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
  when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
  RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
    new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
      (ClusterConnection) connection,
      Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();

  Entry entry = new Entry(
    new WALKeyImpl(encodedRegionName, toBeDisabledTable, 1),
    new WALEdit());

  HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getAdmin().deleteTable(toBeDisabledTable);
  }

  sinkWriter.append(toBeDisabledTable, encodedRegionName,
    HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));

  assertEquals(2, skippedEdits.get());

  try {
    // load some data to the to-be-dropped table
    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    // now enable the replication
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    admin.close();
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}