This article collects typical usage examples of the Java class org.apache.hadoop.hbase.TableDescriptors. If you are unsure what TableDescriptors is for or how to use it, the curated class-level code examples below should help.
The TableDescriptors class belongs to the org.apache.hadoop.hbase package. Fifteen code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
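Before diving into the examples, here is a minimal sketch of what the interface being demonstrated does: TableDescriptors is a catalog of table schemas keyed by table name, and FSTableDescriptors is its filesystem-backed implementation (the same constructor used in Examples 10-12). The root directory and table name below are placeholders, and the exact constructor signature and descriptor type (HTableDescriptor vs. TableDescriptor) vary across HBase versions, so treat this as an orientation sketch rather than a definitive API reference.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class TableDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path("/tmp/hbase-sketch");  // placeholder root directory

    // Filesystem-backed catalog of table schemas (constructor as in Examples 10-12).
    TableDescriptors htds = new FSTableDescriptors(conf, fs, rootdir);

    // Register a schema, look it up by name, then remove it.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    htds.add(htd);
    System.out.println(htds.get(TableName.valueOf("demo")));     // the descriptor just added
    System.out.println(htds.remove(TableName.valueOf("demo")));  // returns the removed descriptor
    System.out.println(htds.get(TableName.valueOf("demo")));     // now null
  }
}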
Example 1: Context
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@InterfaceAudience.Private
public Context(
    final Configuration conf,
    final FileSystem fs,
    final ReplicationPeerConfig peerConfig,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics,
    final TableDescriptors tableDescriptors) {
  this.peerConfig = peerConfig;
  this.conf = conf;
  this.fs = fs;
  this.clusterId = clusterId;
  this.peerId = peerId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
  this.tableDescriptors = tableDescriptors;
}
Example 2: RegionReplicaOutputSink
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors,
    EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool,
    int numWriters, int operationTimeout) {
  super(controller, entryBuffers, numWriters);
  this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
  this.tableDescriptors = tableDescriptors;
  // A cache for the table's "memstore replication enabled" flag.
  // It has a default expiry of 5 seconds, so if the table is altered with a different flag
  // value we might miss replicating for up to that long. In exchange, the cache avoids the
  // slow lookup and parsing of the TableDescriptor on every call.
  int memstoreReplicationEnabledCacheExpiryMs = connection.getConfiguration()
      .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
  this.memstoreReplicationEnabled = CacheBuilder.newBuilder()
      .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
      .initialCapacity(10)
      .maximumSize(1000)
      .build();
}
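The constructor above only builds the cache; the comment explains the trade-off (a possibly stale flag for at most the expiry window, in exchange for skipping a TableDescriptor lookup on every call). As a rough, hedged illustration of how such a cache is typically consulted — the class and method names below are invented for this sketch, and hasRegionMemstoreReplication() is assumed to be available on HTableDescriptor in the HBase 1.x line this example appears to come from — a cache miss falls back to TableDescriptors and repopulates the entry:

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

// Hypothetical helper class, not part of HBase; it mirrors the caching pattern above.
final class MemstoreReplicationFlagCache {
  private final Cache<TableName, Boolean> cache =
      CacheBuilder.newBuilder().expireAfterWrite(5000, TimeUnit.MILLISECONDS).build();
  private final TableDescriptors tableDescriptors;

  MemstoreReplicationFlagCache(TableDescriptors tableDescriptors) {
    this.tableDescriptors = tableDescriptors;
  }

  /** Returns the cached flag, falling back to a TableDescriptors lookup on a miss. */
  boolean isEnabled(TableName tableName) throws IOException {
    Boolean enabled = cache.getIfPresent(tableName);
    if (enabled == null) {
      // Cache miss: do the (slower) descriptor lookup and remember the answer
      // until the entry expires. Accessor name is an assumption (HBase 1.x API).
      HTableDescriptor htd = tableDescriptors.get(tableName);
      enabled = htd != null && htd.hasRegionMemstoreReplication();
      cache.put(tableName, enabled);
    }
    return enabled;
  }
}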
Example 3: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
/**
 * If the region open fails with an IOException from tableDescriptors.get() inside openRegion(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
        regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Example 4: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
/**
 * If the region open fails with an IOException from tableDescriptors.get() inside openRegion(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(regionServer.getRSRpcServices(),
        regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Example 5: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
/**
 * If the region open fails with an IOException from tableDescriptors.get() inside openRegion(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(regionServer, regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Example 6: Context
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@InterfaceAudience.Private
public Context(
    final Configuration localConf,
    final Configuration conf,
    final FileSystem fs,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics,
    final TableDescriptors tableDescriptors,
    final Abortable abortable) {
  this.localConf = localConf;
  this.conf = conf;
  this.fs = fs;
  this.clusterId = clusterId;
  this.peerId = peerId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
  this.tableDescriptors = tableDescriptors;
  this.abortable = abortable;
}
Example 7: fixTableStates
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
    throws IOException {
  final Map<String, TableDescriptor> allDescriptors =
      tableDescriptors.getAllDescriptors();
  final Map<String, TableState> states = new HashMap<>();
  MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      TableState state = MetaTableAccessor.getTableState(r);
      if (state != null) {
        states.put(state.getTableName().getNameAsString(), state);
      }
      return true;
    }
  });
  for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
    String table = entry.getKey();
    if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) {
      continue;
    }
    if (!states.containsKey(table)) {
      LOG.warn(table + " has no state, assuming ENABLED");
      MetaTableAccessor.updateTableState(connection, TableName.valueOf(table),
          TableState.State.ENABLED);
    }
  }
}
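Example 7 backfills hbase:meta with an ENABLED state for every table that has a descriptor on disk but no recorded state. Below is a minimal sketch of invoking it, assuming the method is reachable as a public static utility and reusing the FSTableDescriptors constructor from Examples 10-12; the rootdir lookup and the wrapper class are placeholders for whatever wiring your code already has.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FixTableStatesRunner {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      FileSystem fs = FileSystem.get(conf);
      Path rootdir = new Path(conf.get("hbase.rootdir"));  // HBase root directory from config
      TableDescriptors tds = new FSTableDescriptors(conf, fs, rootdir);
      // Writes an ENABLED state for any table whose descriptor exists but whose state is missing.
      fixTableStates(tds, connection);  // assumes Example 7's method is statically imported
    }
  }
}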
Example 8: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
/**
 * If the region open fails with an IOException from tableDescriptors.get() inside openRegion(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(regionServer, REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Example 9: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
/**
 * If the region open fails with an IOException from tableDescriptors.get() inside openRegion(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(Bytes.toBytes("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((byte[]) Mockito.any());
  try {
    ProtobufUtil.openRegion(regionServer, REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Example 10: testRemoves
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testRemoves() throws IOException {
  final String name = "testRemoves";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  htds.add(htd);
  assertNotNull(htds.remove(htd.getTableName()));
  assertNull(htds.remove(htd.getTableName()));
}
Example 11: testNoSuchTable
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  assertNull("There shouldn't be any HTD for this table",
      htds.get(TableName.valueOf("NoSuchTable")));
}
Example 12: testUpdates
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  htds.add(htd);
  htds.add(htd);
  htds.add(htd);
}
Example 13: testRemoves
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testRemoves() throws IOException {
  final String name = "testRemoves";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  htds.add(htd);
  assertNotNull(htds.remove(htd.getTableName()));
  assertNull(htds.remove(htd.getTableName()));
}
Example 14: testNoSuchTable
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  assertNull("There shouldn't be any HTD for this table",
      htds.get(TableName.valueOf("NoSuchTable")));
}
Example 15: testUpdates
import org.apache.hadoop.hbase.TableDescriptors; // import the required package/class
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up any detritus left lying around by old tests.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  htds.add(htd);
  htds.add(htd);
  htds.add(htd);
}