This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ServerRegionReplicaUtil. If you have been wondering what ServerRegionReplicaUtil is for or how to use it, the curated class code examples below may help.
The ServerRegionReplicaUtil class belongs to the org.apache.hadoop.hbase.util package. 15 code examples of the class are shown below, sorted by popularity by default.
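Before the examples, here is a minimal sketch of the two calls that recur most often below: testing whether a region is the primary (default) replica, and resolving the HRegionInfo used for file-system paths. This is a hedged illustration rather than code from the examples; the table name "demo" and the wrapper class are hypothetical, and an HBase 1.x classpath is assumed.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

public class ReplicaUtilSketch {
  public static void main(String[] args) {
    // Hypothetical region with replicaId 1, i.e. a secondary replica.
    HRegionInfo secondary = new HRegionInfo(TableName.valueOf("demo"),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false,
        System.currentTimeMillis(), 1);

    // false: only replicaId 0 is the default (primary) replica.
    System.out.println(ServerRegionReplicaUtil.isDefaultReplica(secondary));

    // Secondary replicas keep no store files of their own; file-system
    // operations go through the replicaId-0 view of the region.
    HRegionInfo forFs = ServerRegionReplicaUtil.getRegionInfoForFs(secondary);
    System.out.println(forFs.getReplicaId()); // 0
  }
}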
Example 1: addTableToMeta
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
    final HTableDescriptor hTableDescriptor,
    final List<HRegionInfo> regions) throws IOException {
  if (regions != null && regions.size() > 0) {
    ProcedureSyncWait.waitMetaRegions(env);

    // Add regions to META
    addRegionsToMeta(env, hTableDescriptor, regions);
    // Add replicas if needed
    List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions);

    // Set up replication for region replicas if needed
    if (hTableDescriptor.getRegionReplication() > 1) {
      ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
    }
    return newRegions;
  }
  return regions;
}
Example 2: triggerFlushInPrimaryRegion
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Trigger a flush in the primary region replica if this region is a secondary replica. Does not
 * block this thread. See RegionReplicaFlushHandler for details.
 */
void triggerFlushInPrimaryRegion(final HRegion region) {
  if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
    return;
  }
  if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf)
      || !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf)) {
    region.setReadsEnabled(true);
    return;
  }
  // Disable reads before marking the region as opened; RegionReplicaFlushHandler might reset this.
  region.setReadsEnabled(false);
  // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler.
  this.service.submit(
      new RegionReplicaFlushHandler(this, clusterConnection, rpcRetryingCallerFactory,
          rpcControllerFactory, operationTimeout, region));
}
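The two guards in Example 2 correspond to boolean configuration keys that are exposed as constants on ServerRegionReplicaUtil; these are the same constants the test setups in Examples 3 and 4 flip. A minimal, hedged fragment showing the round trip on a plain in-memory Configuration (not taken from the examples):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

Configuration conf = new Configuration();
conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true);

// With both flags set, triggerFlushInPrimaryRegion takes the branch that keeps
// reads disabled and submits a RegionReplicaFlushHandler.
assert ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(conf);
assert ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf);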
Example 3: beforeClass
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
  conf.setInt("replication.source.size.capacity", 10240);
  conf.setLong("replication.source.sleepforretries", 100);
  conf.setInt("hbase.regionserver.maxlogs", 10);
  conf.setLong("hbase.master.logcleaner.ttl", 10);
  conf.setInt("zookeeper.recovery.retry", 1);
  conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); // fewer retries are needed
  conf.setInt("hbase.client.serverside.retries.multiplier", 1);
  HTU.startMiniCluster(NB_SERVERS);
}
Example 4: before
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@Before
public void before() throws Exception {
  Configuration conf = HTU.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
  HTU.startMiniCluster(NB_SERVERS);
  htd = HTU.createTableDescriptor(
      name.getMethodName().substring(0, name.getMethodName().length() - 3));
  htd.setRegionReplication(3);
  HTU.getHBaseAdmin().createTable(htd);
}
Example 5: setConf
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
      String.valueOf(DEFAULT_REGION_REPLICATION));
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
      StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));
  conf.setBoolean("hbase.table.sanity.checks", true);
  // Enable async WAL replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);
  super.setConf(conf);
}
Example 6: triggerFlushInPrimaryRegion
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Trigger a flush in the primary region replica if this region is a secondary replica. Does not
 * block this thread. See RegionReplicaFlushHandler for details.
 */
void triggerFlushInPrimaryRegion(final HRegion region) {
  if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
    return;
  }
  if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf)
      || !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf)) {
    region.setReadsEnabled(true);
    return;
  }
  // Disable reads before marking the region as opened; RegionReplicaFlushHandler might reset this.
  region.setReadsEnabled(false);
  // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler.
  if (this.executorService != null) {
    this.executorService.submit(new RegionReplicaFlushHandler(this, clusterConnection,
        rpcRetryingCallerFactory, rpcControllerFactory, operationTimeout, region));
  }
}
Example 7: getStoreFiles
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Returns the store files available for the family.
 * This method performs the filtering based on the valid store files.
 * @param familyName Column Family Name
 * @return a set of {@link StoreFileInfo} for the specified family.
 */
public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
    throws IOException {
  Path familyDir = getStoreDir(familyName);
  FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
  if (files == null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No StoreFiles for: " + familyDir);
    }
    return null;
  }
  ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(files.length);
  for (FileStatus status : files) {
    if (validate && !StoreFileInfo.isValid(status)) {
      LOG.warn("Invalid StoreFile: " + status.getPath());
      continue;
    }
    StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
        regionInfoForFs, familyName, status.getPath());
    storeFiles.add(info);
  }
  return storeFiles;
}
Example 8: beforeClass
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
  conf.setInt("replication.source.size.capacity", 10240);
  conf.setLong("replication.source.sleepforretries", 100);
  conf.setInt("hbase.regionserver.maxlogs", 10);
  conf.setLong("hbase.master.logcleaner.ttl", 10);
  conf.setInt("zookeeper.recovery.retry", 1);
  conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); // fewer retries are needed
  conf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
  HTU.startMiniCluster(NB_SERVERS);
}
Example 9: before
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@Before
public void before() throws Exception {
  Configuration conf = HTU.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  HTU.startMiniCluster(NB_SERVERS);
  htd = HTU.createTableDescriptor(
      name.getMethodName().substring(0, name.getMethodName().length() - 3));
  htd.setRegionReplication(3);
  HTU.getAdmin().createTable(htd);
}
Example 10: setConf
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
      String.valueOf(DEFAULT_REGION_REPLICATION));
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
      StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));
  conf.setBoolean("hbase.table.sanity.checks", true);
  // Enable async WAL replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);
  super.setConf(conf);
}
Example 11: updateReplicaColumnsIfNeeded
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Update replica column families if necessary.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void updateReplicaColumnsIfNeeded(
    final MasterProcedureEnv env,
    final HTableDescriptor oldHTableDescriptor,
    final HTableDescriptor newHTableDescriptor) throws IOException {
  final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
  final int newReplicaCount = newHTableDescriptor.getRegionReplication();
  if (newReplicaCount < oldReplicaCount) {
    Set<byte[]> tableRows = new HashSet<byte[]>();
    Connection connection = env.getMasterServices().getConnection();
    Scan scan = MetaTableAccessor.getScanForTableName(getTableName());
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
      ResultScanner resScanner = metaTable.getScanner(scan);
      for (Result result : resScanner) {
        tableRows.add(result.getRow());
      }
      MetaTableAccessor.removeRegionReplicasFromMeta(
          tableRows,
          newReplicaCount,
          oldReplicaCount - newReplicaCount,
          connection);
    }
  }
  // Set up replication for region replicas if needed
  if (newReplicaCount > 1 && oldReplicaCount <= 1) {
    ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
  }
}
Example 12: HRegionFileSystem
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Create a view to the on-disk region.
 *
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} that contains the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for region
 */
HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
    final HRegionInfo regionInfo) {
  this.fs = fs;
  this.conf = conf;
  this.tableDir = tableDir;
  this.regionInfo = regionInfo;
  this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
  this.hdfsClientRetriesNumber =
      conf.getInt("hdfs.client.retries.number", DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
  this.baseSleepBeforeRetries =
      conf.getInt("hdfs.client.sleep.before.retries", DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
}
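The key line in this constructor is the getRegionInfoForFs call: for a secondary replica it returns the primary's HRegionInfo, so every on-disk path computed by HRegionFileSystem points at the primary's region directory (replicas keep no store files of their own). A hedged two-liner illustrating the effect, reusing the hriSecondary built in Example 15 below:
// hriSecondary was created with replicaId 1; its file-system view is the primary.
HRegionInfo fsInfo = ServerRegionReplicaUtil.getRegionInfoForFs(hriSecondary);
assert fsInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID; // i.e. 0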
Example 13: getStoreFiles
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Returns the store files available for the family. This method performs the filtering based on
 * the valid store files.
 *
 * @param familyName Column Family Name
 * @return a set of {@link StoreFileInfo} for the specified family.
 */
public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
    throws IOException {
  Path familyDir = getStoreDir(familyName);
  // Filter out local-index artifacts so that only real store files are listed.
  FileStatus[] files = FSUtils.listStatus(this.fs, familyDir, new PathFilter() {
    @Override
    public boolean accept(Path path) {
      String name = path.getName();
      if (name.endsWith(IndexConstants.REGION_INDEX_DIR_NAME)
          || name.endsWith(LMDIndexConstants.BUCKET_FILE_SUFFIX)
          || name.endsWith(LMDIndexConstants.DATA_FILE_SUFFIX)) {
        return false;
      }
      return true;
    }
  });
  if (files == null) {
    LOG.debug("No StoreFiles for: " + familyDir);
    return null;
  }
  ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
  for (FileStatus status : files) {
    if (validate && !StoreFileInfo.isValid(status)) {
      LOG.warn("Invalid StoreFile: " + status.getPath());
      continue;
    }
    StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
        regionInfoForFs, familyName, status.getPath());
    storeFiles.add(info);
  }
  return storeFiles;
}
Example 14: getStoreFileInfo
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

/**
 * Return the store file information of the specified family/file.
 *
 * @param familyName Column Family Name
 * @param fileName File Name
 * @return The {@link StoreFileInfo} for the specified family/file
 */
StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
    throws IOException {
  Path familyDir = getStoreDir(familyName);
  return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs,
      familyName, new Path(familyDir, fileName));
}
Example 15: beforeClass
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; // import the required package/class

@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false);
  // Install the WALObserver coprocessor for tests
  String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
  if (walCoprocs == null) {
    walCoprocs = WALEditCopro.class.getName();
  } else {
    walCoprocs += "," + WALEditCopro.class.getName();
  }
  HTU.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, walCoprocs);
  HTU.startMiniCluster(NB_SERVERS);

  // Create the table, then get the single region for our new table.
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  table = HTU.createTable(htd, new byte[][] { f }, HTU.getConfiguration());
  hriPrimary = table.getRegionLocation(row, false).getRegionInfo();

  // Mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);

  // No master
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
  rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
}