This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.TableName.equals. If you are wondering what TableName.equals does, how to use it, or where to find examples, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.TableName.
Twelve code examples of the TableName.equals method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
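Before the examples, a minimal sketch (the class and table names are made up for illustration) of what TableName.equals actually compares: two TableName instances are equal when their fully qualified names, namespace plus qualifier, match.

import org.apache.hadoop.hbase.TableName;

public class TableNameEqualsDemo {
  public static void main(String[] args) {
    TableName a = TableName.valueOf("demo");            // implicit "default" namespace
    TableName b = TableName.valueOf("default", "demo"); // explicit namespace, same table
    System.out.println(a.equals(b));                         // true: same fully qualified name
    System.out.println(a.equals(TableName.META_TABLE_NAME)); // false: user table vs hbase:meta
  }
}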
Example 1: assertExistsMatchingSnapshot
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Make sure that there is at least one snapshot returned from the master whose
 * name and table match the passed in parameters.
 */
public static List<SnapshotDescription> assertExistsMatchingSnapshot(
    Admin admin, String snapshotName, TableName tableName)
    throws IOException {
  // list the snapshots
  List<SnapshotDescription> snapshots = admin.listSnapshots();
  List<SnapshotDescription> returnedSnapshots = new ArrayList<SnapshotDescription>();
  for (SnapshotDescription sd : snapshots) {
    if (snapshotName.equals(sd.getName()) &&
        tableName.equals(TableName.valueOf(sd.getTable()))) {
      returnedSnapshots.add(sd);
    }
  }
  Assert.assertTrue("No matching snapshots found.", returnedSnapshots.size() > 0);
  return returnedSnapshots;
}
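A hypothetical test usage of this helper, assuming admin is an open Admin handle and that table "demo" already exists (all names here are illustrative):

admin.snapshot("snap1", TableName.valueOf("demo"));
List<SnapshotDescription> matches =
    assertExistsMatchingSnapshot(admin, "snap1", TableName.valueOf("demo"));
// matches holds at least one descriptor whose name and table both match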
Example 2: modifyTable
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Modify an existing table, more IRB friendly version.
 * Asynchronous operation. This means that it may be a while before your
 * schema change is updated across all regions of the table.
 *
 * @param tableName name of table.
 * @param htd modified description of the table
 * @throws IOException if a remote or network exception occurs
 */
@Override
public void modifyTable(final TableName tableName, final HTableDescriptor htd)
    throws IOException {
  if (!tableName.equals(htd.getTableName())) {
    throw new IllegalArgumentException("the specified table name '" + tableName +
      "' doesn't match with the HTD one: " + htd.getTableName());
  }
  executeCallable(new MasterCallable<Void>(getConnection()) {
    @Override
    public Void call(int callTimeout) throws ServiceException {
      PayloadCarryingRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      controller.setPriority(tableName);
      ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
        tableName, htd, ng.getNonceGroup(), ng.newNonce());
      master.modifyTable(controller, request);
      return null;
    }
  });
}
Example 3: locateRegion
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public RegionLocations locateRegion(final TableName tableName,
    final byte[] row, boolean useCache, boolean retry, int replicaId)
    throws IOException {
  if (this.closed) throw new IOException(toString() + " closed");
  if (tableName == null || tableName.getName().length == 0) {
    throw new IllegalArgumentException(
        "table name cannot be null or zero length");
  }
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    return locateMeta(tableName, useCache, replicaId);
  } else {
    // Region not in the cache - have to go to the meta RS
    return locateRegionInMeta(tableName, row, useCache, retry, replicaId);
  }
}
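The equals check against TableName.META_TABLE_NAME is the dispatch point here: the location of hbase:meta itself comes from locateMeta, while every other table is resolved by reading hbase:meta via locateRegionInMeta. A hedged caller-side sketch, assuming connection exposes this internal HBase 1.x method:

RegionLocations locs = connection.locateRegion(
    TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
    true /* useCache */, true /* retry */, 0 /* primary replica */);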
Example 4: preScannerOpen
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
    RegionScanner s) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return s;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // For system tables other than "labels", just scan without visibility checks and
    // filtering. Checking visibility labels for the META and NAMESPACE tables is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return s;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region,
      authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
  return s;
}
Example 5: preGetOp
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
    List<Cell> results) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized");
  }
  // Nothing useful to do if authorization is not enabled
  if (!authorizationEnabled) {
    return;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = get.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this Get!
    // For system tables other than "labels", just read without visibility checks and
    // filtering. Checking visibility labels for the META and NAMESPACE tables is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment()
      .getRegion(), authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = get.getFilter();
    if (filter != null) {
      get.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      get.setFilter(visibilityLabelFilter);
    }
  }
}
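Examples 4 and 5 share the same guard: when a request carries no Authorizations, visibility filtering is skipped for system tables, except for the labels table itself. A minimal sketch of that predicate, assuming LABELS_TABLE_NAME refers to VisibilityConstants.LABELS_TABLE_NAME (i.e. hbase:labels):

TableName table = region.getRegionInfo().getTable();
boolean skipVisibilityFiltering =
    table.isSystemTable() && !table.equals(VisibilityConstants.LABELS_TABLE_NAME);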
Example 6: getTablePriority
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
private int getTablePriority(TableName tableName) {
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    return metaTablePriority;
  } else if (tableName.isSystemTable()) {
    return sysTablePriority;
  }
  return userTablePriority;
}
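The same three-tier ordering can be sketched in isolation; the constants below are made up (the real metaTablePriority, sysTablePriority and userTablePriority fields are configured elsewhere in the enclosing class):

static int priorityOf(TableName tn) {
  if (tn.equals(TableName.META_TABLE_NAME)) {
    return 300; // hbase:meta outranks everything
  } else if (tn.isSystemTable()) {
    return 200; // other hbase:* system tables
  }
  return 100;   // user tables
}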
Example 7: merge
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Scans the table and merges two adjacent regions if they are small. This
 * only happens when a lot of rows are deleted.
 *
 * When merging the hbase:meta region, the HBase instance must be offline.
 * When merging a normal table, the HBase instance must be online, but the
 * table must be disabled.
 *
 * @param conf - configuration object for HBase
 * @param fs - FileSystem where regions reside
 * @param tableName - Table whose regions are to be merged
 * @param testMasterRunning True if we are to check whether the master is
 *          running before merging
 * @throws IOException
 */
public static void merge(Configuration conf, FileSystem fs,
    final TableName tableName, final boolean testMasterRunning)
    throws IOException {
  boolean masterIsRunning = false;
  if (testMasterRunning) {
    masterIsRunning = HConnectionManager
        .execute(new HConnectable<Boolean>(conf) {
          @Override
          public Boolean connect(HConnection connection) throws IOException {
            return connection.isMasterRunning();
          }
        });
  }
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    if (masterIsRunning) {
      throw new IllegalStateException(
          "Can not compact hbase:meta table if instance is on-line");
    }
    // TODO reenable new OfflineMerger(conf, fs).process();
  } else {
    if (!masterIsRunning) {
      throw new IllegalStateException(
          "HBase instance must be running to merge a normal table");
    }
    Admin admin = new HBaseAdmin(conf);
    try {
      if (!admin.isTableDisabled(tableName)) {
        throw new TableNotDisabledException(tableName);
      }
    } finally {
      admin.close();
    }
    new OnlineMerger(conf, fs, tableName).process();
  }
}
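A hypothetical invocation for a user table (the table name is illustrative; the table must already be disabled and conf must point at a running cluster):

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FSUtils.getRootDir(conf).getFileSystem(conf);
merge(conf, fs, TableName.valueOf("demo"), true); // true: check the master state first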
Example 8: isTableAvailable
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public boolean isTableAvailable(final TableName tableName) throws IOException {
  final AtomicBoolean available = new AtomicBoolean(true);
  final AtomicInteger regionCount = new AtomicInteger(0);
  MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
    @Override
    public boolean processRow(Result row) throws IOException {
      HRegionInfo info = MetaScanner.getHRegionInfo(row);
      if (info != null && !info.isSplitParent()) {
        if (tableName.equals(info.getTable())) {
          ServerName server = HRegionInfo.getServerName(row);
          if (server == null) {
            available.set(false);
            return false;
          }
          regionCount.incrementAndGet();
        } else if (tableName.compareTo(info.getTable()) < 0) {
          // Stop scanning once we are past the target table
          return false;
        }
      }
      return true;
    }
  };
  MetaScanner.metaScan(this, visitor, tableName);
  return available.get() && (regionCount.get() > 0);
}
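Note how equals and compareTo cooperate here: rows in hbase:meta sort by table name, so once the visitor sees a table that sorts after the target, the scan stops early instead of reading the rest of meta. A hypothetical call on an open connection:

boolean ready = isTableAvailable(TableName.valueOf("demo"));
// true only once every region of "demo" is assigned to a server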
Example 9: relocateRegion
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public RegionLocations relocateRegion(final TableName tableName,
    final byte[] row, int replicaId) throws IOException {
  // Since this is an explicit request not to use any caching, finding
  // disabled tables should not be desirable. This ensures that an exception is
  // thrown the first time a disabled table is interacted with.
  if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
    throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
  }
  return locateRegion(tableName, row, false, true, replicaId);
}
Example 10: isCatalogTable
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
private static boolean isCatalogTable(final TableName tableName) {
  return tableName.equals(TableName.META_TABLE_NAME);
}
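Short as it is, this predicate captures the whole catalog-vs-user distinction; its behavior follows directly from the equals contract:

// isCatalogTable(TableName.META_TABLE_NAME)  -> true
// isCatalogTable(TableName.valueOf("demo"))  -> false (hypothetical user table)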
Example 11: fixOrphanTables
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Fix orphan tables by creating a .tableinfo file under each tableDir:<br>
 * 1. if the TableInfo is cached, recover the .tableinfo from it accordingly<br>
 * 2. else create a default .tableinfo file with the following items:<br>
 * 2.1 the correct table name<br>
 * 2.2 the correct column family list<br>
 * 2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
 * @throws IOException
 */
public void fixOrphanTables() throws IOException {
  if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
    List<TableName> tmpList = new ArrayList<TableName>();
    tmpList.addAll(orphanTableDirs.keySet());
    HTableDescriptor[] htds = getHTableDescriptors(tmpList);
    Iterator<Entry<TableName, Set<String>>> iter =
        orphanTableDirs.entrySet().iterator();
    int j = 0;
    int numFailedCase = 0;
    FSTableDescriptors fstd = new FSTableDescriptors(getConf());
    while (iter.hasNext()) {
      Entry<TableName, Set<String>> entry = iter.next();
      TableName tableName = entry.getKey();
      LOG.info("Trying to fix orphan table error: " + tableName);
      if (j < htds.length) {
        if (tableName.equals(htds[j].getTableName())) {
          HTableDescriptor htd = htds[j];
          LOG.info("fixing orphan table: " + tableName + " from cache");
          fstd.createTableDescriptor(htd, true);
          j++;
          iter.remove();
        }
      } else {
        if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
          LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
          LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
          iter.remove();
        } else {
          LOG.error("Unable to create default .tableinfo for " + tableName
              + " while missing column family information");
          numFailedCase++;
        }
      }
      fixes++;
    }
    if (orphanTableDirs.isEmpty()) {
      // all orphanTableDirs were luckily recovered
      // re-run doFsck after recovering the .tableinfo file
      setShouldRerun();
      LOG.warn("Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed");
    } else if (numFailedCase > 0) {
      LOG.error("Failed to fix " + numFailedCase
          + " OrphanTables with default .tableinfo files");
    }
  }
  // clean up the list
  orphanTableDirs.clear();
}
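For context: in HBase 1.x this repair path is normally reached through the hbck command line, where the -fixTableOrphans option enables shouldFixTableOrphans(); the fabricated .tableinfo is only a best-effort default, which is why the log strongly recommends reviewing the resulting HTableDescriptor afterwards.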
Example 12: loadHdfsRegionDirs
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Scan HDFS for all regions, recording their information into
 * regionInfoMap.
 */
public void loadHdfsRegionDirs() throws IOException, InterruptedException {
  Path rootDir = FSUtils.getRootDir(getConf());
  FileSystem fs = rootDir.getFileSystem(getConf());
  // list all tables from HDFS
  List<FileStatus> tableDirs = Lists.newArrayList();
  boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
  List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
  for (Path path : paths) {
    TableName tableName = FSUtils.getTableName(path);
    if ((!checkMetaOnly &&
        isTableIncluded(tableName)) ||
        tableName.equals(TableName.META_TABLE_NAME)) {
      tableDirs.add(fs.getFileStatus(path));
    }
  }
  // verify that the version file exists
  if (!foundVersionFile) {
    errors.reportError(ERROR_CODE.NO_VERSION_FILE,
        "Version file does not exist in root dir " + rootDir);
    if (shouldFixVersionFile()) {
      LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME + " file.");
      setShouldRerun();
      FSUtils.setVersion(fs, rootDir, getConf().getInt(
          HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
          HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  }
  // level 1: <HBASE_DIR>/*
  List<WorkItemHdfsDir> dirs = new ArrayList<WorkItemHdfsDir>(tableDirs.size());
  List<Future<Void>> dirsFutures;
  for (FileStatus tableDir : tableDirs) {
    LOG.debug("Loading region dirs from " + tableDir.getPath());
    dirs.add(new WorkItemHdfsDir(this, fs, errors, tableDir));
  }
  // Invoke and wait for Callables to complete
  dirsFutures = executor.invokeAll(dirs);
  for (Future<Void> f : dirsFutures) {
    try {
      f.get();
    } catch (ExecutionException e) {
      LOG.warn("Could not load region dir ", e.getCause());
    }
  }
  errors.print("");
}
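Likewise, the version-file repair above is gated by hbck's -fixVersionFile option (shouldFixVersionFile()), and the table-directory filtering shows the usual special case: hbase:meta is always scanned, even when an include list or meta-only mode narrows everything else.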