This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.getEncodedName. If you have been wondering what HRegionInfo.getEncodedName does, how to use it, or where to find examples of it, the curated method examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.HRegionInfo.
Below are 15 code examples of HRegionInfo.getEncodedName, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
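For orientation before the examples: getEncodedName() returns the region's short unique identifier (for current region name formats, an MD5 hex hash of the full region name), and HBase uses it as the region's directory name on the filesystem. A minimal sketch, with a hypothetical table name and layout:

HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo_table"));
String encoded = hri.getEncodedName(); // e.g. "fd1e73e8a96c486090c5cec07b4894c4"
// The region's directory under the table dir is named by the encoded name:
Path regionDir = new Path("/hbase/data/default/demo_table", encoded);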
Example 1: deleteFamilyFromFS
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
throws IOException {
// archive family store files
Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
// delete the family folder
Path familyDir = new Path(tableDir,
new Path(region.getEncodedName(), Bytes.toString(familyName)));
if (!fs.delete(familyDir, true)) {
if (fs.exists(familyDir)) {
throw new IOException("Could not delete family "
+ Bytes.toString(familyName) + " from FileSystem for region "
+ region.getRegionNameAsString() + "(" + region.getEncodedName()
+ ")");
}
}
}
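The path deleted above has the shape tableDir/&lt;encodedName&gt;/&lt;familyName&gt;. A hedged sketch with hypothetical values, mirroring how deleteFamilyFromFS builds it:

// Assumed layout only; region and familyName as in the method above.
Path tableDir = new Path("/hbase/data/default/demo_table");
Path familyDir = new Path(tableDir,
new Path(region.getEncodedName(), Bytes.toString(familyName)));
// => /hbase/data/default/demo_table/<encodedName>/<familyName>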
Example 2: createReferences
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* @param services Master services instance.
* @param htd descriptor of the table the regions belong to
* @param parent parent region being split
* @param daughter daughter region to write the reference under
* @param midkey split key the reference points at
* @param top True if we are to write a 'top' reference.
* @return Path to reference we created.
* @throws IOException
*/
private Path createReferences(final MasterServices services,
final HTableDescriptor htd, final HRegionInfo parent,
final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
Path rootdir = services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
Path storedir = HStore.getStoreHomedir(tabledir, daughter,
htd.getColumnFamilies()[0].getName());
Reference ref =
top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
long now = System.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
FileSystem fs = services.getMasterFileSystem().getFileSystem();
ref.write(fs, p);
return p;
}
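Note the naming convention used above: a reference file is named "&lt;timestamp&gt;.&lt;parentEncodedName&gt;", so the parent's encoded name is what lets HBase resolve a daughter's reference back to the parent's store file. A short sketch of just that name:

// Hedged sketch of the reference file name built in createReferences.
long ts = System.currentTimeMillis();
String refName = Long.toString(ts) + "." + parent.getEncodedName();
// e.g. "1439541935104.fd1e73e8a96c486090c5cec07b4894c4"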
Example 3: archiveFamily
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Remove the store files of the specified column family from the specified
* region, either by archiving them or deleting them outright.
* @param fs the filesystem where the store files live
* @param conf {@link Configuration} to examine to determine the archive directory
* @param parent Parent region hosting the store files
* @param tableDir {@link Path} to where the table is being stored (for building the archive path)
* @param family the family hosting the store files
* @throws IOException if the files could not be correctly disposed.
*/
public static void archiveFamily(FileSystem fs, Configuration conf,
HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
if (storeFiles == null) {
LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
", family=" + Bytes.toString(family));
return;
}
FileStatusConverter getAsFile = new FileStatusConverter(fs);
Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);
// do the actual archive
if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
throw new IOException("Failed to archive/delete all the files for region:"
+ Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
+ " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
}
}
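A minimal usage sketch, assuming fs, conf, rootDir and region are already in scope (the family name "cf" is hypothetical):

// Archive every store file of column family "cf" for this region.
Path tableDir = FSUtils.getTableDir(rootDir, region.getTable());
HFileArchiver.archiveFamily(fs, conf, region, tableDir, Bytes.toBytes("cf"));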
Example 4: deleteRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Remove a region from all state maps.
*/
@VisibleForTesting
public synchronized void deleteRegion(final HRegionInfo hri) {
String encodedName = hri.getEncodedName();
regionsInTransition.remove(encodedName);
regionStates.remove(encodedName);
TableName table = hri.getTable();
Map<String, RegionState> indexMap = regionStatesTableIndex.get(table);
indexMap.remove(encodedName);
if (indexMap.isEmpty()) {
regionStatesTableIndex.remove(table);
}
lastAssignments.remove(encodedName);
ServerName sn = regionAssignments.remove(hri);
if (sn != null) {
Set<HRegionInfo> regions = serverHoldings.get(sn);
regions.remove(hri);
}
}
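A hedged usage sketch (regionStates and splitParent are hypothetical names):

// After a split completes, drop the parent region from every state map.
regionStates.deleteRegion(splitParent);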
Example 5: createRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
byte[] startKey, byte[] endKey) throws IOException {
Table meta = new HTable(conf, TableName.META_TABLE_NAME);
HTableDescriptor htd = htbl.getTableDescriptor();
HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);
LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
hri.getEncodedName());
fs.mkdirs(p);
Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
FSDataOutputStream out = fs.create(riPath);
out.write(hri.toDelimitedByteArray());
out.close();
// add to meta.
MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
return hri;
}
Example 6: verifyRegionState
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Verifies that the specified region is in the specified state in ZooKeeper.
* <p>
* Returns true if region is in transition and in the specified state in
* ZooKeeper. Returns false if the region does not exist in ZK or is in
* a different state.
* <p>
* This method sync()s with ZooKeeper, so it yields an up-to-date result,
* but it is a slow read.
* @param zkw zookeeper watcher connection
* @param region region to verify
* @param expectedState the transition state the region's znode is expected to be in
* @return true if region exists and is in expected state
* @throws KeeperException
* @throws DeserializationException
*/
static boolean verifyRegionState(ZooKeeperWatcher zkw, HRegionInfo region, EventType expectedState)
throws KeeperException, DeserializationException {
String encoded = region.getEncodedName();
String node = ZKAssign.getNodeName(zkw, encoded);
zkw.sync(node);
// Read existing data of the node
byte [] existingBytes = null;
try {
existingBytes = ZKUtil.getDataAndWatch(zkw, node);
} catch (KeeperException.NoNodeException nne) {
return false;
}
if (existingBytes == null) return false;
RegionTransition rt = RegionTransition.parseFrom(existingBytes);
return rt.getEventType().equals(expectedState);
}
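A hedged usage sketch, assuming zkw and region are in scope:

// Ask ZooKeeper whether the region's unassigned znode currently says OPENING.
boolean opening = verifyRegionState(zkw, region, EventType.RS_ZK_REGION_OPENING);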
Example 7: getRegionName
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
@Override
public String getRegionName() {
HRegionInfo regionInfo = this.region.getRegionInfo();
if (regionInfo == null) {
return UNKNOWN;
}
return regionInfo.getEncodedName();
}
Example 8: transitionFromOfflineToOpening
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Transition ZK node from OFFLINE to OPENING.
* @param regionInfo region info instance
* @param ord open region details; for the ZK implementation this includes
* the version of the OFFLINE node, which is compared before changing
* the node's state from OFFLINE
* @return True if successful transition.
*/
@Override
public boolean transitionFromOfflineToOpening(HRegionInfo regionInfo,
OpenRegionDetails ord) {
ZkOpenRegionDetails zkOrd = (ZkOpenRegionDetails) ord;
// encoded name is used as znode encoded name in ZK
final String encodedName = regionInfo.getEncodedName();
// TODO: should also handle transition from CLOSED?
try {
// Initialize the znode version.
zkOrd.setVersion(ZKAssign.transitionNode(watcher, regionInfo,
zkOrd.getServerName(), EventType.M_ZK_REGION_OFFLINE,
EventType.RS_ZK_REGION_OPENING, zkOrd.getVersionOfOfflineNode()));
} catch (KeeperException e) {
LOG.error("Error transition from OFFLINE to OPENING for region=" +
encodedName, e);
zkOrd.setVersion(-1);
return false;
}
boolean b = isGoodVersion(zkOrd);
if (!b) {
LOG.warn("Failed transition from OFFLINE to OPENING for region=" +
encodedName);
}
return b;
}
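For contrast, a hedged sketch of the underlying ZK call (serverName and offlineVersion are hypothetical names): ZKAssign.transitionNode returns the new znode version on success and, as read here, -1 on a failed transition.

int version = ZKAssign.transitionNode(watcher, regionInfo, serverName,
EventType.M_ZK_REGION_OFFLINE, EventType.RS_ZK_REGION_OPENING, offlineVersion);
boolean ok = version != -1; // mirrors the isGoodVersion() check above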
Example 9: commitMergedRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Commit a merged region, moving it from the merges temporary directory to the proper location in
* the filesystem.
*
* @param mergedRegionInfo merged region {@link HRegionInfo}
* @throws IOException
*/
void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
// Move the tmp dir in the expected location
if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
if (!fs.rename(mergedRegionTmpDir, regionDir)) {
throw new IOException("Unable to rename " + mergedRegionTmpDir + " to " + regionDir);
}
}
}
Example 10: testOpenFailed
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* This tests the case where a region fails to open
*/
@Test (timeout=60000)
public void testOpenFailed() throws Exception {
String table = "testOpenFailed";
try {
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);
Table meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
MetaTableAccessor.addRegionToMeta(meta, hri);
MyLoadBalancer.controledRegion = hri.getEncodedName();
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
AssignmentManager am = master.getAssignmentManager();
assertFalse(am.waitForAssignment(hri));
RegionState state = am.getRegionStates().getRegionState(hri);
assertEquals(RegionState.State.FAILED_OPEN, state.getState());
// Failed to open since no plan, so it's on no server
assertNull(state.getServerName());
MyLoadBalancer.controledRegion = null;
master.assignRegion(hri);
assertTrue(am.waitForAssignment(hri));
ServerName serverName = master.getAssignmentManager().
getRegionStates().getRegionServerOfRegion(hri);
TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);
} finally {
MyLoadBalancer.controledRegion = null;
TEST_UTIL.deleteTable(Bytes.toBytes(table));
}
}
Example 11: regionOffline
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* A region is offline, won't be in transition any more. Its state
* should be the specified expected state, which can only be
* Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew.
*/
public void regionOffline(
final HRegionInfo hri, final State expectedState) {
Preconditions.checkArgument(expectedState == null
|| RegionState.isUnassignable(expectedState),
"Offlined region should not be " + expectedState);
if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
// Remove it from all region maps
deleteRegion(hri);
return;
}
State newState =
expectedState == null ? State.OFFLINE : expectedState;
updateRegionState(hri, newState);
String encodedName = hri.getEncodedName();
synchronized (this) {
regionsInTransition.remove(encodedName);
ServerName oldServerName = regionAssignments.remove(hri);
if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
if (newState == State.MERGED || newState == State.SPLIT
|| hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
// Offline the region only if it's merged/split, or the table is disabled/disabling.
// Otherwise, offline it from this server only when it is online on a different server.
LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
removeFromServerHoldings(oldServerName, hri);
removeFromReplicaMapping(hri);
} else {
// Need to remember it so that we can offline it from this
// server when it is online on a different server.
oldAssignments.put(encodedName, oldServerName);
}
}
}
}
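A hedged usage sketch (regionStates and splitParent are hypothetical names); SPLIT is one of the unassignable states the Preconditions check allows:

// Offline the parent of a completed split; it will never be reassigned.
regionStates.regionOffline(splitParent, State.SPLIT);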
Example 12: cloneHdfsRegions
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Clone specified regions. For each region create a new region
* and create a HFileLink for each hfile.
*/
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
final Map<String, SnapshotRegionManifest> regionManifests,
final List<HRegionInfo> regions) throws IOException {
if (regions == null || regions.isEmpty()) return null;
final Map<String, HRegionInfo> snapshotRegions =
new HashMap<String, HRegionInfo>(regions.size());
// clone region info (change embedded tableName with the new one)
HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
for (int i = 0; i < clonedRegionsInfo.length; ++i) {
// clone the region info from the snapshot region info
HRegionInfo snapshotRegionInfo = regions.get(i);
clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);
// add the region name mapping between snapshot and cloned
String snapshotRegionName = snapshotRegionInfo.getEncodedName();
String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);
// Add mapping between cloned region name and snapshot region info
snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
}
// create the regions on disk
ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDir,
tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
@Override
public void fillRegion(final HRegion region) throws IOException {
HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
}
});
return clonedRegionsInfo;
}
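A hedged lookup sketch: it assumes regionsMap is keyed with a byte[] comparator (e.g. a TreeMap using Bytes.BYTES_COMPARATOR), since plain HashMap equality does not work for byte arrays:

// Translate a snapshot region's encoded name into its clone's encoded name.
byte[] cloned = regionsMap.get(Bytes.toBytes(snapshotRegionInfo.getEncodedName()));
String clonedEncodedName = Bytes.toString(cloned);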
Example 13: processAlreadyOpenedRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
// Remove region from in-memory transition and unassigned node from ZK
// While trying to enable the table the regions of the table were
// already enabled.
LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
+ " to " + sn);
String encodedName = region.getEncodedName();
// If using ZK for assignment, the region's already-opened event should not be
// handled here; leave it to the ZK event. See HBASE-14407.
if (useZKForAssignment) {
String node = ZKAssign.getNodeName(watcher, encodedName);
Stat stat = new Stat();
try {
byte[] existingBytes = ZKUtil.getDataNoWatch(watcher, node, stat);
if (existingBytes != null) {
RegionTransition rt = RegionTransition.parseFrom(existingBytes);
EventType et = rt.getEventType();
if (et.equals(EventType.RS_ZK_REGION_OPENED)) {
LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
+ " and node in "+et+" state");
return;
}
}
} catch (KeeperException ke) {
LOG.warn("Unexpected ZK exception getData " + node
+ " node for the region " + encodedName, ke);
} catch (DeserializationException e) {
LOG.warn("Get RegionTransition from zk deserialization failed! ", e);
}
deleteNodeInStates(encodedName, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
}
regionStates.regionOnline(region, sn);
}
Example 14: cleanupDaughterRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
/**
* Remove daughter region
*
* @param regionInfo daughter {@link HRegionInfo}
* @throws IOException
*/
void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
throw new IOException("Failed delete of " + regionDir);
}
}
Example 15: testFindsSnapshotFilesWhenCleaning
import org.apache.hadoop.hbase.HRegionInfo; // import the class the method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
Configuration conf = TEST_UTIL.getConfiguration();
FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
Path rootDir = FSUtils.getRootDir(conf);
Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
FileSystem fs = FileSystem.get(conf);
SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
cleaner.setConf(conf);
// write an hfile to the snapshot directory
String snapshotName = "snapshot";
byte[] snapshot = Bytes.toBytes(snapshotName);
TableName tableName = TableName.valueOf("table");
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
HRegionInfo mockRegion = new HRegionInfo(tableName);
Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
Path familyDir = new Path(regionSnapshotDir, "family");
// create a reference to a supposedly valid hfile
String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
Path refFile = new Path(familyDir, hfile);
// make sure the reference file exists
fs.create(refFile);
// create the hfile in the archive
fs.mkdirs(archivedHfileDir);
fs.createNewFile(new Path(archivedHfileDir, hfile));
// make sure that the file isn't deletable
assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
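As a complementary check, a hedged sketch of the negative case (the hfile name is hypothetical): an archived file with no snapshot reference should be reported deletable.

Path unreferenced = new Path(archivedHfileDir, "0123456789abcdef0123456789abcdef");
fs.createNewFile(unreferenced);
assertTrue(cleaner.isFileDeletable(fs.getFileStatus(unreferenced)));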