This article collects typical usage examples of the Java class org.apache.hadoop.hbase.catalog.MetaEditor. If you are wondering what MetaEditor is for, how to use it, or where to find examples of it in use, the curated class code examples below may help.

The MetaEditor class belongs to the org.apache.hadoop.hbase.catalog package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
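Before the individual examples, here is a minimal sketch of the most common MetaEditor operation: writing a region's row into the catalog table. The table name, key range, and the assumption that a CatalogTracker is available from the surrounding master/server context are illustrative, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch: register one region of an existing table in hbase:meta.
// The CatalogTracker is assumed to come from the running master/server.
static void registerRegion(CatalogTracker catalogTracker) throws IOException {
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo_table"),
      Bytes.toBytes("a"), Bytes.toBytes("z"));
  MetaEditor.addRegionToMeta(catalogTracker, hri);
}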
Example 1: fixupDaughter

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * Check that an individual daughter is up in .META.; fix it up if it's not.
 * @param result The contents of the parent row in .META.
 * @param qualifier Which daughter to check for.
 * @return 1 if the daughter was missing and has been fixed, otherwise 0
 * @throws IOException
 */
static int fixupDaughter(final Result result, final byte [] qualifier,
    final AssignmentManager assignmentManager,
    final CatalogTracker catalogTracker)
throws IOException {
  HRegionInfo daughter =
      MetaReader.parseHRegionInfoFromCatalogResult(result, qualifier);
  if (daughter == null) return 0;
  if (isDaughterMissing(catalogTracker, daughter)) {
    LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
    MetaEditor.addDaughter(catalogTracker, daughter, null);
    // TODO: Log WARN if the regiondir does not exist in the fs. If it's not
    // there then something is wonky about the split -- things will keep going
    // but we could be missing references to the parent region.
    // And assign it.
    assignmentManager.assign(daughter, true);
    return 1;
  } else {
    LOG.debug("Daughter " + daughter.getRegionNameAsString() + " present");
  }
  return 0;
}
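The isDaughterMissing helper is not included in this snippet. A rough sketch of the idea, assuming we simply treat the daughter as missing when its row cannot be read back from .META. (the real HBase implementation scans the catalog; this simplified body is an assumption for illustration, using the same imports as the example above):

// Sketch (assumption): the daughter counts as missing when .META. has no
// row carrying its HRegionInfo.
private static boolean isDaughterMissing(final CatalogTracker catalogTracker,
    final HRegionInfo daughter) throws IOException {
  Pair<HRegionInfo, ServerName> pair =
      MetaReader.getRegion(catalogTracker, daughter.getRegionName());
  return pair == null || pair.getFirst() == null;
}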
Example 2: cleanMergeRegion

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * If the merged region no longer holds references to the merging regions,
 * archive the merging regions on HDFS and delete the merge references from
 * hbase:meta.
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we deleted the references in the merged region's hbase:meta
 *   row and archived the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
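MetaEditor.deleteMergeQualifiers is the call that scrubs the merge bookkeeping. A plausible sketch of what it amounts to — deleting the merge-tracking columns from the merged region's row — written here against a plain HTable handle on hbase:meta; the helper name and the use of HTable are assumptions for illustration, not the verified MetaEditor source:

// Sketch (assumption): drop the merge-tracking columns from the merged
// region's hbase:meta row once the merging regions are archived.
static void deleteMergeQualifiersSketch(HTable meta, HRegionInfo mergedRegion)
    throws IOException {
  Delete delete = new Delete(mergedRegion.getRegionName());
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
  meta.delete(delete);
}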
Example 3: updateMeta

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

@Override
protected void updateMeta(final byte [] oldRegion1,
    final byte [] oldRegion2,
    HRegion newRegion)
throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    if (Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);
  MetaEditor.addRegionToMeta(table, newRegion.getRegionInfo());
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: "
        + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
Example 4: resetSplitParent

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * Reset the split-parent region info in the meta table.
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaEditor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
Example 5: prepareMutationsForMerge

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
    HRegionInfo regionB, ServerName serverName, List<Mutation> mutations) throws IOException {
  HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
  // Put for the merged (parent) region
  Put putOfMerged = MetaEditor.makePutFromRegionInfo(copyOfMerged);
  putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
  putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
  mutations.add(putOfMerged);
  // Deletes for the merging regions
  Delete deleteA = MetaEditor.makeDeleteFromRegionInfo(regionA);
  Delete deleteB = MetaEditor.makeDeleteFromRegionInfo(regionB);
  mutations.add(deleteA);
  mutations.add(deleteB);
  // The merged region is a new region, so openSeqNum = 1 is fine.
  addLocation(putOfMerged, serverName, 1);
}
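The addLocation helper invoked on the last line is not part of this snippet. A sketch of what it plausibly does, following the server, startcode, and seqnum columns that hbase:meta rows carry (treat the exact body as an assumption):

// Sketch (assumption): record the hosting server and the open sequence
// number on the region's meta row.
private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
      Bytes.toBytes(sn.getHostAndPort()));
  p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
      Bytes.toBytes(sn.getStartcode()));
  p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
      Bytes.toBytes(openSeqNum));
  return p;
}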
Example 6: offlineParentInMetaAndputMetaEntries

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
    HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
    ServerName serverName, List<Mutation> metaEntries) throws IOException {
  List<Mutation> mutations = metaEntries;
  HRegionInfo copyOfParent = new HRegionInfo(parent);
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for the parent
  Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
  MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
  mutations.add(putParent);
  // Puts for the daughters
  Put putA = MetaEditor.makePutFromRegionInfo(splitA);
  Put putB = MetaEditor.makePutFromRegionInfo(splitB);
  addLocation(putA, serverName, 1); // these are new regions, openSeqNum = 1 is fine
  addLocation(putB, serverName, 1);
  mutations.add(putA);
  mutations.add(putB);
  MetaEditor.mutateMetaTable(catalogTracker, mutations);
}
Example 7: createMultiRegionsInMeta

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * Create rows in hbase:meta for regions of the specified table with the
 * specified start keys. The first start key should be a zero-length byte
 * array if you want to form a proper range of regions; see the usage sketch
 * after this example.
 * @param conf
 * @param htd
 * @param startKeys
 * @return list of region info for the regions added to meta
 * @throws IOException
 */
public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
throws IOException {
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  // add custom ones
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
    MetaEditor.addRegionToMeta(meta, hri);
    newRegions.add(hri);
  }
  meta.close();
  return newRegions;
}
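A hypothetical caller, to show the intended key layout: with an empty first start key, the modulo in the loop makes the last region wrap back to an empty end key, so the generated regions cover the whole key space (conf and htd are assumed to be set up elsewhere):

byte[][] startKeys = new byte[][] {
    HConstants.EMPTY_BYTE_ARRAY, // first region starts at the beginning of the key space
    Bytes.toBytes("m")           // second region starts at "m" and ends at the empty end key
};
List<HRegionInfo> regions = createMultiRegionsInMeta(conf, htd, startKeys);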
Example 8: preMergeCommit

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

@Override
public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
    HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
  preMergeBeforePONRCalled = true;
  RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  List<HRegion> onlineRegions =
      rs.getOnlineRegions(TableName.valueOf("testRegionServerObserver_2"));
  rmt = new RegionMergeTransaction(onlineRegions.get(0), onlineRegions.get(1), true);
  if (!rmt.prepare(rs)) {
    LOG.error("Prepare for the region merge of table "
        + onlineRegions.get(0).getTableDesc().getNameAsString()
        + " failed. So returning null. ");
    ctx.bypass();
    return;
  }
  mergedRegion = rmt.stepsBeforePONR(rs, rs, false);
  rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(),
      regionB.getRegionInfo(), rs.getServerName(), metaEntries);
  MetaEditor.mutateMetaTable(rs.getCatalogTracker(), metaEntries);
}
Example 9: createRegion

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

protected HRegionInfo createRegion(Configuration conf, final HTable htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);
  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();
  // add to meta
  MetaEditor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Example 10: testOpenClosingRegion

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

@Test
public void testOpenClosingRegion() throws Exception {
  Assert.assertTrue(getRS().getRegion(regionName).isAvailable());
  try {
    // we re-opened meta, so some of its data is lost
    ServerName sn = getRS().getServerName();
    MetaEditor.updateRegionLocation(getRS().catalogTracker,
        hri, sn, getRS().getRegion(regionName).getOpenSeqNum());
    // fake the region as closing now; we need to clear this state afterwards
    getRS().regionsInTransitionInRS.put(hri.getEncodedNameAsBytes(), Boolean.FALSE);
    AdminProtos.OpenRegionRequest orr =
        RequestConverter.buildOpenRegionRequest(sn, hri, 0, null);
    getRS().rpcServices.openRegion(null, orr);
    Assert.fail("The closing region should not be opened");
  } catch (ServiceException se) {
    Assert.assertTrue("The region should already be in transition",
        se.getCause() instanceof RegionAlreadyInTransitionException);
  } finally {
    getRS().regionsInTransitionInRS.remove(hri.getEncodedNameAsBytes());
  }
}
Example 11: fixupDaughter

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * Check that an individual daughter is up in .META.; fix it up if it's not.
 * @param result The contents of the parent row in .META. - not used
 * @param daughter Which daughter to check for.
 * @return 1 if the daughter was missing and has been fixed, otherwise 0
 * @throws IOException
 */
static int fixupDaughter(final Result result, HRegionInfo daughter,
    final AssignmentManager assignmentManager,
    final CatalogTracker catalogTracker)
throws IOException {
  if (daughter == null) return 0;
  if (isDaughterMissing(catalogTracker, daughter)) {
    LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
    MetaEditor.addDaughter(catalogTracker, daughter, null);
    // TODO: Log WARN if the regiondir does not exist in the fs. If it's not
    // there then something is wonky about the split -- things will keep going
    // but we could be missing references to the parent region.
    // And assign it.
    assignmentManager.assign(daughter, true, true);
    return 1;
  } else {
    LOG.debug("Daughter " + daughter.getRegionNameAsString() + " present");
  }
  return 0;
}
Example 12: createMultiRegionsInMeta

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * Create rows in META for regions of the specified table with the specified
 * start keys. The first start key should be a zero-length byte array if you
 * want to form a proper range of regions.
 * @param conf
 * @param htd
 * @param startKeys
 * @return list of region info for the regions added to meta
 * @throws IOException
 */
public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
throws IOException {
  HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  // add custom ones
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
        startKeys[j]);
    MetaEditor.addRegionToMeta(meta, hri);
    newRegions.add(hri);
  }
  meta.close();
  return newRegions;
}
Example 13: testAssignRegion

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * This tests region assignment.
 */
@Test
public void testAssignRegion() throws Exception {
  String table = "testAssignRegion";
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
        desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaEditor.addRegionToMeta(meta, hri);
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    master.getAssignmentManager().waitForAssignment(hri);
    ServerName serverName = master.getAssignmentManager().
        getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
  } finally {
    TEST_UTIL.deleteTable(Bytes.toBytes(table));
  }
}
Example 14: createRegion

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

protected HRegionInfo createRegion(Configuration conf, final HTable htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getTableName(), startKey, endKey);
  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(rootDir + "/" + htd.getNameAsString(),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegion.REGIONINFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();
  // add to meta
  MetaEditor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Example 15: cleanParent

import org.apache.hadoop.hbase.catalog.MetaEditor; // import the required package/class

/**
 * If the daughters no longer hold references to the parent, delete the parent.
 * @param parent HRegionInfo of the split, offlined parent
 * @param rowContent Content of the <code>parent</code> row in
 *   <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from the meta table and from
 *   the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
throws IOException {
  boolean result = false;
  // Run checks on each daughter split.
  HRegionInfo a_region = getDaughterRegionInfo(rowContent, HConstants.SPLITA_QUALIFIER);
  HRegionInfo b_region = getDaughterRegionInfo(rowContent, HConstants.SPLITB_QUALIFIER);
  Pair<Boolean, Boolean> a =
      checkDaughterInFs(parent, a_region, HConstants.SPLITA_QUALIFIER);
  Pair<Boolean, Boolean> b =
      checkDaughterInFs(parent, b_region, HConstants.SPLITB_QUALIFIER);
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
        " because daughter splits no longer hold references");
    // This latter regionOffline should not be necessary but is done for now
    // until we let go of regionserver-to-master heartbeats. See HBASE-3368.
    if (this.services.getAssignmentManager() != null) {
      // The mock used in testing CatalogJanitor returns null for getAssignmentManager.
      // Allow for a null result out of getAssignmentManager.
      this.services.getAssignmentManager().regionOffline(parent);
    }
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
    result = true;
  }
  return result;
}
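For context, a plausible reading of the hasNoReferences helper used above: checkDaughterInFs is assumed to return a Pair of (daughter directory exists in the filesystem, daughter still holds reference files to the parent), so the parent is cleanable once each daughter is either gone or reference-free. The body below is a sketch under that assumption, not the verified source:

// Sketch (assumption): first = daughter dir exists in FS,
// second = daughter still references the parent's hfiles.
private boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
  return !p.getFirst() || !p.getSecond();
}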