This article collects typical usage examples of the Java method org.apache.hadoop.hbase.catalog.MetaEditor.makePutFromRegionInfo. If you are unsure what MetaEditor.makePutFromRegionInfo does, how to call it, or where to find working examples, the hand-picked snippets below should help. You can also explore the enclosing class, org.apache.hadoop.hbase.catalog.MetaEditor, for further usage examples.
Nine code examples of MetaEditor.makePutFromRegionInfo are shown below, ordered by popularity by default.
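For orientation, here is a minimal usage sketch first (hedged: it assumes the 0.96/0.98-era catalog API, and the table name and keys are made up). makePutFromRegionInfo builds a Put keyed on the region name, carrying the serialized HRegionInfo in the catalog family:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MakePutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Describe a region of a hypothetical table and build its meta-row Put.
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("example_table"),
        Bytes.toBytes("startKey"), Bytes.toBytes("endKey"));
    Put p = MetaEditor.makePutFromRegionInfo(hri); // row key = region name
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    meta.put(p);
    meta.close();
  }
}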
Example 1: resetSplitParent
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
/**
 * Reset the split parent region info in the meta table.
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaEditor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
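Note on Example 1: the Delete (clearing info:splitA and info:splitB) and the Put (rewriting the region info with the offline and split flags cleared) are bundled into one RowMutations, so meta.mutateRow applies them atomically to the single meta row; issued separately, a crash in between could leave the parent row half-reset.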
Example 2: prepareMutationsForMerge
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
    HRegionInfo regionB, ServerName serverName, List<Mutation> mutations) throws IOException {
  HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
  // Put for parent
  Put putOfMerged = MetaEditor.makePutFromRegionInfo(copyOfMerged);
  putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
  putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
  mutations.add(putOfMerged);
  // Deletes for the merging regions
  Delete deleteA = MetaEditor.makeDeleteFromRegionInfo(regionA);
  Delete deleteB = MetaEditor.makeDeleteFromRegionInfo(regionB);
  mutations.add(deleteA);
  mutations.add(deleteB);
  // The merged region is new, so openSeqNum = 1 is fine.
  addLocation(putOfMerged, serverName, 1);
}
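addLocation itself is not shown in this example. As a hedged sketch, modeled on the 0.98-era MetaEditor.addLocation (the helper called here is presumably equivalent), it fills in the server columns on the same Put roughly like this:

static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  // Record which server hosts the region, that server's startcode, and the
  // region's open sequence number, all in the catalog family.
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
      Bytes.toBytes(sn.getHostAndPort()));
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
      Bytes.toBytes(sn.getStartcode()));
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
      Bytes.toBytes(openSeqNum));
  return p;
}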
Example 3: offlineParentInMetaAndputMetaEntries
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
    HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
    ServerName serverName, List<Mutation> metaEntries) throws IOException {
  List<Mutation> mutations = metaEntries;
  HRegionInfo copyOfParent = new HRegionInfo(parent);
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for parent
  Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
  MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
  mutations.add(putParent);
  // Puts for daughters
  Put putA = MetaEditor.makePutFromRegionInfo(splitA);
  Put putB = MetaEditor.makePutFromRegionInfo(splitB);
  addLocation(putA, serverName, 1); // these are new regions, openSeqNum = 1 is fine
  addLocation(putB, serverName, 1);
  mutations.add(putA);
  mutations.add(putB);
  MetaEditor.mutateMetaTable(catalogTracker, mutations);
}
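MetaEditor.addDaughtersToPut, used above on the parent's Put, records the two daughters on the parent's meta row; a hedged sketch, modeled on the 0.98-era implementation:

public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
  // Daughters are kept as serialized HRegionInfos under info:splitA / info:splitB.
  if (splitA != null) {
    put.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
        splitA.toByteArray());
  }
  if (splitB != null) {
    put.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
        splitB.toByteArray());
  }
  return put;
}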
Example 4: makePutFromRegionInfo
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
/**
 * Generates and returns a Put containing the region info for the catalog table
 * and its favored-node servers.
 * @param regionInfo the region to describe
 * @param favoredNodeList the favored nodes for the region; if null, no Put is built
 * @return Put object, or null if favoredNodeList is null
 */
static Put makePutFromRegionInfo(HRegionInfo regionInfo, List<ServerName> favoredNodeList)
    throws IOException {
  Put put = null;
  if (favoredNodeList != null) {
    put = MetaEditor.makePutFromRegionInfo(regionInfo);
    byte[] favoredNodes = getFavoredNodes(favoredNodeList);
    put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
        EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
    LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
        " with favored nodes " + Bytes.toString(favoredNodes));
  }
  return put;
}
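A hypothetical call of this helper (the ServerName values are made up; regionInfo and meta come from the surrounding context, and java.util.Arrays, java.util.List and org.apache.hadoop.hbase.ServerName are assumed imported):

List<ServerName> favoredNodes = Arrays.asList(
    ServerName.valueOf("host1.example.com", 60020, 1L),
    ServerName.valueOf("host2.example.com", 60020, 1L),
    ServerName.valueOf("host3.example.com", 60020, 1L));
Put put = makePutFromRegionInfo(regionInfo, favoredNodes);
if (put != null) { // the helper returns null when favoredNodeList is null
  meta.put(put);
}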
Example 5: generatePuts
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
/**
 * Generate the set of puts to add to a new meta table. This expects the tables
 * to be clean, with no overlaps or holes. If there are any problems it returns null.
 *
 * @return An array list of puts to do in bulk, null if tables have problems
 */
private ArrayList<Put> generatePuts(
    SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  ArrayList<Put> puts = new ArrayList<Put>();
  boolean hasProblems = false;
  for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
    TableName name = e.getKey();
    // skip "hbase:meta"
    if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
      continue;
    }
    TableInfo ti = e.getValue();
    for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap()
        .entrySet()) {
      Collection<HbckInfo> his = spl.getValue();
      int sz = his.size();
      if (sz != 1) {
        // problem
        LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey())
            + " had " + sz + " regions instead of exactly 1.");
        hasProblems = true;
        continue;
      }
      // add the row directly to meta, using the HRI recovered from HDFS
      HbckInfo hi = his.iterator().next();
      HRegionInfo hri = hi.getHdfsHRI(); // not hi.metaEntry
      Put p = MetaEditor.makePutFromRegionInfo(hri);
      puts.add(p);
    }
  }
  return hasProblems ? null : puts;
}
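The caller would then apply the generated puts in one batch; a hedged sketch (meta is assumed to be an HTable opened on hbase:meta, and the error handling is made up):

ArrayList<Put> puts = generatePuts(tablesInfo);
if (puts == null) {
  throw new IOException("tables have overlaps or holes; cannot rebuild meta");
}
meta.put(puts); // HTable.put(List<Put>) batches the writes
meta.flushCommits();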
Example 6: preSplitBeforePONR
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
    byte[] splitKey, List<Mutation> metaEntries) throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  List<HRegion> onlineRegions =
      rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
  HRegion region = onlineRegions.get(0);
  for (HRegion r : onlineRegions) {
    if (r.getRegionInfo().containsRow(splitKey)) {
      region = r;
      break;
    }
  }
  st = new SplitTransaction(region, splitKey);
  if (!st.prepare()) {
    LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString()
        + " failed, bypassing the split.");
    ctx.bypass();
    return;
  }
  region.forceSplit(splitKey);
  daughterRegions = st.stepsBeforePONR(rs, rs, false);
  HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo());
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for parent
  Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
  MetaEditor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
      daughterRegions.getSecond().getRegionInfo());
  metaEntries.add(putParent);
  // Puts for daughters
  Put putA = MetaEditor.makePutFromRegionInfo(daughterRegions.getFirst().getRegionInfo());
  Put putB = MetaEditor.makePutFromRegionInfo(daughterRegions.getSecond().getRegionInfo());
  st.addLocation(putA, rs.getServerName(), 1);
  st.addLocation(putB, rs.getServerName(), 1);
  metaEntries.add(putA);
  metaEntries.add(putB);
}
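An observer like this is typically wired in through the table descriptor before the table is created; a hedged sketch (the observer class name and column family are hypothetical, and admin is assumed to be an HBaseAdmin):

HTableDescriptor htd = new HTableDescriptor(
    TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
htd.addFamily(new HColumnDescriptor("cf"));
htd.addCoprocessor(SplitObserver.class.getName()); // hypothetical observer class
admin.createTable(htd);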
Example 7: makePutFromRegionInfo
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
/**
 * Generates and returns a Put containing the region info for the catalog table
 * and its favored-node servers.
 * @param regionInfo the region to describe
 * @param favoredNodeList the favored nodes for the region; if null, no Put is built
 * @return Put object, or null if favoredNodeList is null
 */
static Put makePutFromRegionInfo(HRegionInfo regionInfo, List<ServerName> favoredNodeList)
    throws IOException {
  Put put = null;
  if (favoredNodeList != null) {
    put = MetaEditor.makePutFromRegionInfo(regionInfo);
    byte[] favoredNodes = getFavoredNodes(favoredNodeList);
    put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
        EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
    // Bytes.toString decodes the value; logging the raw byte[] would only
    // print the array reference.
    LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
        " with favored nodes " + Bytes.toString(favoredNodes));
  }
  return put;
}
Example 8: updateMeta
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
@Override
protected void updateMeta(final byte[] oldRegion1,
    final byte[] oldRegion2, HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    Delete delete = new Delete(regionsToDelete[r]);
    delete.deleteColumns(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY,
        HConstants.SERVER_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY,
        HConstants.STARTCODE_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER);
    root.delete(delete, null, true);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  HRegionInfo newInfo = newRegion.getRegionInfo();
  newInfo.setOffline(true);
  Put put = MetaEditor.makePutFromRegionInfo(newInfo);
  root.put(put);
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
Example 9: generatePuts
import org.apache.hadoop.hbase.catalog.MetaEditor; // import the package/class the method depends on
/**
 * Generate the set of puts to add to a new meta table. This expects the tables
 * to be clean, with no overlaps or holes. If there are any problems it returns null.
 *
 * @return An array list of puts to do in bulk, null if tables have problems
 */
private ArrayList<Put> generatePuts(SortedMap<String, TableInfo> tablesInfo) throws IOException {
  ArrayList<Put> puts = new ArrayList<Put>();
  boolean hasProblems = false;
  for (Entry<String, TableInfo> e : tablesInfo.entrySet()) {
    String name = e.getKey();
    // skip "-ROOT-" and ".META."
    if (Bytes.compareTo(Bytes.toBytes(name), HConstants.ROOT_TABLE_NAME) == 0
        || Bytes.compareTo(Bytes.toBytes(name), HConstants.META_TABLE_NAME) == 0) {
      continue;
    }
    TableInfo ti = e.getValue();
    for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap()
        .entrySet()) {
      Collection<HbckInfo> his = spl.getValue();
      int sz = his.size();
      if (sz != 1) {
        // problem
        LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey())
            + " had " + sz + " regions instead of exactly 1.");
        hasProblems = true;
        continue;
      }
      // add the row directly to meta, using the HRI recovered from HDFS
      HbckInfo hi = his.iterator().next();
      HRegionInfo hri = hi.getHdfsHRI(); // not hi.metaEntry
      Put p = MetaEditor.makePutFromRegionInfo(hri);
      puts.add(p);
    }
  }
  return hasProblems ? null : puts;
}
}