本文整理汇总了Java中org.apache.hadoop.hbase.client.RowMutations类的典型用法代码示例。如果您正苦于以下问题:Java RowMutations类的具体用法?Java RowMutations怎么用?Java RowMutations使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RowMutations类属于org.apache.hadoop.hbase.client包,在下文中一共展示了RowMutations类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: resetSplitParent
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Repairs a split parent's row in the meta table: removes the SPLITA and
 * SPLITB daughter pointers and rewrites the region info with the split and
 * offline flags cleared, all inside a single atomic row mutation.
 *
 * @param hi hbck record whose meta entry identifies the region to reset
 * @throws IOException if the mutation against the meta table fails
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  final byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations rowMutations = new RowMutations(regionName);

  // Drop the daughter-region pointers left behind by the split.
  Delete removeDaughters = new Delete(regionName);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  rowMutations.add(removeDaughters);

  // Re-publish the region info with the split/offline markers cleared.
  HRegionInfo restored = new HRegionInfo(hi.metaEntry);
  restored.setOffline(false);
  restored.setSplit(false);
  rowMutations.add(MetaTableAccessor.makePutFromRegionInfo(restored));

  meta.mutateRow(rowMutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
示例2: buildRegionAction
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Builds a protocol buffer RegionAction carrying every mutation of the given
 * RowMutations batch, one Action per mutation.
 * Does not propagate Action absolute position. Does not set atomic action on the created
 * RegionAtomic. Caller should do that if wanted.
 *
 * @param regionName region the mutations target
 * @param rowMutations the Put/Delete batch to serialize
 * @return a data-laden RegionMutation.Builder
 * @throws IOException via DoNotRetryIOException when a mutation is neither Put nor Delete
 */
public static RegionAction.Builder buildRegionAction(final byte [] regionName,
    final RowMutations rowMutations)
throws IOException {
  RegionAction.Builder regionActionBuilder =
      getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
  // Reused across iterations; cleared before each mutation is serialized.
  ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
  MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
  for (Mutation mutation: rowMutations.getMutations()) {
    final MutationType mutateType;
    if (mutation instanceof Put) {
      mutateType = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      mutateType = MutationType.DELETE;
    } else {
      // RowMutations is defined for Put/Delete only; anything else is a caller bug.
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto serialized = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
    actionBuilder.clear();
    actionBuilder.setMutation(serialized);
    regionActionBuilder.addAction(actionBuilder.build());
  }
  return regionActionBuilder;
}
示例3: buildNoDataRegionAction
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Builds a protocol buffer MultiRequest for row mutations whose Cell data is
 * carried outside of protobuf: each mutation is serialized without its data and
 * the mutation itself is appended to {@code cells} as a CellScannable.
 * Does not propagate Action absolute position. Does not set atomic action on the created
 * RegionAtomic. Caller should do that if wanted.
 *
 * @param regionName region the mutations target
 * @param rowMutations the Put/Delete batch to serialize
 * @param cells out-param collecting the Cells as CellScannables
 * @param regionActionBuilder builder the serialized actions are added to
 * @param actionBuilder reusable Action builder (cleared per mutation)
 * @param mutationBuilder reusable MutationProto builder (cleared per mutation)
 * @return the populated regionActionBuilder, minus cell data
 * @throws IOException via DoNotRetryIOException when a mutation is neither Put nor Delete
 */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
    final RowMutations rowMutations, final List<CellScannable> cells,
    final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder)
throws IOException {
  for (Mutation mutation: rowMutations.getMutations()) {
    final MutationType type;
    if (mutation instanceof Put) {
      type = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      type = MutationType.DELETE;
    } else {
      // Only Put and Delete may appear inside a RowMutations batch.
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto skeleton = ProtobufUtil.toMutationNoData(type, mutation, mutationBuilder);
    // The mutation doubles as the CellScannable that ships the data out-of-band.
    cells.add(mutation);
    actionBuilder.clear();
    regionActionBuilder.addAction(actionBuilder.setMutation(skeleton).build());
  }
  return regionActionBuilder;
}
示例4: mutateRow
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Applies a RowMutations batch through the wrapped table, adding ColumnManager
 * processing around the call: column validation before, alias conversion during
 * (when enabled), and column auditing after.
 *
 * @param rm the row mutations to apply
 * @throws IOException propagated from validation or the wrapped table
 */
@Override
public void mutateRow(RowMutations rm) throws IOException {
  // ColumnManager validation: enforce column definitions when configured.
  if (includedInRepositoryProcessing
      && mTableDescriptor.hasColDescriptorWithColDefinitionsEnforced()) {
    repository.validateColumns(mTableDescriptor, rm);
  }
  // Standard HBase processing, converting qualifiers to aliases when enabled.
  boolean aliasingEnabled = includedInRepositoryProcessing
      && mTableDescriptor.hasColDescriptorWithColAliasesEnabled();
  if (aliasingEnabled) {
    wrappedTable.mutateRow(repository.convertQualifiersToAliases(mTableDescriptor, rm));
  } else {
    wrappedTable.mutateRow(rm);
  }
  // ColumnManager auditing: record the columns touched by this batch.
  if (includedInRepositoryProcessing) {
    repository.putColumnAuditorSchemaEntities(mTableDescriptor, rm);
  }
}
示例5: checkAndMutate
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Conditionally applies a RowMutations batch via the wrapped table's
 * checkAndMutate, with ColumnManager validation beforehand, qualifier-to-alias
 * conversion when enabled, and auditing only if the mutations were performed.
 *
 * @param rowId row the check and mutations apply to
 * @param colFamily family of the checked cell
 * @param colQualifier qualifier of the checked cell
 * @param co comparison operator for the check
 * @param colValue expected value for the check
 * @param rm mutations applied when the check passes
 * @return true if the check passed and the mutations were applied
 * @throws IOException propagated from validation or the wrapped table
 */
@Override
public boolean checkAndMutate(byte[] rowId, byte[] colFamily, byte[] colQualifier,
    CompareOp co, byte[] colValue, RowMutations rm)
throws IOException {
  // ColumnManager validation: enforce column definitions when configured.
  if (includedInRepositoryProcessing
      && mTableDescriptor.hasColDescriptorWithColDefinitionsEnforced()) {
    repository.validateColumns(mTableDescriptor, rm);
  }
  // Standard HBase processing; the checked qualifier and the mutations must
  // both be aliased when aliasing is enabled.
  final boolean mutationsPerformed;
  if (includedInRepositoryProcessing
      && mTableDescriptor.hasColDescriptorWithColAliasesEnabled()) {
    mutationsPerformed = wrappedTable.checkAndMutate(rowId, colFamily,
        repository.getAlias(mTableDescriptor, colFamily, colQualifier), co, colValue,
        repository.convertQualifiersToAliases(mTableDescriptor, rm));
  } else {
    mutationsPerformed
        = wrappedTable.checkAndMutate(rowId, colFamily, colQualifier, co, colValue, rm);
  }
  // ColumnManager auditing: only record columns if the mutations went through.
  if (mutationsPerformed && includedInRepositoryProcessing) {
    repository.putColumnAuditorSchemaEntities(mTableDescriptor, rm);
  }
  return mutationsPerformed;
}
示例6: getFamilyQualifierToAliasMap
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Dispatches to the Append- or Increment-specific overload to build a
 * family-to-(qualifier-to-alias) map for the given mutation. For Delete, Put
 * and RowMutations (and any other mutation type) an empty map is returned,
 * since alias processing for those types takes no qualifier map.
 *
 * @param mTableDescriptor descriptor of the table the mutation targets
 * @param mutation the mutation to inspect
 * @return family -> (qualifier -> alias) map; empty unless Append/Increment
 * @throws IOException propagated from the type-specific overloads
 */
NavigableMap<byte[], NavigableMap<byte[], byte[]>> getFamilyQualifierToAliasMap(
    MTableDescriptor mTableDescriptor, Mutation mutation)
throws IOException {
  // getClass() also preserves the NPE a null mutation would raise.
  Class<?> mutationClass = mutation.getClass();
  if (Append.class.isAssignableFrom(mutationClass)) {
    return getFamilyQualifierToAliasMap(mTableDescriptor, (Append)mutation);
  }
  if (Increment.class.isAssignableFrom(mutationClass)) {
    return getFamilyQualifierToAliasMap(mTableDescriptor, (Increment)mutation);
  }
  // Delete / Put / RowMutations: alias processing needs no qualifier map.
  return new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
示例7: convertQualifiersToAliases
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Dispatches a Row to the type-specific alias-conversion overload.
 * Handles Append, Delete, Get, Increment, Put and RowMutations; the
 * qualifier-to-alias map is only forwarded for the types whose overload
 * accepts one (Append, Get, Increment).
 *
 * @param mTableDescriptor descriptor of the table the row targets
 * @param originalRow the row operation to convert
 * @param familyQualifierToAliasMap family -> (qualifier -> alias) lookup
 * @param intForUniqueSignature unused; disambiguates this overload's signature
 * @return the converted row operation, or null for unrecognized Row types
 * @throws IOException propagated from the type-specific overloads
 */
Row convertQualifiersToAliases(MTableDescriptor mTableDescriptor, final Row originalRow,
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap,
    int intForUniqueSignature)
throws IOException {
  Class<?> rowClass = originalRow.getClass();
  if (Append.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Append)originalRow, familyQualifierToAliasMap);
  }
  if (Delete.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Delete)originalRow);
  }
  if (Get.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Get)originalRow, familyQualifierToAliasMap);
  }
  if (Increment.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Increment)originalRow, familyQualifierToAliasMap);
  }
  if (Put.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Put)originalRow);
  }
  if (RowMutations.class.isAssignableFrom(rowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (RowMutations)originalRow);
  }
  // Unknown Row subtype — nothing to convert.
  return null;
}
示例8: resetSplitParent
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Repairs a split parent's row in the meta table: deletes the SPLITA/SPLITB
 * daughter pointers and rewrites the serialized region info with the split and
 * offline flags cleared, atomically, then flushes the meta table's write buffer.
 *
 * @param hi hbck record whose meta entry identifies the region to reset
 * @throws IOException if the mutation or flush against meta fails
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  final byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations rowMutations = new RowMutations(regionName);

  // Remove the daughter-region pointers left behind by the split.
  Delete removeDaughters = new Delete(regionName);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  rowMutations.add(removeDaughters);

  // Re-serialize the region info with split/offline cleared and put it back.
  HRegionInfo restored = new HRegionInfo(hi.metaEntry);
  restored.setOffline(false);
  restored.setSplit(false);
  Put restoredInfo = new Put(regionName);
  restoredInfo.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(restored));
  rowMutations.add(restoredInfo);

  meta.mutateRow(rowMutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
示例9: mutateRow
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Region-server RPC endpoint that applies an atomic RowMutations batch to the
 * named region. Reclaims memstore memory first for user (non-meta) regions,
 * and checks the filesystem when the mutation fails with an IOException.
 *
 * @param regionName encoded name of the target region; must not be null
 * @param rm the row mutations to apply
 * @throws IOException if the server is stopped, the region name is null,
 *         or the region-level mutation fails
 */
@Override
public void mutateRow(byte[] regionName, RowMutations rm) throws IOException {
  checkOpen();
  if (regionName == null) {
    throw new IOException("Invalid arguments to mutateRow regionName is null");
  }
  requestCount.incrementAndGet();
  try {
    HRegion region = getRegion(regionName);
    // Only throttle user regions; meta must stay responsive.
    if (!region.getRegionInfo().isMetaTable()) {
      this.cacheFlusher.reclaimMemStoreMemory();
    }
    region.mutateRow(rm);
  } catch (IOException e) {
    // A failed mutation may indicate FS trouble; verify before rethrowing.
    checkFileSystem();
    throw e;
  }
}
示例10: testmutateRowsWithLocks_wrongCF
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Verifies that mutateRow rejects a RowMutations batch referencing a column
 * family (fam3) that does not exist in the region (created with fam1/fam2),
 * failing fast with DoNotRetryIOException.
 */
public void testmutateRowsWithLocks_wrongCF() throws IOException {
  this.region = initHRegion(tableName, this.getName(), conf, fam1, fam2);
  try {
    // Build a batch whose Put targets a family the region does not have.
    Put badFamilyPut = new Put(row2);
    badFamilyPut.add(fam3, qual1, value1);
    RowMutations batch = new RowMutations(row2);
    batch.add(badFamilyPut);
    try {
      region.mutateRow(batch);
      fail();
    } catch (DoNotRetryIOException expected) {
      // expected exception.
      LOG.debug("Caught expected exception: " + expected.getMessage());
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例11: mutateRow
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Bigtable-backed implementation of Table#mutateRow: adapts the HBase
 * RowMutations into a Bigtable MutateRowRequest, attaches table metadata,
 * and issues the RPC. Any failure is wrapped in an IOException whose message
 * carries the project, table and row for diagnosis.
 *
 * @param rm the row mutations to apply
 * @throws IOException wrapping any failure from the Bigtable client
 */
@Override
public void mutateRow(RowMutations rm) throws IOException {
  LOG.trace("mutateRow(RowMutation)");
  // Translate the HBase batch into a Bigtable request and stamp it with
  // the table's routing metadata before sending.
  MutateRowRequest.Builder request = rowMutationsAdapter.adapt(rm);
  metadataSetter.setMetadata(request);
  try {
    client.mutateRow(request.build());
  } catch (Throwable throwable) {
    throw new IOException(
        makeGenericExceptionMessage(
            "mutateRow",
            options.getProjectId(),
            tableName.getQualifierAsString(),
            rm.getRow()),
        throwable);
  }
}
示例12: issueRequest
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Routes an HBase Row operation to the matching async request issuer.
 * Supports Put, Delete, Append, Increment, Get and RowMutations; any other
 * Row subtype is logged and surfaced as a failed future rather than thrown.
 * NOTE(review): the log call uses a %s placeholder — confirm the LOG type
 * expects printf-style formatting.
 *
 * @param row the operation to dispatch
 * @return a future for the operation's response message
 */
ListenableFuture<? extends GeneratedMessage> issueRequest(Row row) {
  if (row instanceof Put) {
    return issuePutRequest((Put) row);
  }
  if (row instanceof Delete) {
    return issueDeleteRequest((Delete) row);
  }
  if (row instanceof Append) {
    return issueAppendRequest((Append) row);
  }
  if (row instanceof Increment) {
    return issueIncrementRequest((Increment) row);
  }
  if (row instanceof Get) {
    return issueGetRequest((Get) row);
  }
  if (row instanceof RowMutations) {
    return issueRowMutationsRequest((RowMutations) row);
  }
  // Unknown Row subtype: report it via the future instead of throwing.
  LOG.error("Encountered unknown action type %s", row.getClass());
  return Futures.immediateFailedFuture(
      new IllegalArgumentException("Encountered unknown action type: " + row.getClass()));
}
示例13: resetSplitParent
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Repairs a split parent's row in the meta table: deletes the SPLITA/SPLITB
 * daughter pointers and rewrites the region info (built via MetaEditor) with
 * the split and offline flags cleared, atomically, then flushes meta's
 * write buffer.
 *
 * @param hi hbck record whose meta entry identifies the region to reset
 * @throws IOException if the mutation or flush against meta fails
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  final byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations rowMutations = new RowMutations(regionName);

  // Remove the daughter-region pointers left behind by the split.
  Delete removeDaughters = new Delete(regionName);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  removeDaughters.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  rowMutations.add(removeDaughters);

  // Re-publish the region info with split/offline cleared.
  HRegionInfo restored = new HRegionInfo(hi.metaEntry);
  restored.setOffline(false);
  restored.setSplit(false);
  rowMutations.add(MetaEditor.makePutFromRegionInfo(restored));

  meta.mutateRow(rowMutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
示例14: mutateRow
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Region-server RPC endpoint that applies an atomic RowMutations batch to the
 * named region. Reclaims memstore memory first for user (non-meta) regions,
 * and checks the filesystem when the mutation fails with an IOException.
 *
 * @param regionName encoded name of the target region; must not be null
 * @param rm the row mutations to apply
 * @throws IOException if the server is stopped, the region name is null,
 *         or the region-level mutation fails
 */
@Override
public void mutateRow(byte[] regionName, RowMutations rm)
    throws IOException {
  checkOpen();
  if (regionName == null) {
    throw new IOException("Invalid arguments to mutateRow regionName is null");
  }
  requestCount.incrementAndGet();
  try {
    HRegion region = getRegion(regionName);
    // Only throttle user regions; meta must stay responsive.
    if (!region.getRegionInfo().isMetaTable()) {
      this.cacheFlusher.reclaimMemStoreMemory();
    }
    region.mutateRow(rm);
  } catch (IOException e) {
    // A failed mutation may indicate FS trouble; verify before rethrowing.
    checkFileSystem();
    throw e;
  }
}
示例15: resetSplitParent
import org.apache.hadoop.hbase.client.RowMutations; //导入依赖的package包/类
/**
 * Repairs a split parent's row in the meta table: removes the SPLITA/SPLITB
 * daughter pointers and rewrites the region info (rebuilt immutably via
 * RegionInfoBuilder) with the split and offline flags cleared, all inside one
 * atomic row mutation.
 *
 * @param hi hbck record whose meta entry identifies the region to reset
 * @throws IOException if the mutation against the meta table fails
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  final byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations rowMutations = new RowMutations(regionName);

  // Remove the daughter-region pointers left behind by the split.
  Delete removeDaughters = new Delete(regionName);
  removeDaughters.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  removeDaughters.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  rowMutations.add(removeDaughters);

  // Rebuild the (immutable) region info with split/offline cleared.
  RegionInfo restored = RegionInfoBuilder.newBuilder(hi.metaEntry)
      .setOffline(false)
      .setSplit(false)
      .build();
  rowMutations.add(MetaTableAccessor.makePutFromRegionInfo(restored));

  meta.mutateRow(rowMutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}