This article collects typical usage examples of the Java class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType. If you are unsure what MutationType is for or how to use it, the selected code examples below should help.
MutationType belongs to the org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto package. Fifteen code examples are shown below, ordered by popularity by default.
Example 1: buildMutateRequest
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Create a protocol buffer MutateRequest for a conditional delete
 *
 * @param regionName
 * @param row
 * @param family
 * @param qualifier
 * @param comparator
 * @param compareType
 * @param delete
 * @return a mutate request
 * @throws IOException
 */
public static MutateRequest buildMutateRequest(
    final byte[] regionName, final byte[] row, final byte[] family,
    final byte[] qualifier, final ByteArrayComparable comparator,
    final CompareType compareType, final Delete delete) throws IOException {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  Condition condition = buildCondition(
      row, family, qualifier, comparator, compareType);
  builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete,
      MutationProto.newBuilder()));
  builder.setCondition(condition);
  return builder.build();
}
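A minimal call-site sketch for the helper above. It assumes the method is the one exposed as RequestConverter.buildMutateRequest (the neighbouring buildRegionSpecifier/buildCondition helpers suggest that class), and every value below is a placeholder rather than anything prescribed by the API:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.util.Bytes;

public class ConditionalDeleteRequestSketch {
  public static MutateRequest build(byte[] regionName) throws IOException {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    // Delete the row only if cf:q currently holds the value "expected".
    Delete delete = new Delete(row);
    return RequestConverter.buildMutateRequest(regionName, row, family, qualifier,
        new BinaryComparator(Bytes.toBytes("expected")), CompareType.EQUAL, delete);
  }
}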
Example 2: parseMutateInfo
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
private Map<String, MutationType> parseMutateInfo(byte[] mutateInfo) {
  Map<String, MutationType> mi = new HashMap<String, MutationType>();
  if (mutateInfo != null) {
    String mutateInfoStr = Bytes.toString(mutateInfo);
    String[] mutations = mutateInfoStr.split("#");
    for (String mutation: mutations) {
      if (mutation.isEmpty()) continue;
      Preconditions.checkArgument(mutation.contains(":"),
          "Invalid mutation info " + mutation);
      int p = mutation.indexOf(":");
      String column = mutation.substring(0, p);
      MutationType type = MutationType.valueOf(
          Integer.parseInt(mutation.substring(p + 1)));
      mi.put(column, type);
    }
  }
  return mi;
}
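The method expects mutateInfo to encode column-to-mutation-type pairs as column:typeCode entries joined by '#'. Assuming the numeric codes follow the generated enum in Client.proto (APPEND = 0, INCREMENT = 1, PUT = 2, DELETE = 3), a hypothetical input parsed inside the same class would look like this:

// "c0" maps to PUT (code 2), "c1" to DELETE (code 3); column names must not contain ':'.
byte[] mutateInfo = Bytes.toBytes("c0:2#c1:3");
Map<String, MutationType> mi = parseMutateInfo(mutateInfo);
// mi.get("c0") == MutationType.PUT and mi.get("c1") == MutationType.DELETE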
Example 3: buildRegionAction
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Create a protocol buffer RegionAction for row mutations.
 * Does not propagate Action absolute position. Does not set the atomic flag on the created
 * RegionAction. Caller should do that if wanted.
 * @param regionName
 * @param rowMutations
 * @return a data-laden RegionAction.Builder
 * @throws IOException
 */
public static RegionAction.Builder buildRegionAction(final byte[] regionName,
    final RowMutations rowMutations)
    throws IOException {
  RegionAction.Builder builder =
      getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
  ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
  MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
  for (Mutation mutation: rowMutations.getMutations()) {
    MutationType mutateType = null;
    if (mutation instanceof Put) {
      mutateType = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      mutateType = MutationType.DELETE;
    } else {
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
    actionBuilder.clear();
    actionBuilder.setMutation(mp);
    builder.addAction(actionBuilder.build());
  }
  return builder;
}
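A hedged usage sketch, again assuming this is RequestConverter.buildRegionAction and using the pre-1.0 Put/Delete mutator methods that the other examples on this page rely on; the region name is a placeholder:

byte[] regionName = Bytes.toBytes("placeholderRegionName"); // normally taken from an HRegionLocation
byte[] row = Bytes.toBytes("row1");
RowMutations rm = new RowMutations(row);
Put put = new Put(row);
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
rm.add(put);
Delete delete = new Delete(row);
delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2"));
rm.add(delete);
RegionAction.Builder regionAction = RequestConverter.buildRegionAction(regionName, rm);
regionAction.setAtomic(true); // the javadoc leaves atomicity to the caller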
Example 4: buildNoDataRegionAction
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Create a protocol buffer RegionAction for row mutations that does not hold data. Data/Cells
 * are carried outside of protobuf. Return references to the Cells in the <code>cells</code> param.
 * Does not propagate Action absolute position. Does not set the atomic flag on the created
 * RegionAction. Caller should do that if wanted.
 * @param regionName
 * @param rowMutations
 * @param cells Return in here a list of Cells as CellIterable.
 * @return a region mutation minus data
 * @throws IOException
 */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
    final RowMutations rowMutations, final List<CellScannable> cells,
    final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder)
    throws IOException {
  for (Mutation mutation: rowMutations.getMutations()) {
    MutationType type = null;
    if (mutation instanceof Put) {
      type = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      type = MutationType.DELETE;
    } else {
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto mp = ProtobufUtil.toMutationNoData(type, mutation, mutationBuilder);
    cells.add(mutation);
    actionBuilder.clear();
    regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
  }
  return regionActionBuilder;
}
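A sketch of driving the "no data" variant, reusing the RowMutations rm and the regionName placeholder from the previous sketch. The cells list ends up holding the Put/Delete instances themselves, whose Cell payloads are then shipped outside the protobuf (for instance via CellUtil.createCellScanner):

List<CellScannable> cells = new ArrayList<CellScannable>();
RegionAction.Builder noDataAction = RequestConverter.buildNoDataRegionAction(
    regionName, rm, cells,
    RegionAction.newBuilder(), ClientProtos.Action.newBuilder(), MutationProto.newBuilder());
// The actions in noDataAction carry no cell data; the cells list supplies it separately.
CellScanner payload = CellUtil.createCellScanner(cells);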
Example 5: getMutationBuilderAndSetCommonFields
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Code shared by {@link #toMutation(MutationType, Mutation)} and
 * {@link #toMutationNoData(MutationType, Mutation)}
 * @param type
 * @param mutation
 * @return A partly-filled out protobuf'd Mutation.
 */
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
    final Mutation mutation, MutationProto.Builder builder) {
  builder.setRow(ByteStringer.wrap(mutation.getRow()));
  builder.setMutateType(type);
  builder.setDurability(toDurability(mutation.getDurability()));
  builder.setTimestamp(mutation.getTimeStamp());
  Map<String, byte[]> attributes = mutation.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute: attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  return builder;
}
Example 6: multiMutate
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Performs an atomic multi-Mutate operation against the given table.
 */
private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
  CoprocessorRpcChannel channel = table.coprocessorService(row);
  MutateRowsRequest.Builder mmrBuilder = MutateRowsRequest.newBuilder();
  for (Mutation mutation : mutations) {
    if (mutation instanceof Put) {
      mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
    } else if (mutation instanceof Delete) {
      mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
    } else {
      throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
          + mutation.getClass().getName());
    }
  }
  MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
  try {
    service.mutateRows(null, mmrBuilder.build());
  } catch (ServiceException ex) {
    // surface the remote failure to the caller as an IOException
    throw ProtobufUtil.toIOException(ex);
  }
}
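A hypothetical call site. It assumes both rows fall into the same region (the coprocessor endpoint is located via the anchor row passed as the second argument) and that FAMILY, QUALIFIER and VALUE are test constants like those used elsewhere on this page:

Put put = new Put(Bytes.toBytes("testRow1"));
put.add(FAMILY, QUALIFIER, VALUE);
Delete delete = new Delete(Bytes.toBytes("testRow2"));
// Both mutations are applied atomically by the MultiRowMutation endpoint.
multiMutate(table, Bytes.toBytes("testRow1"), put, delete);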
Example 7: getMutationBuilderAndSetCommonFields
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Code shared by {@link #toMutation(MutationType, Mutation)} and
 * {@link #toMutationNoData(MutationType, Mutation)}
 * @param type
 * @param mutation
 * @return A partly-filled out protobuf'd Mutation.
 */
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
    final Mutation mutation, MutationProto.Builder builder) {
  builder.setRow(HBaseZeroCopyByteString.wrap(mutation.getRow()));
  builder.setMutateType(type);
  builder.setDurability(toDurability(mutation.getDurability()));
  builder.setTimestamp(mutation.getTimeStamp());
  Map<String, byte[]> attributes = mutation.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute: attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(HBaseZeroCopyByteString.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  return builder;
}
Example 8: buildMutateRequest
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Create a protocol buffer MutateRequest for a client increment
 *
 * @param regionName
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @param durability
 * @return a mutate request
 */
public static MutateRequest buildMutateRequest(
    final byte[] regionName, final byte[] row, final byte[] family,
    final byte[] qualifier, final long amount, final Durability durability) {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
  mutateBuilder.setRow(ZeroCopyLiteralByteString.wrap(row));
  mutateBuilder.setMutateType(MutationType.INCREMENT);
  mutateBuilder.setDurability(ProtobufUtil.toDurability(durability));
  ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
  columnBuilder.setFamily(ZeroCopyLiteralByteString.wrap(family));
  QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
  valueBuilder.setValue(ZeroCopyLiteralByteString.wrap(Bytes.toBytes(amount)));
  valueBuilder.setQualifier(ZeroCopyLiteralByteString.wrap(qualifier));
  columnBuilder.addQualifierValue(valueBuilder.build());
  mutateBuilder.addColumnValue(columnBuilder.build());
  builder.setMutation(mutateBuilder.build());
  return builder.build();
}
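Usage sketch, again assuming the helper lives in RequestConverter; it builds a request that increments cf:counter by 5 on a placeholder region, leaving durability at the cluster default:

MutateRequest request = RequestConverter.buildMutateRequest(
    Bytes.toBytes("placeholderRegionName"), Bytes.toBytes("row1"),
    Bytes.toBytes("cf"), Bytes.toBytes("counter"), 5L, Durability.USE_DEFAULT);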
Example 9: toMutation
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
    MutationProto.Builder builder)
    throws IOException {
  builder = getMutationBuilderAndSetCommonFields(type, mutation, builder);
  ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
  QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
  for (Map.Entry<byte[], List<Cell>> family: mutation.getFamilyCellMap().entrySet()) {
    columnBuilder.clear();
    columnBuilder.setFamily(ZeroCopyLiteralByteString.wrap(family.getKey()));
    for (Cell cell: family.getValue()) {
      KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
      valueBuilder.setQualifier(ZeroCopyLiteralByteString.wrap(
          kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()));
      valueBuilder.setValue(ZeroCopyLiteralByteString.wrap(
          kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
      valueBuilder.setTimestamp(kv.getTimestamp());
      if (type == MutationType.DELETE) {
        KeyValue.Type keyValueType = KeyValue.Type.codeToType(kv.getType());
        valueBuilder.setDeleteType(toDeleteType(keyValueType));
      }
      columnBuilder.addQualifierValue(valueBuilder.build());
    }
    builder.addColumnValue(columnBuilder.build());
  }
  return builder.build();
}
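A quick conversion sketch using the method above with a placeholder Put (built with the pre-1.0 Put.add mutator, matching the rest of this page):

Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
MutationProto proto =
    ProtobufUtil.toMutation(MutationType.PUT, put, MutationProto.newBuilder());
// For MutationType.DELETE the per-cell delete type is also recorded, as shown above.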
Example 10: getMutationBuilderAndSetCommonFields
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Code shared by {@link #toMutation(MutationType, Mutation)} and
 * {@link #toMutationNoData(MutationType, Mutation)}
 * @param type
 * @param mutation
 * @return A partly-filled out protobuf'd Mutation.
 */
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
    final Mutation mutation, MutationProto.Builder builder) {
  builder.setRow(ZeroCopyLiteralByteString.wrap(mutation.getRow()));
  builder.setMutateType(type);
  builder.setDurability(toDurability(mutation.getDurability()));
  builder.setTimestamp(mutation.getTimeStamp());
  Map<String, byte[]> attributes = mutation.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute: attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(ZeroCopyLiteralByteString.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  return builder;
}
Example 11: MutationReplay
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
public MutationReplay(MutationType type, Mutation mutation, long nonceGroup, long nonce) {
  this.type = type;
  this.mutation = mutation;
  if (this.mutation.getDurability() != Durability.SKIP_WAL) {
    // using ASYNC_WAL for replay
    this.mutation.setDurability(Durability.ASYNC_WAL);
  }
  this.nonceGroup = nonceGroup;
  this.nonce = nonce;
}
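A construction sketch with a placeholder Put; HConstants.NO_NONCE is the usual "no nonce recorded" value:

Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
MutationReplay replay =
    new MutationReplay(MutationType.PUT, put, HConstants.NO_NONCE, HConstants.NO_NONCE);
// Unless the mutation asked for SKIP_WAL, the constructor downgrades it to ASYNC_WAL.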
Example 12: serialize
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
@Override
public void serialize(Mutation mutation) throws IOException {
  MutationType type;
  if (mutation instanceof Put) {
    type = MutationType.PUT;
  } else if (mutation instanceof Delete) {
    type = MutationType.DELETE;
  } else {
    throw new IllegalArgumentException("Only Put and Delete are supported");
  }
  ProtobufUtil.toMutation(type, mutation).writeDelimitedTo(out);
}
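A round-trip sketch. It assumes the serializer's out stream has been bound to the ByteArrayOutputStream below (the Hadoop Serializer contract does this through open(OutputStream)); because serialize writes length-delimited messages, the matching reader side uses parseDelimitedFrom:

ByteArrayOutputStream bos = new ByteArrayOutputStream();
serializer.open(bos); // hypothetical instance of the enclosing Serializer<Mutation>
Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
serializer.serialize(put);
serializer.close();
MutationProto proto =
    MutationProto.parseDelimitedFrom(new ByteArrayInputStream(bos.toByteArray()));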
Example 13: testAppend
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Test Append Mutate conversions.
 *
 * @throws IOException
 */
@Test
public void testAppend() throws IOException {
  long timeStamp = 111111;
  MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.APPEND);
  mutateBuilder.setTimestamp(timeStamp);
  ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
  qualifierBuilder.setTimestamp(timeStamp);
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setTimestamp(timeStamp);
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto = mutateBuilder.build();
  // default fields
  assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
  // set the default value for equal comparison
  mutateBuilder = MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Append append = ProtobufUtil.toAppend(proto, null);
  // append always uses the latest timestamp,
  // so reset the expected timestamp to the converted Append's before comparing
  mutateBuilder.setTimestamp(append.getTimeStamp());
  assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.APPEND, append));
}
Example 14: testIncrement
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
/**
 * Test Increment Mutate conversions.
 *
 * @throws IOException
 */
@Test
public void testIncrement() throws IOException {
  MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.INCREMENT);
  ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(11L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(22L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto = mutateBuilder.build();
  // default fields
  assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
  // set the default value for equal comparison
  mutateBuilder = MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Increment increment = ProtobufUtil.toIncrement(proto, null);
  assertEquals(mutateBuilder.build(),
      ProtobufUtil.toMutation(increment, MutationProto.newBuilder(), HConstants.NO_NONCE));
}
Example 15: testMultiRowMutation
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; // import the required package/class
@Test
public void testMultiRowMutation() throws Exception {
  LOG.info("Starting testMultiRowMutation");
  final TableName TABLENAME = TableName.valueOf("testMultiRowMutation");
  final byte[] ROW1 = Bytes.toBytes("testRow1");
  Table t = TEST_UTIL.createTable(TABLENAME, FAMILY);
  Put p = new Put(ROW);
  p.add(FAMILY, QUALIFIER, VALUE);
  MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, p);
  p = new Put(ROW1);
  p.add(FAMILY, QUALIFIER, VALUE);
  MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, p);
  MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
  mrmBuilder.addMutationRequest(m1);
  mrmBuilder.addMutationRequest(m2);
  MutateRowsRequest mrm = mrmBuilder.build();
  CoprocessorRpcChannel channel = t.coprocessorService(ROW);
  MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
  service.mutateRows(null, mrm);
  Get g = new Get(ROW);
  Result r = t.get(g);
  assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)));
  g = new Get(ROW1);
  r = t.get(g);
  assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)));
}