

Java RowMutations.add Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.RowMutations.add. If you are wondering what RowMutations.add does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.RowMutations.


The following presents 15 code examples of the RowMutations.add method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
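Before turning to the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: several mutations against the same row are collected into a RowMutations via add and then applied atomically with Table.mutateRow. This is an illustrative sketch, not taken from any of the projects below; the connection setup, the table name demo_table, and the column family/qualifier names are placeholder assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsAddSketch {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row-1");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("demo_table"))) { // placeholder table name
      RowMutations rm = new RowMutations(row);   // every mutation added below must target this row
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
      rm.add(put);                               // queue a Put
      Delete delete = new Delete(row);
      delete.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2"));
      rm.add(delete);                            // queue a Delete against the same row
      table.mutateRow(rm);                       // the Put and Delete are applied atomically
    }
  }
}

All mutations added to a RowMutations must use the row key passed to its constructor (add throws an IOException otherwise), and mutateRow applies the whole batch atomically within a single region.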

Example 1: resetSplitParent

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);

  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);

  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 20, Source: HBaseFsck.java

Example 2: convertQualifiersToAliases

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
RowMutations convertQualifiersToAliases(MTableDescriptor mTableDescriptor,
        final RowMutations originalRowMutations)
        throws IOException{
  RowMutations modifiedRowMutations = new RowMutations(originalRowMutations.getRow());
  for (Mutation originalMutation : originalRowMutations.getMutations()) {
    Class<?> mutationClass = originalMutation.getClass();
    if (Put.class.isAssignableFrom(mutationClass)) {
      modifiedRowMutations.add(
              convertQualifiersToAliases(mTableDescriptor, (Put)originalMutation));
    } else if (Delete.class.isAssignableFrom(mutationClass)) {
      modifiedRowMutations.add(
              convertQualifiersToAliases(mTableDescriptor, (Delete)originalMutation));
    }
  }
  return modifiedRowMutations;
}
 
Developer: dvimont, Project: ColumnManagerForHBase, Lines of code: 17, Source: Repository.java

Example 3: resetSplitParent

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);

  Put p = new Put(hi.metaEntry.getRegionName());
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
    Writables.getBytes(hri));
  mutations.add(p);

  meta.mutateRow(mutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 23, Source: HBaseFsck.java

Example 4: testmutateRowsWithLocks_wrongCF

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
public void testmutateRowsWithLocks_wrongCF() throws IOException {
  this.region = initHRegion(tableName, this.getName(), conf, fam1, fam2);
  try {
    Put put = new Put(row2);
    put.add(fam3, qual1, value1);
    RowMutations rm = new RowMutations(row2);
    rm.add(put);
    try {
      region.mutateRow(rm);
      fail();
    } catch (DoNotRetryIOException expected) {
      // expected exception.
      LOG.debug("Caught expected exception: " + expected.getMessage());
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 20, Source: TestHRegion.java

Example 5: resetSplitParent

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);

  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaEditor.makePutFromRegionInfo(hri);
  mutations.add(p);

  meta.mutateRow(mutations);
  meta.flushCommits();
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 21, Source: HBaseFsck.java

Example 6: buildJoinMutation

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Override
protected RowMutations buildJoinMutation(
		final byte[] secondaryIndexRowId,
		final byte[] adapterId,
		final byte[] indexedAttributeFieldId,
		final byte[] primaryIndexId,
		final byte[] primaryIndexRowId,
		final byte[] attributeVisibility )
		throws IOException {
	final RowMutations m = new RowMutations(
			secondaryIndexRowId);
	final Put p = new Put(
			secondaryIndexRowId);
	p.setCellVisibility(new CellVisibility(
			StringUtils.stringFromBinary(attributeVisibility)));
	p.addColumn(
			SecondaryIndexUtils.constructColumnFamily(
					adapterId,
					indexedAttributeFieldId),
			SecondaryIndexUtils.constructColumnQualifier(
					primaryIndexId,
					primaryIndexRowId),
			EMPTY_VALUE);
	m.add(p);
	return m;
}
 
Developer: locationtech, Project: geowave, Lines of code: 27, Source: HBaseSecondaryIndexDataStore.java

Example 7: getDeleteMutations

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
public static RowMutations getDeleteMutations(
		final byte[] rowId,
		final byte[] columnFamily,
		final byte[] columnQualifier,
		final String[] authorizations )
		throws IOException {
	final RowMutations m = new RowMutations(
			rowId);
	final Delete d = new Delete(
			rowId);
	d.addColumns(
			columnFamily,
			columnQualifier);
	m.add(d);
	return m;
}
 
Developer: locationtech, Project: geowave, Lines of code: 17, Source: HBaseUtils.java

Example 8: testMutateRow_WriteRequestCount

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Test
public void testMutateRow_WriteRequestCount() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");

  RowMutations rm = new RowMutations(row1);
  Put put = new Put(row1);
  put.addColumn(fam1, qf1, val1);
  rm.add(put);

  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    long wrcBeforeMutate = this.region.writeRequestsCount.longValue();
    this.region.mutateRow(rm);
    long wrcAfterMutate = this.region.writeRequestsCount.longValue();
    Assert.assertEquals(wrcBeforeMutate + rm.getMutations().size(), wrcAfterMutate);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
 
Developer: apache, Project: hbase, Lines of code: 24, Source: TestHRegion.java

Example 9: buildFullDeleteMutation

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Override
protected RowMutations buildFullDeleteMutation(
		final byte[] secondaryIndexRowId,
		final byte[] adapterId,
		final byte[] indexedAttributeFieldId,
		final byte[] dataId,
		final byte[] fieldId )
		throws IOException {
	final RowMutations m = new RowMutations(
			secondaryIndexRowId);
	final Delete d = new Delete(
			secondaryIndexRowId);
	d.addColumn(
			SecondaryIndexUtils.constructColumnFamily(
					adapterId,
					indexedAttributeFieldId),
			SecondaryIndexUtils.constructColumnQualifier(
					fieldId,
					dataId));
	m.add(d);
	return m;
}
 
Developer: locationtech, Project: geowave, Lines of code: 23, Source: HBaseSecondaryIndexDataStore.java

Example 10: mutateRows

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
/**
 * Mutate a list of rows atomically.
 *
 * @param region
 * @param mutates
 * @throws IOException
 */
protected void mutateRows(final HRegion region,
    final List<Mutate> mutates) throws IOException {
  Mutate firstMutate = mutates.get(0);
  if (!region.getRegionInfo().isMetaTable()) {
    cacheFlusher.reclaimMemStoreMemory();
  }
  byte[] row = firstMutate.getRow().toByteArray();
  RowMutations rm = new RowMutations(row);
  for (Mutate mutate: mutates) {
    MutateType type = mutate.getMutateType();
    switch (mutate.getMutateType()) {
    case PUT:
      rm.add(ProtobufUtil.toPut(mutate));
      break;
    case DELETE:
      rm.add(ProtobufUtil.toDelete(mutate));
      break;
      default:
        throw new DoNotRetryIOException(
          "mutate supports atomic put and/or delete, not "
            + type.name());
    }
  }
  region.mutateRow(rm);
}
 
Developer: daidong, Project: DominoHBase, Lines of code: 33, Source: HRegionServer.java

Example 11: set

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
public Set<HBeanRow> set(HBeanReferences provided, RowMutations mutations) {
    if (provided == null) {
        return new HashSet<>();
    }
    Set<HBeanRow> removedRowKeys = merge(provided, mutations);
    Map<String, KeyValue> providedRefs = provided.getReferencesKeyValue();
    for (String propertyName : references.keySet()) {
        KeyValue providedkv = providedRefs.get(propertyName);
        if (providedkv == null) {
            KeyValue existingkv = references.get(propertyName);
            try {
                Delete d = new Delete(mutations.getRow());
                d.deleteColumns(existingkv.getFamily(), existingkv.getQualifier());
                mutations.add(d);

                Map<String, HBeanRow> existingRowKeys = getRowKeys(existingkv);
                removedRowKeys.addAll(existingRowKeys.values());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

    }
    return removedRowKeys;
}
 
Developer: deephacks, Project: confit, Lines of code: 26, Source: HBeanReferences.java

Example 12: rowMutationsFromThrift

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
/**
 * Creates a {@link RowMutations} (HBase) from a {@link TRowMutations} (Thrift)
 *
 * @param in the <code>TRowMutations</code> to convert
 *
 * @return converted <code>RowMutations</code>
 */
public static RowMutations rowMutationsFromThrift(TRowMutations in) throws IOException {
  RowMutations out = new RowMutations(in.getRow());
  List<TMutation> mutations = in.getMutations();
  for (TMutation mutation : mutations) {
    if (mutation.isSetPut()) {
      out.add(putFromThrift(mutation.getPut()));
    }
    if (mutation.isSetDeleteSingle()) {
      out.add(deleteFromThrift(mutation.getDeleteSingle()));
    }
  }
  return out;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 21, Source: ThriftUtilities.java

Example 13: testRow

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException {
  byte [] bytes = format(i);
  // Put a known value so when we go to check it, it is there.
  Put put = new Put(bytes);
  put.addColumn(FAMILY_NAME, getQualifier(), bytes);
  this.table.put(put);
  RowMutations mutations = new RowMutations(bytes);
  mutations.add(put);
  this.table.checkAndMutate(bytes, FAMILY_NAME, getQualifier(), CompareOp.EQUAL, bytes,
      mutations);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 13, Source: PerformanceEvaluation.java

Example 14: testMutateRow

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Test
public void testMutateRow() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor col = new HColumnDescriptor(fam);
  desc.addFamily(col);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)){
    Put p1 = new Put(row1);
    p1.add(fam, qual, value);
    p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));

    Put p2 = new Put(row1);
    p2.add(fam, qual2, value);
    p2.setCellVisibility(new CellVisibility(SECRET));

    RowMutations rm = new RowMutations(row1);
    rm.add(p1);
    rm.add(p2);

    table.mutateRow(rm);

    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(CONFIDENTIAL));
    Result result = table.get(get);
    assertTrue(result.containsColumn(fam, qual));
    assertFalse(result.containsColumn(fam, qual2));

    get.setAuthorizations(new Authorizations(SECRET));
    result = table.get(get);
    assertFalse(result.containsColumn(fam, qual));
    assertTrue(result.containsColumn(fam, qual2));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 36, Source: TestVisibilityLabels.java

Example 15: testFlushedFileWithVisibilityTags

import org.apache.hadoop.hbase.client.RowMutations; // import the package/class this method depends on
@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor col = new HColumnDescriptor(fam);
  desc.addFamily(col);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put p1 = new Put(row1);
    p1.add(fam, qual, value);
    p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));

    Put p2 = new Put(row1);
    p2.add(fam, qual2, value);
    p2.setCellVisibility(new CellVisibility(SECRET));

    RowMutations rm = new RowMutations(row1);
    rm.add(p1);
    rm.add(p2);

    table.mutateRow(rm);
  }
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
  Store store = regions.get(0).getStore(fam);
  Collection<StoreFile> storefiles = store.getStorefiles();
  assertTrue(storefiles.size() > 0);
  for (StoreFile storeFile : storefiles) {
    assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 33, Source: TestVisibilityLabels.java


Note: The org.apache.hadoop.hbase.client.RowMutations.add examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, who retain copyright; please consult each project's license before redistributing or reusing the code, and do not republish this compilation without permission.