

Java Put.add Method Code Examples

This article collects and organizes typical usage examples of the Java method org.apache.hadoop.hbase.client.Put.add. If you are wondering what Put.add does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Put.


Fifteen code examples of Put.add are shown below, ordered by popularity by default.
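Before the examples, here is a minimal sketch of the three Put.add overloads that recur below. It targets the pre-2.0 HBase client API these projects use (in the 1.x line the family/qualifier/value forms were deprecated in favor of addColumn, while the Cell-based form remains); the class name PutAddSketch and the row, family, and qualifier values are placeholders, not taken from any example.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddSketch {
  public static Put buildPut() throws IOException {
    byte[] row = Bytes.toBytes("row1");   // placeholder row key
    byte[] family = Bytes.toBytes("cf");  // placeholder column family
    byte[] value = Bytes.toBytes("v");

    Put put = new Put(row);
    // Overload 1: family/qualifier/value; the region server assigns the timestamp at write time
    put.add(family, Bytes.toBytes("q1"), value);
    // Overload 2: family/qualifier/timestamp/value; writes an explicit cell version
    put.add(family, Bytes.toBytes("q2"), 42L, value);
    // Overload 3: a pre-built KeyValue; its row must match the Put's row,
    // otherwise add throws an IOException
    KeyValue kv = new KeyValue(row, family, Bytes.toBytes("q3"),
        HConstants.LATEST_TIMESTAMP, value);
    put.add(kv);
    return put;
  }
}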

Example 1: testRow

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 27 | Source: PerformanceEvaluation.java

Example 2: createHFileInRegion

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Create a new hfile in the passed region
 * @param region region to operate on
 * @param columnFamily family for which to add data
 * @throws IOException
 */
private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
  // put one row in the region
  Put p = new Put(Bytes.toBytes("row"));
  p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
  region.put(p);
  // flush the region to make a store file
  region.flush(true);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 15 | Source: TestZooKeeperTableArchiveClient.java

Example 3: doPuts

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private void doPuts(Region region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new Tag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.add(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: TestEncodedSeekers.java

Example 4: testRowMutation

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test (timeout=300000)
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(SimpleRegionObserver.class,
      new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
          "hadDeleted"},
      tableName,
      new Boolean[] {false, false, false, false, false});
    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);

    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        tableName,
        new Boolean[] {false, false, true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 37 | Source: TestRegionObserverInterface.java
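A note on Example 4: Table.mutateRow applies the queued Put and Delete to row ROW as a single atomic operation on the region server, which is why the put and delete observer flags (hadPrePut/hadPostPut and hadDeleted) all flip to true after the one mutateRow call.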

Example 5: putRows

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private void putRows(HRegion r, int numRows, String value, String key) throws IOException {
  for (int i = 0; i < numRows; i++) {
    String row = key + "_" + i/* UUID.randomUUID().toString() */;
    System.out.println(String.format("Saving row: %s, with value %s", row, value));
    Put put = new Put(Bytes.toBytes(row));
    put.setDurability(Durability.SKIP_WAL);
    put.add(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob"));
    put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
    put.add(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999"));
    put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value));
    put.add(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId"));
    r.put(put);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 15 | Source: TestHRegion.java

Example 6: setupBeforeClass

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * A setup method to start the test cluster. AggregateProtocolImpl is registered and will be
 * loaded during region startup.
 * @throws Exception
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {

  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

  util.startMiniCluster(2);
  final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  /**
   * The test table has one CQ which is always populated and one variable CQ per row:
   *   rowKey1: CF:CQ, CF:CQ1
   *   rowKey2: CF:CQ, CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    BigDecimal bd = new BigDecimal(i);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(bd));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    p2.setDurability(Durability.SKIP_WAL);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(bd)),
      Bytes.toBytes(bd.multiply(new BigDecimal("0.10"))));
    table.put(p2);
  }
  table.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 33 | Source: TestBigDecimalColumnInterpreter.java

Example 7: Test

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test
public void Test() throws Exception {
  String cf = "f";
  String table = "TestFuzzyAndColumnRangeFilterClient";
  Table ht = TEST_UTIL.createTable(TableName.valueOf(table),
          Bytes.toBytes(cf), Integer.MAX_VALUE);

  // 10 byte row key - (2 bytes 4 bytes 4 bytes)
  // 4 byte qualifier
  // 4 byte value

  for (int i1 = 0; i1 < 2; i1++) {
    for (int i2 = 0; i2 < 5; i2++) {
      byte[] rk = new byte[10];

      ByteBuffer buf = ByteBuffer.wrap(rk);
      buf.clear();
      buf.putShort((short) 2);
      buf.putInt(i1);
      buf.putInt(i2);

      for (int c = 0; c < 5; c++) {
        byte[] cq = new byte[4];
        Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);

        Put p = new Put(rk);
        p.setDurability(Durability.SKIP_WAL);
        p.add(cf.getBytes(), cq, Bytes.toBytes(c));
        ht.put(p);
        LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
                + Bytes.toStringBinary(cq));
      }
    }
  }

  TEST_UTIL.flush();

  // test passes
  runTest(ht, 0, 10);

  // test fails
  runTest(ht, 1, 8);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 44 | Source: TestFuzzyRowAndColumnRangeFilter.java

Example 8: testRegionObserverStacking

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
public void testRegionObserverStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(),
    conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
  h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);

  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);

  Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
  long idA = ((ObserverA)c).id;
  c = h.findCoprocessor(ObserverB.class.getName());
  long idB = ((ObserverB)c).id;
  c = h.findCoprocessor(ObserverC.class.getName());
  long idC = ((ObserverC)c).id;

  assertTrue(idA < idB);
  assertTrue(idB < idC);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: TestRegionObserverStacking.java

Example 9: writeTestData

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private void writeTestData(Region region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
        put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
            Bytes.toBytes("value" + i + "_" + j + "_" + ts));
      }
    }
    region.put(put);
    if ((i + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 16 | Source: TestForceCacheImportantBlocks.java
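A note on Example 9: the four-argument overload put.add(CF_BYTES, qualifier, ts, value) writes one version of the same cell per distinct timestamp, so a single Put carries several versions of each column before region.put(put) is called.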

Example 10: testFlushedFileWithVisibilityTags

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor col = new HColumnDescriptor(fam);
  desc.addFamily(col);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put p1 = new Put(row1);
    p1.add(fam, qual, value);
    p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));

    Put p2 = new Put(row1);
    p2.add(fam, qual2, value);
    p2.setCellVisibility(new CellVisibility(SECRET));

    RowMutations rm = new RowMutations(row1);
    rm.add(p1);
    rm.add(p2);

    table.mutateRow(rm);
  }
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
  Store store = regions.get(0).getStore(fam);
  Collection<StoreFile> storefiles = store.getStorefiles();
  assertTrue(storefiles.size() > 0);
  for (StoreFile storeFile : storefiles) {
    assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 33 | Source: TestVisibilityLabels.java

Example 11: setupBeforeClass

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // configure which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName(),
      ColumnAggregationEndpointWithErrors.class.getName(),
      ColumnAggregationEndpointNullResponse.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  admin.close();

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 28 | Source: TestBatchCoprocessorEndpoint.java

Example 12: generateHBaseDatasetIntOB

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa | Project: QDrill | Lines: 34 | Source: TestTableGenerator.java

Example 13: generateHBaseDatasetDoubleOBDesc

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa | Project: QDrill | Lines: 34 | Source: TestTableGenerator.java
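A note on Examples 12 and 13: the row keys are built with org.apache.hadoop.hbase.util.OrderedBytes (encodeInt32 and encodeFloat64 with an explicit Order), so the numeric values sort correctly under HBase's lexicographic byte ordering of row keys; Put.add itself only ever sees the resulting byte arrays.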

Example 14: loadData

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private void loadData(Table table) throws IOException {
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.add(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 8 | Source: TestRegionMergeTransactionOnCluster.java

Example 15: testMultipleColumnPrefixFilterWithManyFamilies

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test
public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException {
  String family1 = "Family1";
  String family2 = "Family2";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter"));
  HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
  hcd2.setMaxVersions(3);
  htd.addFamily(hcd2);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.
    getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 3;

  List<Cell> kvList = new ArrayList<Cell>();

  Map<String, List<Cell>> prefixMap = new HashMap<String, List<Cell>>();

  prefixMap.put("p", new ArrayList<Cell>());
  prefixMap.put("q", new ArrayList<Cell>());
  prefixMap.put("s", new ArrayList<Cell>());

  String valueString = "ValueString";

  for (String row: rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column: columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        double rand = Math.random();
        Cell kv;
        if (rand < 0.5) {
          kv = KeyValueTestUtil.create(row, family1, column, timestamp, valueString);
        } else {
          kv = KeyValueTestUtil.create(row, family2, column, timestamp, valueString);
        }
        p.add(kv);
        kvList.add(kv);
        for (String s: prefixMap.keySet()) {
          if (column.startsWith(s)) {
            prefixMap.get(s).add(kv);
          }
        }
      }
    }
    region.put(p);
  }

  MultipleColumnPrefixFilter filter;
  Scan scan = new Scan();
  scan.setMaxVersions();
  byte[][] filter_prefix = new byte[2][];
  filter_prefix[0] = new byte[] {'p'};
  filter_prefix[1] = new byte[] {'q'};
  
  filter = new MultipleColumnPrefixFilter(filter_prefix);
  scan.setFilter(filter);
  List<Cell> results = new ArrayList<Cell>();
  InternalScanner scanner = region.getScanner(scan);
  // drain the scanner; all matching cells accumulate in results
  while (scanner.next(results));
  assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 73 | Source: TestMultipleColumnPrefixFilter.java
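A closing note on Example 15: because a Put is keyed only by its row, the add(Cell) overload can accumulate cells from multiple column families (family1 and family2 here) in one Put, and region.put(p) writes them all in a single operation.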


Note: the org.apache.hadoop.hbase.client.Put.add method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.