

Java Put.setDurability Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Put.setDurability. If you have been wondering what Put.setDurability does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples of org.apache.hadoop.hbase.client.Put, the class this method belongs to.


The 15 code examples of Put.setDurability below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
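Before diving into the examples, here is a minimal, self-contained sketch of what setDurability controls. The row, family, and qualifier names are illustrative placeholders, not taken from any example below; the durability levels shown are those defined by org.apache.hadoop.hbase.client.Durability.

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilityExample {
  public static void main(String[] args) {
    // Row, family, and qualifier are hypothetical placeholders.
    Put put = new Put(Bytes.toBytes("row-1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));

    // Each durability level trades write latency against safety
    // (the last call wins; they are listed together only for illustration):
    put.setDurability(Durability.SKIP_WAL);    // skip the write-ahead log; fastest, but data is lost on a crash
    put.setDurability(Durability.ASYNC_WAL);   // append to the WAL asynchronously
    put.setDurability(Durability.SYNC_WAL);    // sync the WAL entry before the write completes
    put.setDurability(Durability.FSYNC_WAL);   // additionally force an fsync of the WAL
    put.setDurability(Durability.USE_DEFAULT); // defer to the table's configured durability
  }
}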

Example 1: map

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Override
public void map(ImmutableBytesWritable key, Result result,
    Context context)
throws IOException {
  List<Long> tsList = new ArrayList<Long>();
  for (Cell kv : result.listCells()) {
    tsList.add(kv.getTimestamp());
  }

  List<Put> puts = new ArrayList<>();
  for (Long ts : tsList) {
    Put put = new Put(key.get());
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
    puts.add(put);
  }
  table.put(puts);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 19, Source: TestTimeRangeMapRed.java

Example 2: write

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Writes an action (Put or Delete) to the specified table.
 *
 * @param tableName
 *          the table being updated.
 * @param action
 *          the update, either a put or a delete.
 * @throws IllegalArgumentException
 *          if the action is not a put or a delete.
 */
@Override
public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
  BufferedMutator mutator = getBufferedMutator(tableName);
  // The actions are not immutable, so we defensively copy them
  if (action instanceof Put) {
    Put put = new Put((Put) action);
    put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL
        : Durability.SKIP_WAL);
    mutator.mutate(put);
  } else if (action instanceof Delete) {
    Delete delete = new Delete((Delete) action);
    mutator.mutate(delete);
  } else {
    throw new IllegalArgumentException(
        "action must be either Delete or Put");
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 27, Source: MultiTableOutputFormat.java
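Note the defensive copy in this example: because the incoming Mutation may be reused by the caller, write() clones the Put before overriding its durability, so the useWriteAheadLogging switch never mutates the caller's object.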

Example 3: testRow

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, opts.totalRows);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 27, Source: PerformanceEvaluation.java

Example 4: testTimeRangeMapRed

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test
public void testTimeRangeMapRed()
throws IOException, InterruptedException, ClassNotFoundException {
  final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
  final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME);
  col.setMaxVersions(Integer.MAX_VALUE);
  desc.addFamily(col);
  admin.createTable(desc);
  List<Put> puts = new ArrayList<Put>();
  for (Map.Entry<Long, Boolean> entry : TIMESTAMP.entrySet()) {
    Put put = new Put(KEY);
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
    puts.add(put);
  }
  Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
  table.put(puts);
  runTestOnTable();
  verify(table);
  table.close();
}
 
Contributor: fengchen8086, Project: ditb, Lines: 22, Source: TestTimeRangeMapRed.java

Example 5: putData

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
static void putData(HRegion region, Durability durability,
    int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    Put put = new Put(Bytes.toBytes("" + i));
    put.setDurability(durability);
    for (byte[] family : families) {
      put.add(family, qf, null);
    }
    region.put(put);
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 12, Source: TestHRegion.java

Example 6: createMultiRegionsWithWritableSerialization

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Inserts multiple regions into hbase:meta using Writable serialization instead of PB
 */
public int createMultiRegionsWithWritableSerialization(final Configuration c,
    final TableName tableName, byte [][] startKeys)
throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);

  List<HRegionInfo> newRegions
      = new ArrayList<HRegionInfo>(startKeys.length);
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = new Put(hri.getRegionName());
    put.setDurability(Durability.SKIP_WAL);
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      getBytes(hri)); //this is the old Writable serialization

    // also add the region as its own daughters
    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    meta.put(put);
    LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());

    newRegions.add(hri);
    count++;
  }
  meta.close();
  return count;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 37, Source: TestMetaMigrationConvertingToPB.java

Example 7: loadRegion

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Load region with rows from 'aaa' to 'zzz', skipping rows that fall
 * outside the region's key range.
 * @param r Region
 * @param f Family
 * @param flush flush the cache if true
 * @return Count of rows loaded.
 * @throws IOException
 */
private int loadRegion(final HRegion r, final byte[] f, final boolean flush)
    throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        if (!HRegion.rowIsInRange(r.getRegionInfo(), k)) {
          continue;
        }
        Put put = new Put(k);
        put.add(f, null, k);
        if (r.getWAL() == null) {
          put.setDurability(Durability.SKIP_WAL);
        }
        r.put(put);
        rowCount++;
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 37, Source: TestRegionMergeTransaction.java

Example 8: loadRegion

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Load region with rows from 'aaa' to 'zzz'.
 * @param r Region
 * @param f Family
 * @param flush flush the cache if true
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.add(f, null, k);
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 45, Source: HBaseTestingUtility.java
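Beyond setting SKIP_WAL, this variant retries each put with exponential backoff (doubling the pause up to one second) whenever the region throws RegionTooBusyException, so the bulk load survives temporary memstore pressure instead of aborting.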

Example 9: doPuts

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private void doPuts(Region region) throws IOException{
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new Tag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.add(CF_BYTES, col, value);
      }
      if(VERBOSE){
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i+"", ' ', 4)+" "+kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 29, Source: TestEncodedSeekers.java

Example 10: addToEachStartKey

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  Table t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  Table meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }

    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 33, Source: TestMasterTransitions.java

Example 11: map

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * Convert a line of TSV text into an HBase table row after transforming the
 * values by multiplying them by 3.
 */
@Override
public void map(LongWritable offset, Text value, Context context)
      throws IOException {
  byte[] family = Bytes.toBytes("FAM");
  final byte[][] qualifiers = { Bytes.toBytes("A"), Bytes.toBytes("B") };

  // do some basic line parsing
  byte[] lineBytes = value.getBytes();
  String[] valueTokens = new String(lineBytes, "UTF-8").split("\u001b");

  // create the rowKey and Put
  ImmutableBytesWritable rowKey =
    new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0]));
  Put put = new Put(rowKey.copyBytes());
  put.setDurability(Durability.SKIP_WAL);

  //The value should look like this: VALUE1 or VALUE2. Let's multiply
  //the integer by 3
  for(int i = 1; i < valueTokens.length; i++) {
    String prefix = valueTokens[i].substring(0, "VALUE".length());
    String suffix = valueTokens[i].substring("VALUE".length());
    String newValue = prefix + Integer.parseInt(suffix) * 3;

    KeyValue kv = new KeyValue(rowKey.copyBytes(), family,
        qualifiers[i-1], Bytes.toBytes(newValue));
    put.add(kv);
  }

  try {
    context.write(rowKey, put);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 39, Source: TsvImporterCustomTestMapper.java

Example 12: setupBeforeClass

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
/**
 * A set up method to start the test cluster. AggregateProtocolImpl is registered and will be
 * loaded during region startup.
 * @throws Exception
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {

  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

  util.startMiniCluster(2);
  final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  /**
   * The test table has one CQ that is always populated and one variable CQ per row:
   * rowKey1: CF:CQ, CF:CQ1; rowKey2: CF:CQ, CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    BigDecimal bd = new BigDecimal(i);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(bd));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    p2.setDurability(Durability.SKIP_WAL);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(bd)),
      Bytes.toBytes(bd.multiply(new BigDecimal("0.10"))));
    table.put(p2);
  }
  table.close();
}
 
Contributor: fengchen8086, Project: ditb, Lines: 33, Source: TestBigDecimalColumnInterpreter.java

Example 13: createRegion

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
private HRegion createRegion(final HTableDescriptor desc,
    byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
throws IOException {
  HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
  HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
  LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
  for(int i = firstRow; i < firstRow + nrows; i++) {
    Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
    put.setDurability(Durability.SKIP_WAL);
    put.add(COLUMN_NAME, null,  VALUE);
    region.put(put);
    if (i % 10000 == 0) {
      LOG.info("Flushing write #" + i);
      region.flush(true);
    }
  }
  HRegion.closeHRegion(region);
  return region;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 20, Source: TestMergeTable.java

Example 14: Test

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
@Test
public void Test() throws Exception {
  String cf = "f";
  String table = "TestFuzzyAndColumnRangeFilterClient";
  Table ht = TEST_UTIL.createTable(TableName.valueOf(table),
          Bytes.toBytes(cf), Integer.MAX_VALUE);

  // 10 byte row key - (2 bytes 4 bytes 4 bytes)
  // 4 byte qualifier
  // 4 byte value

  for (int i1 = 0; i1 < 2; i1++) {
    for (int i2 = 0; i2 < 5; i2++) {
      byte[] rk = new byte[10];

      ByteBuffer buf = ByteBuffer.wrap(rk);
      buf.clear();
      buf.putShort((short) 2);
      buf.putInt(i1);
      buf.putInt(i2);

      for (int c = 0; c < 5; c++) {
        byte[] cq = new byte[4];
        Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);

        Put p = new Put(rk);
        p.setDurability(Durability.SKIP_WAL);
        p.add(cf.getBytes(), cq, Bytes.toBytes(c));
        ht.put(p);
        LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
                + Bytes.toStringBinary(cq));
      }
    }
  }

  TEST_UTIL.flush();

  // test passes
  runTest(ht, 0, 10);

  // test fails
  runTest(ht, 1, 8);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 44, Source: TestFuzzyRowAndColumnRangeFilter.java

Example 15: setWAL

import org.apache.hadoop.hbase.client.Put; // import the package/class this method depends on
public void setWAL(Durability wal) {
  for (Put put : puts.values()) {
    put.setDurability(wal);
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 6, Source: IndexPut.java


Note: The org.apache.hadoop.hbase.client.Put.setDurability examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Before redistributing or using the code, please consult the corresponding project's License. Do not reproduce without permission.