

Java Put Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Put. If you are wondering what the Put class does, how to use it, or what it looks like in real code, the selected examples below may help.


The Put class belongs to the org.apache.hadoop.hbase.client package. A total of 15 code examples of the Put class are shown below, sorted by popularity by default.
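Before the collected examples, here is a minimal sketch of the basic Put workflow, assuming an HBase 1.0+ client API (ConnectionFactory, Table, Put.addColumn): build a Put keyed by a row, add a cell to it, and write it through a Table. The table name "demo_table", column family "cf", qualifier "q", and row/value strings are hypothetical placeholders for illustration only, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutQuickStart {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // "demo_table", "cf", "q" and the row/value bytes are hypothetical names used for illustration
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Put put = new Put(Bytes.toBytes("row-1"));              // row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // column family and qualifier
          Bytes.toBytes("value-1"));                          // cell value
      table.put(put);                                         // single synchronous write
    }
  }
}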

Example 1: checkAndPutName

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Override
public boolean checkAndPutName(final byte[] bNewUid, final byte[] name) {
    if (LOGGER.isTraceEnabled()) {
        final String rowKeyStr = ByteArrayUtils.byteArrayToHex(bNewUid);

        final String valueStr = Bytes.toString(name);

        LOGGER.trace("checkAndPutName - Key: [" + rowKeyStr + "], Value: [" + valueStr + "]");
    }

    final Put put = new Put(bNewUid);
    put.addColumn(NAME_FAMILY, NAME_COL_QUALIFIER, name);

    boolean result;

    // pass null as the expected value to ensure we only put if it didn't
    // exist before
    result = doCheckAndPut(bNewUid, NAME_FAMILY, NAME_COL_QUALIFIER, null, put);

    return result;
}
 
Author: gchq, Project: stroom-stats, Lines: 22, Source: HBaseUniqueIdReverseMapTable.java

Example 2: loadData

import org.apache.hadoop.hbase.client.Put; // import the required package/class
private static void loadData(final Table ht, final byte[][] families,
    final int rows, final int flushes) throws IOException {
  List<Put> puts = new ArrayList<Put>(rows);
  byte[] qualifier = Bytes.toBytes("val");
  for (int i = 0; i < flushes; i++) {
    for (int k = 0; k < rows; k++) {
      byte[] row = Bytes.toBytes(random.nextLong());
      Put p = new Put(row);
      for (int j = 0; j < families.length; ++j) {
        p.add(families[j], qualifier, row);
      }
      puts.add(p);
    }
    ht.put(puts);
    TEST_UTIL.flush();
    puts.clear();
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: TestCompactionState.java

Example 3: testCheckAndDelete

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Test
public void testCheckAndDelete() throws IOException {
  Get get = new Get(ROW_1);
  Result result = remoteTable.get(get);
  byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNull(value2);
  assertTrue(remoteTable.exists(get));
  assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);
  Delete delete = new Delete(ROW_1);

  remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
  assertFalse(remoteTable.exists(get));

  Put put = new Put(ROW_1);
  put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
  remoteTable.put(put);

  assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1,
      put));
  assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2,
      put));
}
 
Author: fengchen8086, Project: ditb, Lines: 26, Source: TestRemoteTable.java

Example 4: recordFailure

import org.apache.hadoop.hbase.client.Put; // import the required package/class
private void recordFailure(final Table table, final Put put, final long keyBase,
    final long start, IOException e) {
  failedKeySet.add(keyBase);
  String exceptionInfo;
  if (e instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
    exceptionInfo = aggEx.getExhaustiveDescription();
  } else {
    StringWriter stackWriter = new StringWriter();
    PrintWriter pw = new PrintWriter(stackWriter);
    e.printStackTrace(pw);
    pw.flush();
    exceptionInfo = StringUtils.stringifyException(e);
  }
  LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
      + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
      + exceptionInfo);
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: MultiThreadedWriterWithACL.java

Example 5: getActions

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Override
public List<Row> getActions() throws FlumeException {
  List<Row> actions = new LinkedList<Row>();
  if (plCol != null) {
    byte[] rowKey;
    try {
      if (keyType == KeyType.TS) {
        rowKey = SimpleRowKeyGenerator.getTimestampKey(rowPrefix);
      } else if (keyType == KeyType.RANDOM) {
        rowKey = SimpleRowKeyGenerator.getRandomKey(rowPrefix);
      } else if (keyType == KeyType.TSNANO) {
        rowKey = SimpleRowKeyGenerator.getNanoTimestampKey(rowPrefix);
      } else {
        rowKey = SimpleRowKeyGenerator.getUUIDKey(rowPrefix);
      }
      Put put = new Put(rowKey);
      put.add(cf, plCol, payload);
      actions.add(put);
    } catch (Exception e) {
      throw new FlumeException("Could not get row key!", e);
    }

  }
  return actions;
}
 
Author: moueimei, Project: flume-release-1.7.0, Lines: 26, Source: SimpleHbaseEventSerializer.java

Example 6: testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Test
public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck()
    throws Exception {
  final TableName tableName =
      TableName.valueOf("testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck");
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    htd.addFamily(new HColumnDescriptor("i_f"));
    htd.setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName());
    admin.createTable(htd);
    List<HRegion> regions = awaitTableRegions(tableName);
    HRegion region = regions.get(0);
    for (int i = 3; i < 9; i++) {
      Put p = new Put(Bytes.toBytes("row"+i));
      p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
      p.add(Bytes.toBytes("i_f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
      region.put(p);
    }
    region.flush(true);
    Store store = region.getStore(Bytes.toBytes("f"));
    Collection<StoreFile> storefiles = store.getStorefiles();
    assertEquals(storefiles.size(), 1);
    assertFalse(region.hasReferences());
    Path referencePath =
        region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f",
          storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
    assertNull(referencePath);
    referencePath =
        region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f",
          storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
    assertNotNull(referencePath);
  } finally {
    TESTING_UTIL.deleteTable(tableName);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 37, Source: TestSplitTransactionOnCluster.java

Example 7: testWriteRequestsCounter

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Test
public void testWriteRequestsCounter() throws IOException {
  byte[] fam = Bytes.toBytes("info");
  byte[][] families = { fam };
  this.region = initHRegion(tableName, method, CONF, families);

  Assert.assertEquals(0L, region.getWriteRequestsCount());

  Put put = new Put(row);
  put.add(fam, fam, fam);

  Assert.assertEquals(0L, region.getWriteRequestsCount());
  region.put(put);
  Assert.assertEquals(1L, region.getWriteRequestsCount());
  region.put(put);
  Assert.assertEquals(2L, region.getWriteRequestsCount());
  region.put(put);
  Assert.assertEquals(3L, region.getWriteRequestsCount());

  region.delete(new Delete(row));
  Assert.assertEquals(4L, region.getWriteRequestsCount());

  HRegion.closeHRegion(this.region);
  this.region = null;
}
 
Author: fengchen8086, Project: ditb, Lines: 26, Source: TestHRegion.java

Example 8: testRow

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new Tag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.add(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: PerformanceEvaluation.java

Example 9: createTableAndWriteDataWithLabels

import org.apache.hadoop.hbase.client.Put; // import the required package/class
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    List<Put> puts = new ArrayList<Put>();
    for (String labelExp : labelExps) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      i++;
    }
    table.put(puts);
  } finally {
    if (table != null) {
      table.close();
    }
  }
  return table;
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: TestVisibilityLabelsWithACL.java

Example 10: testFlushedFileWithNoTags

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Test
public void testFlushedFileWithNoTags() throws Exception {
  String method = "testFlushedFileWithNoTags";
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(fam1));
  region = initHRegion(Bytes.toBytes(method), method, TEST_UTIL.getConfiguration(), fam1);
  Put put = new Put(Bytes.toBytes("a-b-0-0"));
  put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
  region.put(put);
  region.flush(true);
  Store store = region.getStore(fam1);
  Collection<StoreFile> storefiles = store.getStorefiles();
  for (StoreFile sf : storefiles) {
    assertFalse("Tags should not be present "
        ,sf.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: TestHRegion.java

Example 11: createIndexRowKey

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Override public byte[] createIndexRowKey(final IndexSpecification indexSpec, final Put put) {
  List<Cell> values = put.get(indexSpec.getFamily(), indexSpec.getQualifier());
  if (values == null || values.size() == 0) {
    return null;
  }
  if (values.size() > 1) {
    throw new IllegalArgumentException(
        "Make sure that there is at most one value for an index column in a Put");
  }
  byte[] value = values.get(0).getValue();
  return createIndexRowKey(put.getRow(), value);
}
 
Author: fengchen8086, Project: ditb, Lines: 13, Source: SimpleIndexKeyGenerator.java

Example 12: testManualHBaseInsertion

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Test
public void testManualHBaseInsertion() throws ServiceException, IOException {
  IgniteConfiguration cfg = prepareConfig(false);
  IgniteConfiguration cfg2 = new IgniteConfiguration(cfg);
  cfg.setGridName("first");
  cfg2.setGridName("second");
  String cacheName = "myCache";
  try (Ignite ignite = Ignition.getOrStart(cfg); Ignite ignite2 = Ignition.getOrStart(cfg2)) {
    IgniteCache<String, String> cache = ignite.getOrCreateCache(cacheName);
    cache.remove("Hello");
    assertNull(cache.get("Hello"));
    try (Connection conn = getHBaseConnection()) {
      TableName tableName = TableName.valueOf(TABLE_NAME);
      Table table = conn.getTable(tableName);
      Serializer<Object> serializer = ObjectSerializer.INSTANCE;
      Put put = new Put(serializer.serialize("Hello"));
      put.addColumn(cacheName.getBytes(), QUALIFIER, serializer.serialize("World"));
      table.put(put);
    }
    assertEquals("World", cache.get("Hello"));
  }
}
 
Author: bakdata, Project: ignite-hbase, Lines: 23, Source: HBaseCacheStoreTest.java

Example 13: processPut

import org.apache.hadoop.hbase.client.Put; // import the required package/class
/**
 * parse put, add index put into mdRecordList
 */
private void processPut(List<KeyValue> mdRecordList, Put put) throws IOException {
  if (put == null) return;
  byte[] rawRowkey = put.getRow();
  int[] arr = new int[dimensions];
  int i = 0;
  for (Map.Entry<byte[], TreeSet<byte[]>> entry : tableRelation.getIndexFamilyMap().entrySet()) {
    for (byte[] qualifier : entry.getValue()) {
      arr[i] = Bytes.toInt(put.get(entry.getKey(), qualifier).get(0).getValue());
      ++i;
    }
  }
  byte[] mdKey = MDUtils.bitwiseZip(arr, dimensions);
  KeyValue keyValue =
      new KeyValue(mdKey, LMDIndexConstants.FAMILY, rawRowkey, put.getTimeStamp(), Type.Put,
          LMDIndexConstants.VALUE);
  mdRecordList.add(keyValue);
}
 
Author: fengchen8086, Project: ditb, Lines: 21, Source: LMDIndexWriter.java

Example 14: before

import org.apache.hadoop.hbase.client.Put; // import the required package/class
@Before
public void before()  throws Exception {
  final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);

  Put puta = new Put(ROW_A);
  puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(puta);

  Put putb = new Put(ROW_B);
  putb.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putb);

  Put putc = new Put(ROW_C);
  putc.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putc);
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: TestServerCustomProtocol.java

Example 15: createTable

import org.apache.hadoop.hbase.client.Put; // import the required package/class
/**
 * @param dropIfExists if true, drop and recreate the secondary and bucket tables when they already exist
 */
public void createTable(boolean dropIfExists) throws IOException {
  if (admin.tableExists(secondaryTableName)) {
    if (dropIfExists) {
      admin.disableTable(bucketTableName);
      admin.deleteTable(bucketTableName);
      admin.disableTable(secondaryTableName);
      admin.deleteTable(secondaryTableName);
    } else {
      secondaryTable = conn.getTable(secondaryTableName);
      bucketTable = conn.getTable(bucketTableName);
      return;
    }
  }
  // secondary table
  HTableDescriptor secondaryDesc = new HTableDescriptor(secondaryTableName);
  secondaryDesc
      .addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.SECONDARY_FAMILY));
  admin.createTable(secondaryDesc);
  secondaryTable = conn.getTable(secondaryTableName);
  // bucket table
  HTableDescriptor bucketDesc = new HTableDescriptor(bucketTableName);
  bucketDesc.addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.BUCKET_FAMILY));
  admin.createTable(bucketDesc);
  bucketTable = conn.getTable(bucketTableName);
  // write the initial bucket row (prefix length and size) when the tables are first created
  int[] starts = new int[dimensions];
  Arrays.fill(starts, 0);
  Put put = new Put(MDUtils.bitwiseZip(starts, dimensions));
  put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER,
      Bytes.toBytes(dimensions));
  put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER,
      Bytes.toBytes(0L));
  bucketTable.put(put);
}
 
Author: fengchen8086, Project: ditb, Lines: 38, Source: MDIndex.java


Note: The org.apache.hadoop.hbase.client.Put examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; refer to each project's License before redistributing or reusing it. Do not reproduce this article without permission.