

Java Put.addColumn Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hbase.client.Put.addColumn. If you have been wondering what exactly Put.addColumn does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Put.


The following 15 code examples of Put.addColumn are presented, sorted by popularity by default.
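Before the collected examples, here is a minimal, self-contained sketch of the basic Put.addColumn pattern. The table name "demo", column family "cf", and the row/qualifier/value literals are placeholders for illustration only, not taken from any example below.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddColumnDemo {
  public static void main(String[] args) throws Exception {
    // Assumes an HBase cluster reachable via the default configuration
    // and an existing table "demo" with column family "cf".
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      Put put = new Put(Bytes.toBytes("row1"));                // row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"),  // family, qualifier
          Bytes.toBytes("value1"));                            // cell value
      // The overload with an explicit timestamp pins the cell version.
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"),
          System.currentTimeMillis(), Bytes.toBytes("value2"));
      table.put(put);
    }
  }
}

A Put accumulates cells for a single row: each addColumn call appends one family/qualifier/value cell, and table.put sends them to the server as one atomic row mutation.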

Example 1: createTable

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Creates the secondary and bucket tables, reusing them if they already exist.
 * @param dropIfExists if true, drop and recreate the tables when they already exist
 */
public void createTable(boolean dropIfExists) throws IOException {
  if (admin.tableExists(secondaryTableName)) {
    if (dropIfExists) {
      admin.disableTable(bucketTableName);
      admin.deleteTable(bucketTableName);
      admin.disableTable(secondaryTableName);
      admin.deleteTable(secondaryTableName);
    } else {
      secondaryTable = conn.getTable(secondaryTableName);
      bucketTable = conn.getTable(bucketTableName);
      return;
    }
  }
  // secondary table
  HTableDescriptor secondaryDesc = new HTableDescriptor(secondaryTableName);
  secondaryDesc
      .addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.SECONDARY_FAMILY));
  admin.createTable(secondaryDesc);
  secondaryTable = conn.getTable(secondaryTableName);
  // bucket table
  HTableDescriptor bucketDesc = new HTableDescriptor(bucketTableName);
  bucketDesc.addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.BUCKET_FAMILY));
  admin.createTable(bucketDesc);
  bucketTable = conn.getTable(bucketTableName);
  // seed the initial bucket row at creation time
  int[] starts = new int[dimensions];
  Arrays.fill(starts, 0);
  Put put = new Put(MDUtils.bitwiseZip(starts, dimensions));
  put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER,
      Bytes.toBytes(dimensions));
  put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER,
      Bytes.toBytes(0L));
  bucketTable.put(put);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 38, Source: MDIndex.java

Example 2: parsePut

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Override protected Map<TableName, Put> parsePut(Put put, boolean serverSide) {
  Map<TableName, Put> map = new HashMap<>();
  byte[] row = put.getRow();
  for (Map.Entry<byte[], List<Cell>> entry : put.getFamilyCellMap().entrySet()) {
    byte[] family = entry.getKey();
    for (Cell cell : entry.getValue()) {
      byte[] q = CellUtil.cloneQualifier(cell);
      if (tableRelation.isIndexColumn(family, q)) {
        TableName indexTableName = tableRelation.getIndexTableName(family, q);
        Put newPut = new Put(getIndexRow(row, CellUtil.cloneValue(cell)));
        if (serverSide) {
          newPut.addColumn(IndexType.SEDONDARY_FAMILY_BYTES, (byte[]) null,
              cell.getTimestamp(), null);
        } else {
          newPut.addColumn(IndexType.SEDONDARY_FAMILY_BYTES, null, null);
        }
        map.put(indexTableName, newPut);
      }
    }
  }
  tableRelation.getIndexFamilyMap();
  return map;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: IndexPutParser.java

Example 3: storeTrace

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Index table time_consume: stores the time consumed by each trace.
 * rowkey: serviceId + CS timestamp
 * columnFamily: trace
 * qualifier: traceId ...
 * value: time consumed by the whole call chain
 * @param span
 * @param annotationMap
 * @return the index Put, or null if this span is not a client-side root span
 */
@Override
public Put storeTrace(Span span, Map<String, Annotation> annotationMap) {
    if (null == span.getParentId() && annotationMap.containsKey(AnnotationType.CS.symbol())) {
        // this is the root span, on the client side
        Annotation csAnnotation = annotationMap.get(AnnotationType.CS.symbol());
        Annotation crAnnotation = annotationMap.get(AnnotationType.CR.symbol());

        long consumeTime = crAnnotation.getTimestamp() - csAnnotation.getTimestamp();
        String rowKey = span.getServiceId() + Constants.UNDER_LINE + csAnnotation.getTimestamp();
        Put put = new Put(Bytes.toBytes(rowKey));
        put.addColumn(Bytes.toBytes(Constants.TABLE_TIME_CONSUME_COLUMN_FAMILY), Bytes.toBytes(span.getTraceId()),
                Bytes.toBytes(consumeTime));
        return put;
    }
    return null;
}
 
Developer ID: JThink, Project: SkyEye, Lines of code: 27, Source: HbaseStore.java

Example 4: storeAnnotation

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Index table annotation: stores custom exception information.
 * rowkey: serviceId + ExceptionType + CS/SR timestamp
 * columnFamily: trace
 * qualifier: traceId ...
 * value: the value of the binaryAnnotation
 * @param span
 * @param annotationMap
 * @return the index Puts, or null if the span carries no binary annotations
 */
@Override
public List<Put> storeAnnotation(Span span, Map<String, Annotation> annotationMap) {
    List<BinaryAnnotation> annotations = span.getBinaryAnnotations();
    if (null != annotations && annotations.size() != 0) {
        List<Put> puts = new ArrayList<Put>();
        // if there are custom exceptions, index each one
        for (BinaryAnnotation annotation : annotations) {
            String rowKey = span.getServiceId() + Constants.UNDER_LINE + annotation.getType()
                    + Constants.UNDER_LINE + this.getBinaryAnnotationTimestamp(annotationMap);
            Put put = new Put(Bytes.toBytes(rowKey));
            put.addColumn(Bytes.toBytes(Constants.TABLE_ANNOTATION_COLUMN_FAMILY), Bytes.toBytes(span.getTraceId()),
                    Bytes.toBytes(annotation.getValue() == null ? annotation.getType() : annotation.getValue()));
            puts.add(put);
        }
        return puts;
    }
    return null;
}
 
Developer ID: JThink, Project: SkyEye, Lines of code: 29, Source: HbaseStore.java

Example 5: testSequenceId

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Test that I can use the max flushed sequence id after the close.
 * @throws IOException
 */
@Test (timeout = 100000)
public void testSequenceId() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES);
  assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
  // Weird. This returns 0 if no store files or no edits. Afraid to change it.
  assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  region.close();
  assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
  assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  // Open region again.
  region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES);
  byte [] value = Bytes.toBytes(name.getMethodName());
  // Make a random put against our cf.
  Put put = new Put(value);
  put.addColumn(COLUMN_FAMILY_BYTES, null, value);
  region.put(put);
  // No flush yet so init numbers should still be in place.
  assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
  assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  region.flush(true);
  long max = region.getMaxFlushedSeqId();
  region.close();
  assertEquals(max, region.getMaxFlushedSeqId());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: TestHRegion.java

Example 6: testHBASE14489

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan s = new Scan();
  s.setFilter(new FilterAllFilter());
  ResultScanner scanner = table.getScanner(s);
  try {
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
      // drain the scanner; FilterAllFilter removes every row, so this body never runs
    }
  } finally {
    scanner.close();
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestRegionObserverInterface.java

Example 7: testFlushedFileWithNoTags

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Test
public void testFlushedFileWithNoTags() throws Exception {
  String method = "testFlushedFileWithNoTags";
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(fam1));
  region = initHRegion(Bytes.toBytes(method), method, TEST_UTIL.getConfiguration(), fam1);
  Put put = new Put(Bytes.toBytes("a-b-0-0"));
  put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
  region.put(put);
  region.flush(true);
  Store store = region.getStore(fam1);
  Collection<StoreFile> storefiles = store.getStorefiles();
  for (StoreFile sf : storefiles) {
    assertFalse("Tags should not be present "
        ,sf.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: TestHRegion.java

Example 8: tableSpecificCreationProcessing

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Override
void tableSpecificCreationProcessing() {
    LOGGER.info("Creating max ID row in Unique ID table");

    final Put put = new Put(MAXID_ROW);
    put.addColumn(NAME_FAMILY, MAXID_COL_QUALIFIER, Bytes.toBytes(MAXID_INITIAL_VALUE));

    doPut(put);
}
 
Developer ID: gchq, Project: stroom-stats, Lines of code: 10, Source: HBaseUniqueIdReverseMapTable.java
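Example 8 only seeds the max-ID counter row; the snippet does not show how it is consumed. As a hypothetical companion sketch (not part of the stroom-stats source), the usual pattern is to reserve IDs atomically with Table.incrementColumnValue against the same row, family, and qualifier, assuming MAXID_INITIAL_VALUE was written as an 8-byte long, which is the encoding incrementColumnValue expects:

// Hypothetical consumer of the max-ID row seeded above (not from stroom-stats).
private long nextId(Table table) throws IOException {
  // Atomically increments the counter cell by one and returns the new value.
  return table.incrementColumnValue(MAXID_ROW, NAME_FAMILY, MAXID_COL_QUALIFIER, 1L);
}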

Example 9: testReverseScanWithoutPadding

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Test
public void testReverseScanWithoutPadding() throws Exception {
  byte[] row1 = Bytes.toBytes("a");
  byte[] row2 = Bytes.toBytes("ab");
  byte[] row3 = Bytes.toBytes("b");

  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);

  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<Cell>();
  int count = 1; // the final next() returns false yet still delivers a row, hence starting at 1
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), res.get(0)
      .getRowLength()), "b");
  assertEquals(Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), res.get(1)
      .getRowLength()), "ab");
  assertEquals(Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), res.get(2)
      .getRowLength()), "a");
  assertEquals(3, count);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source: TestSeekBeforeWithReverseScan.java

Example 10: getPut

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Override public Put getPut() {
  Put put = new Put(getSaltRowkey());
  put.addColumn(FAMILY_NAME, Bytes.toBytes("a"), Bytes.toBytes(a));
  put.addColumn(FAMILY_NAME, Bytes.toBytes("b"), Bytes.toBytes(b));
  put.addColumn(FAMILY_NAME, Bytes.toBytes("c"), Bytes.toBytes(c));
  for (int i = DATA_COLUMN_OFFSET; i < nbTotalColumns; i++) {
    put.addColumn(FAMILY_NAME, Bytes.toBytes(i), Bytes.toBytes(columnValues[i]));
  }
  return put;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 11, Source: UniWorkload.java

Example 11: doInsert

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private void doInsert(int totalSize, int flushSize, boolean flushTable) throws IOException {
  Table table = conn.getTable(tableName);
  List<Put> putList = new ArrayList<>();
  for (int i = 0; i < totalSize; i++) {
    Put put = new Put(Bytes.toBytes(i));
    for (int j = 0; j < indexColumnNames.length; j++) {
      put.addColumn(familyName, Bytes.toBytes(indexColumnNames[j]),
          Bytes.toBytes(i % BUCKET_SIZE));
    }
    for (int j = 0; j < dataColumnNumber; j++) {
      put.addColumn(familyName, Bytes.toBytes("info"), Bytes.toBytes(i * 10));
    }
    putList.add(put);
    if (putList.size() >= INSERT_LIST_SIZE) {
      table.put(putList);
      putList = new ArrayList<>(INSERT_LIST_SIZE);
    }
    if (i > flushSize) {
      hBaseAdmin.flush(tableName);
      flushSize = Integer.MAX_VALUE;
      System.out.println("flush table after " + i);
      try {
        Thread.sleep(3 * 1000);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    }
  }
  if (!putList.isEmpty()) {
    table.put(putList); // flush puts left over from the last partial batch
  }
  table.close();
  if (flushTable) hBaseAdmin.flush(tableName);

  System.out.println("insert " + totalSize + " records into table, flush table=" + flushTable);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 34, Source: LMDTester.java

Example 12: createPut

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private Put createPut(Entry<? extends K, ? extends V> entry) {
  try {
    Put put = new Put(keySerializer.serialize(entry.getKey()));
    put.addColumn(family(), QUALIFIER, valueSerializer.serialize(entry.getValue()));
    return put;
  } catch (SerializationException e) {
    throw new CacheWriterException("Failed to create put", e);
  }
}
 
Developer ID: bakdata, Project: ignite-hbase, Lines of code: 10, Source: HBaseCacheStore.java

Example 13: testRow

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Override
void testRow(final int i) throws IOException {
  byte [] bytes = format(i);
  // Put a known value so when we go to check it, it is there.
  Put put = new Put(bytes);
  put.addColumn(FAMILY_NAME, getQualifier(), bytes);
  this.table.put(put);
  Delete delete = new Delete(put.getRow());
  delete.addColumn(FAMILY_NAME, getQualifier());
  this.table.checkAndDelete(bytes, FAMILY_NAME, getQualifier(), CompareOp.EQUAL, bytes, delete);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source: PerformanceEvaluation.java

Example 14: generateHBaseDatasetCompositeKeyInt

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
public static void generateHBaseDatasetCompositeKeyInt(Connection conn, Admin admin, TableName tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  BufferedMutator table = conn.getBufferedMutator(tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter ++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.addColumn(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.mutate(p);
  }

  table.close();
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 36, Source: TestTableGenerator.java

Example 15: generateHBaseDatasetDoubleOB

import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
public static void generateHBaseDatasetDoubleOB(Connection conn, Admin admin, TableName tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  BufferedMutator table = conn.getBufferedMutator(tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    PositionedByteRange br = new SimplePositionedMutableByteRange(bytes, 0, 9);
    OrderedBytes.encodeFloat64(br, i, Order.ASCENDING);
    Put p = new Put(bytes);
    p.addColumn(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.mutate(p);
  }

  table.close();

  admin.flush(tableName);
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 31, Source: TestTableGenerator.java


Note: the org.apache.hadoop.hbase.client.Put.addColumn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright in the source code remains with the original authors. Consult each project's License before using or redistributing the code.