

Java KeyValue Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.KeyValue. If you are wondering what the KeyValue class does, how to use it, or where to find working examples, the curated code examples below may help.


The KeyValue class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity by default.

Example 1: testSeekToBlockWithDecreasingCommonPrefix

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Test seeking while file is encoded.
 */
@Test
public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q2"), Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv3 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q3"), Bytes.toBytes("val"));
  sampleKv.add(kv3);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue toSeek = KeyValueUtil.createLastOnRow(kv3.getRowArray(), kv3.getRowOffset(),
      kv3.getRowLength(), null, 0, 0, null, 0, 0);
  seekToTheKey(kv3, sampleKv, toSeek);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestSeekToBlockWithEncoders.java
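
As a usage note, KeyValueUtil.createLastOnRow builds a synthetic key that sorts after every real cell on the given row, which is why seeking to it lands on kv3, the row's last cell. A minimal sketch of that ordering, assuming an HBase 1.x client on the classpath:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class LastOnRowSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row10aaa");
    KeyValue real = new KeyValue(row, Bytes.toBytes("f1"), Bytes.toBytes("q3"),
        Bytes.toBytes("val"));
    // The "last on row" key carries an empty column and Type.Minimum, which the
    // comparator special-cases to sort after every actual cell on the row.
    KeyValue lastOnRow = KeyValueUtil.createLastOnRow(row, 0, (short) row.length,
        null, 0, 0, null, 0, 0);
    System.out.println(KeyValue.COMPARATOR.compare(real, lastOnRow) < 0); // true
  }
}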

Example 2: deleteAllTs

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Override
public void deleteAllTs(ByteBuffer tableName,
                        ByteBuffer row,
                        ByteBuffer column,
    long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete  = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);

  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: ThriftServerRunner.java
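
The length check above hinges on KeyValue.parseColumn, which splits a "family:qualifier" spec on the ':' delimiter; with no delimiter present it returns a single-element array, so the whole family is deleted. A quick illustration, assuming an HBase 1.x client:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ParseColumnSketch {
  public static void main(String[] args) {
    // Family only: one element, taking the deleteFamily branch.
    byte[][] famOnly = KeyValue.parseColumn(Bytes.toBytes("cf"));
    System.out.println(famOnly.length); // 1
    // family:qualifier: two elements, taking the deleteColumns branch.
    byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes("cf:col"));
    System.out.println(Bytes.toString(famAndQf[0]) + " " + Bytes.toString(famAndQf[1])); // cf col
  }
}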

Example 3: getAllRecord

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Scan the given table and print every cell: row, family:qualifier, timestamp and value.
 */
public static void getAllRecord(String tableName) {
    try {
        Table table = connection.getTable(TableName.valueOf(tableName));
        Scan s = new Scan();

        ResultScanner ss = table.getScanner(s);
        for (Result r : ss) {
            for (KeyValue kv : r.raw()) {
                System.out.print(new String(kv.getRow()) + " ");
                System.out.print(new String(kv.getFamily()) + ":");
                System.out.print(new String(kv.getQualifier()) + " ");
                System.out.print(kv.getTimestamp() + " ");
                System.out.println(new String(kv.getValue()));
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: yjp123456, Project: SparkDemo, Lines of code: 23, Source: HBaseTest.java
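
Result.raw() and the KeyValue getters used here are deprecated in HBase 1.x in favor of the Cell interface. A roughly equivalent sketch with rawCells() and CellUtil, reusing the same connection field as the snippet above:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public static void printAllRecords(String tableName) throws IOException {
  try (Table table = connection.getTable(TableName.valueOf(tableName));
       ResultScanner scanner = table.getScanner(new Scan())) {
    for (Result r : scanner) {
      for (Cell cell : r.rawCells()) { // rawCells() replaces the deprecated raw()
        System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " "
            + Bytes.toString(CellUtil.cloneFamily(cell)) + ":"
            + Bytes.toString(CellUtil.cloneQualifier(cell)) + " "
            + cell.getTimestamp() + " "
            + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}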

Example 4: fromFilter

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
public static Range[] fromFilter(SingleColumnValueFilter filter) {
  if (!(filter.getComparator() instanceof BinaryComparator)) {
    return new Range[0];
  }

  byte[] column = KeyValue.makeColumn(filter.getFamily(), filter.getQualifier());
  CompareOp compareOp = filter.getOperator();
  byte[] value = filter.getComparator().getValue();

  if (compareOp == CompareOp.NOT_EQUAL) {
    return new Range[] { new Range(column, null, CompareOp.NO_OP, value, CompareOp.LESS),
        new Range(column, value, CompareOp.GREATER, null, CompareOp.NO_OP) };
  } else {
    switch (compareOp) {
    case EQUAL:
    case GREATER_OR_EQUAL:
    case GREATER:
      return new Range[] { new Range(column, value, compareOp, null, CompareOp.NO_OP) };
    case LESS:
    case LESS_OR_EQUAL:
      return new Range[] { new Range(column, null, CompareOp.NO_OP, value, compareOp) };
    default:
      return new Range[0];
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: Range.java
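
Range is a ditb-specific helper rather than part of HBase, so the following usage is inferred from the snippet itself: a NOT_EQUAL filter on value v decomposes into the two open ranges (-inf, v) and (v, +inf), while every other supported operator maps to a single one-sided range.

import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FromFilterSketch {
  public static void main(String[] args) {
    SingleColumnValueFilter f = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"),
        CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("v")));
    Range[] ranges = Range.fromFilter(f);
    System.out.println(ranges.length); // 2: (-inf, v) and (v, +inf)
  }
}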

Example 5: getAllRecord

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Gets all records from an existing table and prints each cell.
 * @param myHbaseBtableName the name of the HBase table to scan
 */
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // close the scanner
  }
}
 
Developer ID: PacktPublishing, Project: HBase-High-Performance-Cookbook, Lines of code: 31, Source: HBaseRegularClient.java

Example 6: testSeekToBlockWithDiffFamilyAndQualifer

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aab"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q2"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam2"),
      Bytes.toBytes("q2"), Bytes.toBytes("val"));
  seekToTheKey(kv5, sampleKv, toSeek);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestSeekToBlockWithEncoders.java
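
Why kv5: the seek key names family "fam2", which sorts after every "fam1" cell on row "aac" under KeyValue.COMPARATOR, so the seeker settles on the last cell preceding the target, namely kv5.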

Example 7: testSeekToBlockWithDiffQualiferOnSameRowButDescendingInSize

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testSeekToBlockWithDiffQualiferOnSameRowButDescendingInSize() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual2"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual3"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual4"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue kv6 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qz"),
      Bytes.toBytes("val"));
  sampleKv.add(kv6);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qz"),
      Bytes.toBytes("val"));
  seekToTheKey(kv6, sampleKv, toSeek);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestSeekToBlockWithEncoders.java

Example 8: recoverClusteringResult

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
public static List<Cell> recoverClusteringResult(List<Cell> cells, byte[] family,
    byte[] qualifier) {
  if (cells == null || cells.size() == 0) return cells;
  byte[][] indexColumn = IndexPutParser.parseIndexRowKey(cells.get(0).getRow());
  List<Cell> list = new ArrayList<>(cells.size() + 1);
  for (Cell cell : cells) {
    byte[] tag = cell.getTagsArray();
    if (tag != null && tag.length > KeyValue.MAX_TAGS_LENGTH) tag = null;
    KeyValue kv =
        new KeyValue(indexColumn[0], CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
            CellUtil.cloneValue(cell), tag);
    list.add(kv);
  }
  list.add(new KeyValue(indexColumn[0], family, qualifier, indexColumn[1]));
  Collections.sort(list, KeyValue.COMPARATOR);
  return list;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: BaseIndexScanner.java
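
The final Collections.sort matters because swapping in the recovered row key can break the input ordering; KeyValue.COMPARATOR restores HBase's native order. A minimal illustration of that comparator, assuming an HBase 1.x client:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ComparatorSketch {
  public static void main(String[] args) {
    List<KeyValue> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("v")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("v")));
    // Sorts by row, family and qualifier ascending, then timestamp descending,
    // matching the order in which HBase stores cells.
    Collections.sort(cells, KeyValue.COMPARATOR);
    System.out.println(Bytes.toString(CellUtil.cloneRow(cells.get(0)))); // r1
  }
}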

Example 9: testMixedPutDelete

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Insert a mix of puts and deletes
 * @throws Exception
 */
@Test
public void testMixedPutDelete() throws Exception {
  List<WALEntry> entries = new ArrayList<WALEntry>(BATCH_SIZE/2);
  List<Cell> cells = new ArrayList<Cell>();
  for(int i = 0; i < BATCH_SIZE/2; i++) {
    entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells));

  entries = new ArrayList<WALEntry>(BATCH_SIZE);
  cells = new ArrayList<Cell>();
  for(int i = 0; i < BATCH_SIZE; i++) {
    entries.add(createEntry(TABLE_NAME1, i,
        i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn, cells));
  }

  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: TestReplicationSink.java
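
The arithmetic behind the assertion: the first batch puts rows 0 through BATCH_SIZE/2 - 1; the second batch walks rows 0 through BATCH_SIZE - 1, re-putting the odd-numbered rows and issuing DeleteColumn for the even-numbered ones, so exactly the BATCH_SIZE/2 odd rows survive the final scan.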

Example 10: insertData

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: TestScannersWithLabels.java
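
The CellVisibility expression "(SECRET|CONFIDENTIAL)&!TOPSECRET" makes each cell visible only to readers holding SECRET or CONFIDENTIAL but not TOPSECRET. A read-side sketch, under the assumption that visibility labels are enabled on the cluster:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;

static Result readWithLabels(Table table, String row) throws IOException {
  Get get = new Get(Bytes.toBytes(row));
  // SECRET satisfies the expression, so the cell is returned; an Authorizations
  // of only TOPSECRET would yield an empty Result instead.
  get.setAuthorizations(new Authorizations("SECRET"));
  return table.get(get);
}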

Example 11: createBlockOnDisk

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestHFileDataBlockEncoder.java

Example 12: testCreateKey

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testCreateKey() {
  CompoundBloomFilterBase cbfb = new CompoundBloomFilterBase();
  byte[] row = "myRow".getBytes();
  byte[] qualifier = "myQualifier".getBytes();
  byte[] rowKey = cbfb.createBloomKey(row, 0, row.length,
      row, 0, 0);
  byte[] rowColKey = cbfb.createBloomKey(row, 0, row.length,
      qualifier, 0, qualifier.length);
  KeyValue rowKV = KeyValue.createKeyValueFromKey(rowKey);
  KeyValue rowColKV = KeyValue.createKeyValueFromKey(rowColKey);
  assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp());
  assertEquals(Bytes.toStringBinary(rowKV.getRow()),
      Bytes.toStringBinary(rowColKV.getRow()));
  assertEquals(0, rowKV.getQualifier().length);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source: TestCompoundBloomFilter.java

Example 13: writeStoreFile

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
  byte[] fam = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  long now = System.currentTimeMillis();
  byte[] b = Bytes.toBytes("k1");
  Tag t1 = new Tag((byte) 1, "tag1");
  Tag t2 = new Tag((byte) 2, "tag2");
  Tag t3 = new Tag((byte) 3, "tag3");
  try {
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t1 }));
    b = Bytes.toBytes("k3");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t2, t1 }));
    b = Bytes.toBytes("k4");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t3 }));
    b = Bytes.toBytes("k5");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t3 }));
  } finally {
    writer.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: TestStoreFileScannerWithTagCompression.java

Example 14: GetClosestRowBeforeTracker

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * @param c the KeyValue comparator in use
 * @param kv the target key; presumed first on row, i.e. empty column, maximum
 * timestamp and a type of Type.Maximum
 * @param ttl time to live in ms for this Store
 * @param metaregion true if this is the hbase:meta or -ROOT- region
 */
GetClosestRowBeforeTracker(final KVComparator c, final KeyValue kv,
    final long ttl, final boolean metaregion) {
  super();
  this.metaregion = metaregion;
  this.targetkey = kv;
  // If we are in a metaregion, then our table name is the prefix on the
  // targetkey.
  this.rowoffset = kv.getRowOffset();
  int l = -1;
  if (metaregion) {
    l = KeyValue.getDelimiter(kv.getRowArray(), rowoffset, kv.getRowLength(),
      HConstants.DELIMITER) - this.rowoffset;
  }
  this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
  this.now = System.currentTimeMillis();
  this.oldestUnexpiredTs = now - ttl;
  this.kvcomparator = c;
  KeyValue.RowOnlyComparator rc = new KeyValue.RowOnlyComparator(this.kvcomparator);
  this.deletes = new TreeMap<KeyValue, NavigableSet<KeyValue>>(rc);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source: GetClosestRowBeforeTracker.java
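
For a meta region the row key has the form "tableName,startKey,regionId...", so KeyValue.getDelimiter locates the first ',' (HConstants.DELIMITER) and l becomes the table-name length. A small illustration, assuming an HBase 1.x client:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class DelimiterSketch {
  public static void main(String[] args) {
    byte[] metaRow = Bytes.toBytes("myTable,startKey,1234567890");
    // Returns the index of the first delimiter within the slice, or -1 if absent.
    int idx = KeyValue.getDelimiter(metaRow, 0, metaRow.length, HConstants.DELIMITER);
    System.out.println(idx); // 7, the length of "myTable"
  }
}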

Example 15: write

import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
  LOG.warn("WALEdit is being serialized to writable - only expected in test code");
  out.writeInt(VERSION_2);
  out.writeInt(cells.size());
  // We interleave the two lists for code simplicity
  for (Cell cell : cells) {
    // This is not used in any of the core code flows so it is just fine to convert to KV
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    if (compressionContext != null) {
      KeyValueCompression.writeKV(out, kv, compressionContext);
    } else{
      KeyValue.write(kv, out);
    }
  }
  if (scopes == null) {
    out.writeInt(0);
  } else {
    out.writeInt(scopes.size());
    for (byte[] key : scopes.keySet()) {
      Bytes.writeByteArray(out, key);
      out.writeInt(scopes.get(key));
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: WALEdit.java
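
KeyValueUtil.ensureKeyValue is the conversion shim used above: it returns the Cell unchanged when it already is a KeyValue and otherwise copies it into a new one. A sketch of that behavior, assuming HBase 1.x, where the method is still present (it is deprecated in later versions):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class EnsureKeyValueSketch {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("v"));
    // Already a KeyValue: the same instance comes back, no copy is made.
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    System.out.println(kv == cell); // true
  }
}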


Note: The org.apache.hadoop.hbase.KeyValue class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this page without permission.