

Java KeyValue.getRow Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getRow. If you are wondering what KeyValue.getRow does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.KeyValue.


Eight code examples of the KeyValue.getRow method are shown below, sorted by popularity by default.
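Before the collected examples, a minimal self-contained sketch (not from the collection; all coordinates are made-up illustration values) shows what the method returns: getRow() extracts just the row-key bytes of a cell, without family, qualifier, or timestamp.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRowDemo {
  public static void main(String[] args) {
    // Build a standalone cell; row "user-42", family "cf", qualifier "name" are made up.
    KeyValue kv = new KeyValue(Bytes.toBytes("user-42"), Bytes.toBytes("cf"),
        Bytes.toBytes("name"), 1L, Bytes.toBytes("Alice"));
    // getRow() copies out only the row-key portion of the cell.
    byte[] row = kv.getRow();
    System.out.println(Bytes.toString(row)); // prints: user-42
  }
}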

Example 1: getFileSplitPoint

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
/**
 * Gets the approximate mid-point of this file that is optimal for use in splitting it.
 *
 * @param comparator Comparator used to compare KVs.
 * @return The split point row, or null if splitting is not possible or the reader is null.
 */
@SuppressWarnings("deprecation")
byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
  if (this.reader == null) {
    LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
    return null;
  }
  // Get the first, last, and mid keys. The midkey is the key that starts the
  // block in the middle of the hfile; it carries a column and timestamp, so we
  // return just its row, which is what we want to split on.
  byte[] midkey = this.reader.midkey();
  if (midkey != null) {
    KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
    byte[] fk = this.reader.getFirstKey();
    KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte[] lk = this.reader.getLastKey();
    KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
    // if the midkey is the same as the first or last keys, we cannot (ever) split this region.
    if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("cannot split because midkey is the same as first or last row");
      }
      return null;
    }
    return mk.getRow();
  }
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: StoreFile.java

Example 2: prePut

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestReplicationWithTags.java
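For context, a hedged client-side sketch (not part of the test above) of the kind of Put that would trigger this coprocessor: a mutation carrying the "visibility" attribute that prePut() rewrites into a per-cell tag. The table handle, row key, and column names are assumptions.

// 'table' is assumed to be an open org.apache.hadoop.hbase.client.Table.
Put put = new Put(Bytes.toBytes("row1"));                      // made-up row key
put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
put.setAttribute("visibility", Bytes.toBytes("confidential")); // read in prePut()
table.put(put);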

Example 3: prePut

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put m, WALEdit edit,
    Durability durability) throws IOException {
  byte[] attribute = m.getAttribute(NON_VISIBILITY);
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) NON_VIS_TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        tagList.addAll(kv.getTags());
        byte[] fromList = Tag.fromList(tagList);
        TagRewriteCell newcell = new TagRewriteCell(kv, fromList);
        updatedCells.add(newcell);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: TestVisibilityLabelsReplication.java

Example 4: updateMutationAddingTags

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestTags.java

Example 5: testHalfScanAndReseek

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API
 * demands that seekTo and reseekTo() return < 0 only if the key lies
 * before the start of the file (with no position on the scanner), return
 * 0 on a perfect match (rare), and return 1 on an imperfect match.
 *
 * The latter case being the most common, we should generally be returning 1,
 * and if we do, there may or may not be a 'next' in the scanner/file.
 *
 * A bug in the half file scanner was returning -1 at the end of the bottom
 * half, and that was causing the infrastructure above to go null causing NPEs
 * and other problems.  This test reproduces that failure, and also tests
 * both the bottom and top of the file while we are at it.
 *
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withFileContext(meta)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte [] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();


  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestHalfStoreFileReader.java
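The seekTo/reseekTo contract described in the javadoc above can be summarized in a short hedged sketch. Here 'scanner' is assumed to be an HFileScanner from an open reader, and the exact seekTo overload differs between HBase versions.

// Interpreting the HFileScanner seek contract (sketch only, not part of the test).
int ret = scanner.seekTo(kv); // overload taking the key to seek to (assumed)
if (ret < 0) {
  // The key lies before the first key in the file; the scanner holds no position.
} else if (ret == 0) {
  // Exact match (rare).
} else {
  // Imperfect match: positioned at the greatest key <= the requested one;
  // a following next() may or may not find more data.
}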

Example 6: readStoreFile

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
  StoreFile.Reader r = sf.createReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = kv.getRow();
      boolean present = isInBloom(scanner, row, kv.getQualifier());
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    ByteBloomFilter.setFakeLookupMode(fakeLookupEnabled);
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      ByteBloomFilter.setFakeLookupMode(false);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
Developer: fengchen8086, Project: ditb, Lines: 80, Source: TestCompoundBloomFilter.java
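The maxZValue thresholds above (1.96, 2.5, -2.58) are standard normal-distribution cutoffs. The body of validateFalsePosRate is not shown here, but a check of that shape plausibly reduces to the following sketch, treating the observed false-positive rate as a sample proportion over nTrials independent lookups:

// Hedged sketch: z-value of an observed false-positive rate against the
// Bloom filter's expected error rate (how that rate is obtained is assumed).
static double zValue(double observedRate, double expectedRate, int nTrials) {
  // Standard error of a proportion over nTrials independent trials.
  double stdDev = Math.sqrt(expectedRate * (1 - expectedRate) / nTrials);
  return (observedRate - expectedRate) / stdDev;
}
// A positive threshold (1.96, 2.5) bounds the rate from above: require z <= maxZValue.
// A negative threshold (-2.58) bounds it from below: require z >= maxZValue.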

Example 7: testReference

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);

  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
    BloomType.NONE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row boundary, not in the middle of a row. The midkey returned
  // by the reader may fall in the middle of a row, so create a new key with an
  // empty column and timestamp.

  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  hsf.closeReader(true);

  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
    BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
 
Developer: fengchen8086, Project: ditb, Lines: 51, Source: TestStoreFile.java

Example 8: testHalfScanner

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
@Test
public void testHalfScanner() throws IOException {
    String root_dir = TEST_UTIL.getDataTestDir().toString();
    Path p = new Path(root_dir, "test");
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fs, p)
            .withFileContext(meta)
            .create();

    // write some things.
    List<KeyValue> items = genSomeKeys();
    for (KeyValue kv : items) {
        w.append(kv);
    }
    w.close();


    HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
    r.loadFileInfo();
    byte[] midkey = r.midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    midkey = midKV.getRow();

    Reference bottom = new Reference(midkey, Reference.Range.bottom);
    Reference top = new Reference(midkey, Reference.Range.top);

    // Ugly code to get the item before the midkey
    KeyValue beforeMidKey = null;
    for (KeyValue item : items) {
        if (KeyValue.COMPARATOR.compare(item, midKV) >= 0) {
            break;
        }
        beforeMidKey = item;
    }
    System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
    System.out.println("beforeMidKey: " + beforeMidKey);


    // Seek to the splitKey; it belongs to the top half, not the bottom.
    Cell foundKeyValue = doTestOfSeekBefore(p, fs, bottom, midKV, cacheConf);
    assertEquals(beforeMidKey, foundKeyValue);

    // Seeking to the last item should land on the penultimate item in the top
    // half, and on the item before the midkey in the bottom half.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(items.size() - 1), cacheConf);
    assertEquals(items.get(items.size() - 2), foundKeyValue);

    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(items.size() - 1), cacheConf);
    assertEquals(beforeMidKey, foundKeyValue);

    // Try and seek before something that is in the bottom.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(0), cacheConf);
    assertNull(foundKeyValue);

    // Try and seek before the first thing.
    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(0), cacheConf);
    assertNull(foundKeyValue);

    // Try and seek before the second thing in the top and bottom.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(1), cacheConf);
    assertNull(foundKeyValue);

    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(1), cacheConf);
    assertEquals(items.get(0), foundKeyValue);

    // Try to seek before the splitKey in the top file
    foundKeyValue = doTestOfSeekBefore(p, fs, top, midKV, cacheConf);
    assertNull(foundKeyValue);
  }
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestHalfStoreFileReader.java


Note: The org.apache.hadoop.hbase.KeyValue.getRow examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.