Java KeyValue.getRow Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getRow. If you are wondering what KeyValue.getRow does, how to use it, or where to find examples of it, the curated snippets below should help. You can also explore the other usage examples for org.apache.hadoop.hbase.KeyValue, the class this method belongs to.


Eight code examples of KeyValue.getRow are shown below, ordered by popularity by default. You can vote up the examples you like or find useful; your ratings help the system recommend better Java code examples.
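
Before the project examples, here is a minimal self-contained sketch of the method itself. The row, family, and qualifier names are placeholders invented for illustration; note that in recent HBase versions getRow() is deprecated in favor of CellUtil.cloneRow(cell), both of which return a copy of the row key bytes.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRowDemo {
  public static void main(String[] args) {
    // Build a standalone KeyValue: row key, column family, qualifier, value.
    // All names here are invented for illustration.
    KeyValue kv = new KeyValue(Bytes.toBytes("row-1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value-1"));

    // getRow() copies the row key out of the KeyValue's backing byte array.
    byte[] row = kv.getRow();
    System.out.println(Bytes.toStringBinary(row)); // prints: row-1
  }
}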

Example 1: getFileSplitPoint

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Gets the approximate mid-point of this file that is optimal for use in splitting it.
 *
 * @param comparator Comparator used to compare KVs.
 * @return The split point row, or null if splitting is not possible or the reader is null.
 */
@SuppressWarnings("deprecation") byte[] getFileSplitPoint(KVComparator comparator)
    throws IOException {
  if (this.reader == null) {
    LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
    return null;
  }
  // Get the first, last, and mid keys. The midkey is the key that starts the
  // block in the middle of the hfile; it still carries column and timestamp,
  // so we return just the row we want to split on.
  byte[] midkey = this.reader.midkey();
  if (midkey != null) {
    KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
    byte[] fk = this.reader.getFirstKey();
    KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte[] lk = this.reader.getLastKey();
    KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
    // if the midkey is the same as the first or last keys, we cannot (ever) split this region.
    if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("cannot split because midkey is the same as first or last row");
      }
      return null;
    }
    return mk.getRow();
  }
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: StoreFile.java

Example 2: prePut

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
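    // Note: this assumes the Put touches a single column family; every
    // updated cell is written back under the first family seen (cf).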
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestReplicationWithTags.java

Example 3: prePut

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put m, WALEdit edit,
    Durability durability) throws IOException {
  byte[] attribute = m.getAttribute(NON_VISIBILITY);
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) NON_VIS_TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        tagList.addAll(kv.getTags());
        byte[] fromList = Tag.fromList(tagList);
        TagRewriteCell newcell = new TagRewriteCell(kv, fromList);
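        // Note: newKV below is rebuilt from kv.getRow()/getFamily()/etc., but
        // only the TagRewriteCell is added to updatedCells, so newKV is
        // effectively unused here.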
        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newcell);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: TestVisibilityLabelsReplication.java

Example 4: updateMutationAddingTags

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestTags.java

Example 5: testHalfScanAndReseek

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API
 * demands that seekTo and reseekTo() return < 0 only if the key lies
 * before the start of the file (with no position on the scanner),
 * 0 on a perfect match (rare), and > 0 on an imperfect match.
 *
 * The latter case being the most common, we should generally be returning 1,
 * and if we do, there may or may not be a 'next' in the scanner/file.
 *
 * A bug in the half file scanner was returning -1 at the end of the bottom
 * half, which caused the calling infrastructure to see a null, leading to
 * NPEs and other problems. This test reproduces that failure, and also tests
 * both the bottom and top of the file while we are at it.
 *
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withFileContext(meta)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte [] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();

  //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestHalfStoreFileReader.java
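
To make the seekTo contract described in the Javadoc above concrete, here is a hypothetical sketch of how a caller might interpret the return value. It assumes an HBase 1.x-era HFileScanner; the class, method, and variable names are illustrative, not part of the test above.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class SeekToContractDemo {
  /** Returns true if the scanner is positioned on some key after the seek. */
  static boolean seekAndExplain(HFileScanner scanner, Cell searchKey) throws IOException {
    int result = scanner.seekTo(searchKey);
    if (result < 0) {
      // Sought key lies before the first key in the file; no position is held.
      return false;
    } else if (result == 0) {
      // Exact match (rare): positioned exactly on the sought key.
      return true;
    } else {
      // Imperfect match (the common case): positioned at the closest key at
      // or before the sought key; a subsequent next() may or may not succeed.
      return true;
    }
  }
}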

Example 6: readStoreFile

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
  StoreFile.Reader r = sf.createReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = kv.getRow();
      boolean present = isInBloom(scanner, row, kv.getQualifier());
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    ByteBloomFilter.setFakeLookupMode(fakeLookupEnabled);
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      ByteBloomFilter.setFakeLookupMode(false);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
Developer: fengchen8086, Project: ditb, Lines: 80, Source: TestCompoundBloomFilter.java
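
The z-value thresholds above (1.96, 2.5, -2.58) come from a normal approximation to the binomial count of false positives. The upstream validateFalsePosRate implementation is not shown here, but a check of this kind presumably looks something like the following sketch; the method name, parameters, and exact form are assumptions.

/**
 * Hypothetical sketch of a z-test on an observed false-positive rate.
 * Over nTrials independent lookups with expected rate p, the observed rate
 * is approximately normal with standard error sqrt(p * (1 - p) / nTrials).
 */
static boolean withinZBound(double observedRate, double expectedRate,
    int nTrials, double maxZ) {
  double stdErr = Math.sqrt(expectedRate * (1 - expectedRate) / nTrials);
  double z = (observedRate - expectedRate) / stdErr;
  // A positive maxZ bounds the rate from above; a negative maxZ (like the
  // -2.58 used above) asserts the rate is not suspiciously low.
  return maxZ > 0 ? z < maxZ : z > maxZ;
}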

Example 7: testReference

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);

  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
    BloomType.NONE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row, not in the middle of one. The midkey returned by the
  // reader may fall in the middle of a row, so create a new key with an
  // empty column and timestamp.

  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  hsf.closeReader(true);

  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
    BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
 
Developer: fengchen8086, Project: ditb, Lines: 51, Source: TestStoreFile.java

Example 8: testHalfScanner

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Test
public void testHalfScanner() throws IOException {
    String root_dir = TEST_UTIL.getDataTestDir().toString();
    Path p = new Path(root_dir, "test");
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fs, p)
            .withFileContext(meta)
            .create();

    // write some things.
    List<KeyValue> items = genSomeKeys();
    for (KeyValue kv : items) {
        w.append(kv);
    }
    w.close();


    HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
    r.loadFileInfo();
    byte[] midkey = r.midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    midkey = midKV.getRow();

    Reference bottom = new Reference(midkey, Reference.Range.bottom);
    Reference top = new Reference(midkey, Reference.Range.top);

    // Ugly code to get the item before the midkey
    KeyValue beforeMidKey = null;
    for (KeyValue item : items) {
        if (KeyValue.COMPARATOR.compare(item, midKV) >= 0) {
            break;
        }
        beforeMidKey = item;
    }
    System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
    System.out.println("beforeMidKey: " + beforeMidKey);


    // Seek on the splitKey, should be in top, not in bottom
    Cell foundKeyValue = doTestOfSeekBefore(p, fs, bottom, midKV, cacheConf);
    assertEquals(beforeMidKey, foundKeyValue);

    // Seek to the last thing; it should be the penultimate on the top, and the one before the midkey on the bottom.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(items.size() - 1), cacheConf);
    assertEquals(items.get(items.size() - 2), foundKeyValue);

    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(items.size() - 1), cacheConf);
    assertEquals(beforeMidKey, foundKeyValue);

    // Try and seek before something that is in the bottom.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(0), cacheConf);
    assertNull(foundKeyValue);

    // Try and seek before the first thing.
    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(0), cacheConf);
    assertNull(foundKeyValue);

    // Try and seek before the second thing in the top and bottom.
    foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(1), cacheConf);
    assertNull(foundKeyValue);

    foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(1), cacheConf);
    assertEquals(items.get(0), foundKeyValue);

    // Try to seek before the splitKey in the top file
    foundKeyValue = doTestOfSeekBefore(p, fs, top, midKV, cacheConf);
    assertNull(foundKeyValue);
  }
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestHalfStoreFileReader.java


Note: the org.apache.hadoop.hbase.KeyValue.getRow method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original owners; consult each project's License before redistributing or reusing the code. Do not reproduce without permission.