

Java ByteArrayComparator Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator. If you are wondering what exactly the ByteArrayComparator class is for, how to use it, or what working code looks like, the curated class examples below should help.


The ByteArrayComparator class belongs to the org.apache.hadoop.hbase.util.Bytes package. Eight code examples of the class are shown below, sorted by popularity by default.
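
Before diving into the examples, here is a minimal sketch of what ByteArrayComparator itself does: it compares two byte[] values lexicographically, byte by byte and unsigned, which is why it is used to order HBase row keys and to verify key ordering in the examples below. The sample values are made up purely for illustration.

import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;

public class ByteArrayComparatorDemo {
  public static void main(String[] args) {
    ByteArrayComparator bac = new ByteArrayComparator();

    // Lexicographic, unsigned byte-by-byte comparison.
    System.out.println(bac.compare("key-1".getBytes(), "key-2".getBytes())); // negative
    System.out.println(bac.compare("key-2".getBytes(), "key-2".getBytes())); // 0
    System.out.println(bac.compare("key-10".getBytes(), "key-2".getBytes())); // negative: '1' < '2'

    // Because it implements java.util.Comparator<byte[]>, it can also order
    // byte[] keys in standard collections such as TreeMap.
    TreeMap<byte[], String> sorted = new TreeMap<>(bac);
    sorted.put("b".getBytes(), "second");
    sorted.put("a".getBytes(), "first");
    System.out.println(new String(sorted.firstKey())); // a
  }
}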

Example 1: testIterator

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
/**
 * Tests full iteration on a hoplog. Ensures all inserted keys are returned and no key is missing
 */
public void testIterator() throws IOException {
  int count = 10;
  ByteArrayComparator bac = new ByteArrayComparator();

  String hoplogName = getRandomHoplogName();
  TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);

  HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  HoplogReader reader = testHoplog.getReader();

  Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
  HoplogIterator<byte[], byte[]> iter = reader.scan();
  for (; iter.hasNext();) {
    byte[] key = iter.next();
    Entry<String, String> entry = mapIter.next();
    assertEquals(0, bac.compare(key, iter.getKey()));
    assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
    assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
    count--;
  }
  assertEquals(0, count);
}
 
Developer: gemxd, Project: gemfirexd-oss, Lines of code: 26, Source file: HfileSortedOplogJUnitTest.java
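
The hoplog examples on this page rely on two helpers from the same test class that are not shown here: getRandomHoplogName() and createHoplog(name, count). Their real implementation is not part of this excerpt; the following hypothetical sketch only illustrates the contract the tests assume, namely that createHoplog writes count key/value pairs of the form key-X / value-X into a new hoplog (using the same fields and writer API that appear in Example 3) and returns the same pairs in a TreeMap, so each test can compare iterator output against an independently sorted in-memory copy.

// Hypothetical sketch only; the actual helper in HfileSortedOplogJUnitTest may differ.
private TreeMap<String, String> createHoplog(String hoplogName, int count) throws IOException {
  TreeMap<String, String> sortedMap = new TreeMap<String, String>();
  HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore,
      new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  HoplogWriter writer = hoplog.createWriter(count);
  for (int i = 0; i < count; i++) {
    String key = "key-" + i;     // key-0 .. key-(count-1); lexicographically sorted for count <= 10
    String value = "value-" + i;
    writer.append(key.getBytes(), value.getBytes()); // persisted copy in the hoplog
    sortedMap.put(key, value);                       // in-memory reference copy
  }
  writer.close();
  return sortedMap;
}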

Example 2: toIterator

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
/**
 * Tests "to"-condition based iteration. Creates a hoplog with 10 KVs, creates a scanner ending at
 * a middle key, and verifies the count of KVs iterated over.
 */
public void toIterator(boolean includeTo) throws Exception {
  int count = 10;
  ByteArrayComparator bac = new ByteArrayComparator();
  
  String hoplogName = getRandomHoplogName();
  // sorted map contains the keys inserted in the hoplog for testing
  TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
  Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
  
  HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  HoplogReader reader = testHoplog.getReader();
  
  int middleKey = 4;
  // keys are like key-X, for X=0 to X=9. End the iterator at the fifth key,
  // key-4. If the to key is excluded, end at the fourth key, key-3.
  HoplogIterator<byte[], byte[]> iter = reader.scan(null, true, ("key-" + middleKey).getBytes(), includeTo);
  
  for (; iter.hasNext();) {
    byte[] key = iter.next();
    Entry<String, String> entry = mapIter.next();
    // make sure the KV returned by iterator match the inserted KV
    assertEquals(0, bac.compare(key, iter.getKey()));
    assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
    assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
    
    count --;
  }
  
  if (includeTo) {
    count++;
  }

  assertEquals(10, count + middleKey);
}
 
Developer: gemxd, Project: gemfirexd-oss, Lines of code: 39, Source file: HfileSortedOplogJUnitTest.java
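
Note that toIterator(boolean) is a parameterized helper rather than a test method in its own right; presumably the suite calls it once per variant. The driver methods below are hypothetical and shown only to make the calling pattern explicit.

// Hypothetical drivers; the real test method names may differ.
public void testToIteratorInclusive() throws Exception {
  toIterator(true);   // scan ends at and includes key-4
}

public void testToIteratorExclusive() throws Exception {
  toIterator(false);  // scan stops before key-4, i.e. the last key returned is key-3
}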

Example 3: testFromToIterator

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
/**
 * Tests iteration bounded by both a from key and a to key, on a hoplog containing only even keys
 */
public void testFromToIterator() throws IOException {
  ByteArrayComparator bac = new ByteArrayComparator();
  String hoplogName = getRandomHoplogName();
  HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  
  int count = 5;
  HoplogWriter writer = hoplog.createWriter(5);
  for (int i = 0; i < count; i++) {
    String value = "value-" + (i * 2);
    // even keys key-[0 2 4 6 8]
    writer.append(("key-" + (i * 2)).getBytes(), value.getBytes());
  }
  writer.close();
  
  HoplogReader reader = hoplog.getReader();
  HoplogIterator<byte[], byte[]> iter = reader.scan("key-1".getBytes(), true, "key-7".getBytes(), true);

  for (int i = 2; i < 7; i += 2) {
    assertTrue(iter.hasNext());
    iter.next();
    assertEquals(0, bac.compare(("key-" + i).getBytes(), iter.getKey()));
    assertEquals(0, bac.compare(("value-" + i).getBytes(), iter.getValue()));
    System.out.println(new String(iter.getKey()));
  }
  assertFalse(iter.hasNext());
}
 
Developer: gemxd, Project: gemfirexd-oss, Lines of code: 30, Source file: HfileSortedOplogJUnitTest.java
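
Usage note: the scan bounds are compared with the same lexicographic byte ordering, so the range [key-1, key-7] (both ends inclusive) selects exactly key-2, key-4 and key-6 out of the even keys key-0 through key-8. That is why the verification loop runs for i = 2, 4, 6 and then asserts that the iterator is exhausted.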

Example 4: checkRegionBoundaries

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), connection, false);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = FSUtils.getRootDir(getConf());
    for (HRegionInfo regionInfo : regions) {
      Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
                getConf()), getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    reader.getFirstKey()) > 0))) {
              storeFirstKey = reader.getFirstKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    reader.getLastKey())) < 0)) {
              storeLastKey = reader.getLastKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;

      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.

      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not alligned between stores and META for:");
        LOG.warn(currentRegionBoundariesInformation);
      }
    }
  } catch (IOException e) {
    LOG.error(e);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 82, Source file: HBaseFsck.java
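
The validity rule embedded in the middle of checkRegionBoundaries is easier to read in isolation: a region is consistent when the smallest key found in its store files is greater than or equal to the region's META start key, and the largest store key is strictly smaller than the META end key, with null standing in for an unbounded first/last region or a region without stores. A minimal sketch of just that predicate, using the same ByteArrayComparator (the helper name is made up):

// Hypothetical helper extracting the boundary predicate used in the example above.
static boolean regionBoundariesValid(ByteArrayComparator comparator,
    byte[] storesFirstKey, byte[] metaFirstKey,
    byte[] storesLastKey, byte[] metaLastKey) {
  boolean valid = true;
  // Start key: the smallest store key must not be smaller than the META start key.
  if (storesFirstKey != null && metaFirstKey != null) {
    valid = comparator.compare(storesFirstKey, metaFirstKey) >= 0;
  }
  // Stop key: the largest store key must be strictly smaller than the META end key.
  if (storesLastKey != null && metaLastKey != null) {
    valid = valid && comparator.compare(storesLastKey, metaLastKey) < 0;
  }
  return valid;
}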

Example 5: fromIterator

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
/**
 * Tests "from"-condition based iteration. Creates a hoplog with 10 KVs, creates a scanner starting at
 * a middle key, and verifies the count of KVs iterated over.
 */
public void fromIterator(boolean includeFrom) throws Exception {
  int count = 10;
  ByteArrayComparator bac = new ByteArrayComparator();

  String hoplogName = getRandomHoplogName();
  // sorted map contains the keys inserted in the hoplog for testing
  TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);

  HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  HoplogReader reader = testHoplog.getReader();

  int middleKey = 4;
  // remove top keys from the sorted map as the hoplog scanner should not
  // return those
  Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
  for (int i = 0; i < middleKey; i++) {
    mapIter.next();
    count--;
  }
  if (!includeFrom) {
    mapIter.next();
    count--;
  }

  // keys are like key-X, for X=0 to X=9. Start the iterator at the fifth key,
  // key-4. If the from key is excluded, start at the sixth key, key-5.
  HoplogIterator<byte[], byte[]> iter = reader.scan(("key-" + middleKey).getBytes(), includeFrom,
      null, true);

  for (; iter.hasNext();) {
    byte[] key = iter.next();
    Entry<String, String> entry = mapIter.next();
    // make sure the KV returned by iterator match the inserted KV
    assertEquals(0, bac.compare(key, iter.getKey()));
    assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
    assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
    count--;
  }
  assertEquals(0, count);
}
 
Developer: gemxd, Project: gemfirexd-oss, Lines of code: 45, Source file: HfileSortedOplogJUnitTest.java

Example 6: checkRegionBoundaries

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), false);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    for (HRegionInfo regionInfo : regions) {
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(getConf().get(HConstants.HBASE_DIR) + "/"
          + Bytes.toString(regionInfo.getTableName()) + "/"
          + regionInfo.getEncodedName() + "/");
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
                getConf()));
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    reader.getFirstKey()) > 0))) {
              storeFirstKey = reader.getFirstKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    reader.getLastKey())) < 0)) {
              storeLastKey = reader.getLastKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;
    
      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.
    
      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(Bytes.toString(regionInfo.getTableName())));
        LOG.warn("Region's boundaries not alligned between stores and META for:");
        LOG.warn(currentRegionBoundariesInformation);
      }
    }
  } catch (IOException e) {
    LOG.error(e);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 82, Source file: HBaseFsck.java

Example 7: checkRegionBoundaries

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), false);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = FSUtils.getRootDir(getConf());
    for (HRegionInfo regionInfo : regions) {
      Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
                getConf()), getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    reader.getFirstKey()) > 0))) {
              storeFirstKey = reader.getFirstKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    reader.getLastKey())) < 0)) {
              storeLastKey = reader.getLastKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;

      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.

      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not alligned between stores and META for:");
        LOG.warn(currentRegionBoundariesInformation);
      }
    }
  } catch (IOException e) {
    LOG.error(e);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 82, Source file: HBaseFsck.java

Example 8: checkRegionBoundaries

import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; // import the required package/class
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<RegionInfo> regions = MetaTableAccessor.getAllRegions(connection, true);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = FSUtils.getRootDir(getConf());
    for (RegionInfo regionInfo : regions) {
      Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(),
              new CacheConfig(getConf()), true, getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) {
              storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) {
              storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;

      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.

      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not aligned between stores and META for:");
        LOG.warn(Objects.toString(currentRegionBoundariesInformation));
      }
    }
  } catch (IOException e) {
    LOG.error(e.toString(), e);
  }
}
 
Developer: apache, Project: hbase, Lines of code: 82, Source file: HBaseFsck.java
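
This last variant targets the current Apache HBase API: HRegionInfo and MetaScanner are replaced by RegionInfo and MetaTableAccessor.getAllRegions, HFile.createReader takes an additional boolean argument before the Configuration, and getFirstKey()/getLastKey() now return an Optional wrapping a cell, hence the .get() calls and the KeyValue.KeyOnlyKeyValue casts needed to recover the raw key bytes. The boundary-checking logic itself is unchanged from the older variants in Examples 4, 6 and 7.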


Note: The org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright for the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.