Java RawComparator.compare Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.io.RawComparator.compare. If you have been wondering what RawComparator.compare does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.RawComparator.


Three code examples of the RawComparator.compare method are shown below, ordered by popularity.

Example 1: binarySearch

import org.apache.hadoop.io.RawComparator; // import the class this method depends on
/**
 * Binary search for keys in indexes.
 *
 * @param arr the sorted array of byte arrays to search
 * @param key the buffer containing the key to find
 * @param offset the offset into key at which the search key starts
 * @param length the length of the search key
 * @param comparator the comparator to use for the search
 * @return zero-based index of the key, if the key is present in the array.
 *         Otherwise, a value -(i + 1) such that the key is between arr[i -
 *         1] and arr[i] non-inclusively, where i is in [0, N], if we define
 *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
 *         means that this function can return 2N + 1 different values
 *         ranging from -(N + 1) to N - 1.
 */
public static int binarySearch(byte[][] arr, byte[] key, int offset,
    int length, RawComparator<?> comparator) {
  int low = 0;
  int high = arr.length - 1;

  while (low <= high) {
    int mid = (low+high) >>> 1;
    // we have to compare in this order, because the comparator order
    // has special logic when the 'left side' is a special key.
    int cmp = comparator.compare(key, offset, length,
        arr[mid], 0, arr[mid].length);
    // key lives above the midpoint
    if (cmp > 0)
      low = mid + 1;
    // key lives below the midpoint
    else if (cmp < 0)
      high = mid - 1;
    // BAM. how often does this really happen?
    else
      return mid;
  }
  return - (low+1);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 39 | Source file: Bytes.java
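
For context, here is a minimal usage sketch (not part of the original project; the sample data is made up) showing a search over a lexicographically sorted byte[][] with HBase's stock Bytes.BYTES_RAWCOMPARATOR:

import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sample data, sorted in the comparator's order.
byte[][] sorted = new byte[][] {
    Bytes.toBytes("apple"), Bytes.toBytes("mango"), Bytes.toBytes("pear")
};
byte[] key = Bytes.toBytes("mango");
int idx = Bytes.binarySearch(sorted, key, 0, key.length,
    Bytes.BYTES_RAWCOMPARATOR);
// idx == 1 on a hit; a miss yields -(insertionPoint + 1), exactly as the
// javadoc above describes.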

Example 2: setConf

import org.apache.hadoop.io.RawComparator; // import the class this method depends on
/**
 * Read in the partition file and build indexing data structures.
 * If the key type is {@link org.apache.hadoop.io.BinaryComparable} and
 * <tt>total.order.partitioner.natural.order</tt> is not false, a trie
 * of the first <tt>total.order.partitioner.max.trie.depth</tt> + 1 bytes
 * will be built. Otherwise, keys will be located using a binary search of
 * the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
 * defined for this job. The input file must be sorted with the same
 * comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
 */
@SuppressWarnings("unchecked") // keytype from conf not static
public void setConf(Configuration conf) {
  try {
    this.conf = conf;
    String parts = getPartitionFile(conf);
    final Path partFile = new Path(parts);
    final FileSystem fs = (DEFAULT_PATH.equals(parts))
      ? FileSystem.getLocal(conf)     // assume in DistributedCache
      : partFile.getFileSystem(conf);

    Job job = Job.getInstance(conf);
    Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
    K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
    if (splitPoints.length != job.getNumReduceTasks() - 1) {
      throw new IOException("Wrong number of partitions in keyset");
    }
    RawComparator<K> comparator =
      (RawComparator<K>) job.getSortComparator();
    for (int i = 0; i < splitPoints.length - 1; ++i) {
      if (comparator.compare(splitPoints[i], splitPoints[i+1]) >= 0) {
        throw new IOException("Split points are out of order");
      }
    }
    boolean natOrder =
      conf.getBoolean(NATURAL_ORDER, true);
    if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
      partitions = buildTrie((BinaryComparable[])splitPoints, 0,
          splitPoints.length, new byte[0],
          // Now that blocks of identical splitless trie nodes are 
          // represented reentrantly, and we develop a leaf for any trie
          // node with only one split point, the only reason for a depth
          // limit is to refute stack overflow or bloat in the pathological
          // case where the split points are long and mostly look like bytes 
          // iii...iixii...iii   .  Therefore, we make the default depth
          // limit large but not huge.
          conf.getInt(MAX_TRIE_DEPTH, 200));
    } else {
      partitions = new BinarySearchNode(splitPoints, comparator);
    }
  } catch (IOException e) {
    throw new IllegalArgumentException("Can't read partitions file", e);
  }
}
 
Developer: naver | Project: hadoop | Lines: 54 | Source file: TotalOrderPartitioner.java
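
When the trie cannot be used, the code above falls back to a BinarySearchNode built from the split points and the job's RawComparator. Below is a condensed sketch of that fallback, modeled on the inner class in Hadoop's TotalOrderPartitioner but simplified here for illustration:

import java.util.Arrays;
import org.apache.hadoop.io.RawComparator;

// Condensed illustration of the binary-search fallback; modeled on the
// inner BinarySearchNode in TotalOrderPartitioner, simplified here.
class BinarySearchNode<K> {
  private final K[] splitPoints;             // the N - 1 sorted split keys
  private final RawComparator<K> comparator;

  BinarySearchNode(K[] splitPoints, RawComparator<K> comparator) {
    this.splitPoints = splitPoints;
    this.comparator = comparator;
  }

  // A key below splitPoints[0] maps to partition 0; a key equal to or
  // above splitPoints[i] maps to partition i + 1.
  int findPartition(K key) {
    final int pos = Arrays.binarySearch(splitPoints, key, comparator) + 1;
    return (pos < 0) ? -pos : pos;
  }
}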

Example 3: writePartitionFile

import org.apache.hadoop.io.RawComparator; // import the class this method depends on
/**
 * Write a partition file for the given job, using the Sampler provided.
 * Queries the sampler for a sample keyset, sorts by the output key
 * comparator, selects the keys for each rank, and writes to the destination
 * returned from {@link TotalOrderPartitioner#getPartitionFile}.
 */
@SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator
public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler) 
    throws IOException, ClassNotFoundException, InterruptedException {
  Configuration conf = job.getConfiguration();
  final InputFormat inf = 
      ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
  int numPartitions = job.getNumReduceTasks();
  K[] samples = (K[])sampler.getSample(inf, job);
  LOG.info("Using " + samples.length + " samples");
  RawComparator<K> comparator =
    (RawComparator<K>) job.getSortComparator();
  Arrays.sort(samples, comparator);
  Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  FileSystem fs = dst.getFileSystem(conf);
  if (fs.exists(dst)) {
    fs.delete(dst, false);
  }
  SequenceFile.Writer writer = SequenceFile.createWriter(fs, 
    conf, dst, job.getMapOutputKeyClass(), NullWritable.class);
  NullWritable nullValue = NullWritable.get();
  float stepSize = samples.length / (float) numPartitions;
  int last = -1;
  for(int i = 1; i < numPartitions; ++i) {
    int k = Math.round(stepSize * i);
    while (last >= k && comparator.compare(samples[last], samples[k]) == 0) {
      ++k;
    }
    writer.append(samples[k], nullValue);
    last = k;
  }
  writer.close();
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source file: InputSampler.java
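
To tie the three examples together, here is a hedged driver-side sketch (the reducer count, partition-file path, and sampler parameters are placeholder assumptions, not from the original article) showing how writePartitionFile is typically combined with TotalOrderPartitioner:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

// 'job' is assumed to be a Job already configured with input/output paths
// and a Text map-output key class.
job.setNumReduceTasks(4);
job.setPartitionerClass(TotalOrderPartitioner.class);
TotalOrderPartitioner.setPartitionFile(job.getConfiguration(),
    new Path("/tmp/_partitions.lst"));  // placeholder location
// Sample each record with probability 0.1, keep at most 10000 samples,
// reading from at most 10 input splits.
InputSampler.Sampler<Text, NullWritable> sampler =
    new InputSampler.RandomSampler<>(0.1, 10000, 10);
InputSampler.writePartitionFile(job, sampler);

After writePartitionFile runs, setConf (Example 2) reads the same file back in each task and routes every key to its place in the total order.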


Note: the org.apache.hadoop.io.RawComparator.compare examples in this article were compiled by 纯净天空 from open-source projects hosted on platforms such as GitHub and MSDocs. The code snippets remain the copyright of their original authors; consult each project's license before reusing or redistributing them, and do not republish without permission.