当前位置: 首页>>代码示例>>Java>>正文


Java WritableComparator类代码示例

本文整理汇总了Java中org.apache.hadoop.io.WritableComparator的典型用法代码示例。如果您正苦于以下问题:Java WritableComparator类的具体用法?Java WritableComparator怎么用?Java WritableComparator使用的例子?那么，这里精选的类代码示例或许可以为您提供帮助。


WritableComparator类属于org.apache.hadoop.io包,在下文中一共展示了WritableComparator类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: WritableSortable

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Build a sortable fixture of {@code j} random Text records: each record
 * is serialized into a shared buffer, with its offset and string form
 * retained for later verification.
 */
public WritableSortable(int j) throws IOException {
  seed = r.nextLong();
  r.setSeed(seed);
  Text text = new Text();
  StringBuilder scratch = new StringBuilder();
  indices = new int[j];
  offsets = new int[j];
  check = new String[j];
  DataOutputBuffer buffer = new DataOutputBuffer();
  int pos = 0;
  while (pos < j) {
    indices[pos] = pos;
    // Record where this entry begins before serializing it.
    offsets[pos] = buffer.getLength();
    genRandom(text, r.nextInt(15) + 1, scratch);
    text.write(buffer);
    check[pos] = text.toString();
    ++pos;
  }
  eob = buffer.getLength();
  bytes = buffer.getData();
  comparator = WritableComparator.get(Text.class);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:21,代码来源:TestIndexedSort.java

示例2: fillKey

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Fill {@code o} with a random-length key: dictionary words appended
 * after a fixed-size prefix. When sorted output is requested, the prefix
 * is incremented whenever the new suffix would compare below the
 * previous key's suffix.
 */
private void fillKey(BytesWritable o) {
  int len = Math.max(keyLenRNG.nextInt(), MIN_KEY_LEN);
  o.setSize(len);
  // Append random dictionary words until the suffix region is full.
  for (int n = MIN_KEY_LEN; n < len; ) {
    byte[] word = dict[random.nextInt(dict.length)];
    int chunk = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.getBytes(), n, chunk);
    n += chunk;
  }
  boolean outOfOrder = sorted && WritableComparator.compareBytes(
      lastKey.getBytes(), MIN_KEY_LEN, lastKey.getLength() - MIN_KEY_LEN,
      o.getBytes(), MIN_KEY_LEN, o.getLength() - MIN_KEY_LEN) > 0;
  if (outOfOrder) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.getBytes(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:21,代码来源:KVGenerator.java

示例3: testEncoding

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Round-trips {@code l} through the reverse-ordered long encoding and
 * verifies that decoding (at offset 0 and at a nonzero offset) restores
 * the value, and that encoded bytes sort in descending numeric order.
 */
private static void testEncoding(long l) {
  byte[] encoded = GenericObjectMapper.writeReverseOrderedLong(l);
  assertEquals("error decoding", l,
      GenericObjectMapper.readReverseOrderedLong(encoded, 0));
  byte[] shifted = new byte[16];
  System.arraycopy(encoded, 0, shifted, 5, 8);
  assertEquals("error decoding at offset", l,
      GenericObjectMapper.readReverseOrderedLong(shifted, 5));
  // Reverse ordering: a numerically smaller long must encode to
  // lexicographically greater bytes.
  if (l > Long.MIN_VALUE) {
    byte[] smaller = GenericObjectMapper.writeReverseOrderedLong(l - 1);
    assertEquals("error preserving ordering", 1,
        WritableComparator.compareBytes(smaller, 0, smaller.length,
            encoded, 0, encoded.length));
  }
  if (l < Long.MAX_VALUE) {
    byte[] larger = GenericObjectMapper.writeReverseOrderedLong(l + 1);
    assertEquals("error preserving ordering", 1,
        WritableComparator.compareBytes(encoded, 0, encoded.length,
            larger, 0, larger.length));
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:TestGenericObjectMapper.java

示例4: parse

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Given an expression and an optional comparator, build a tree of
 * InputFormats using the comparator to sort keys.
 *
 * @throws IOException if the expression is null or unbalanced
 */
static Node parse(String expr, JobConf job) throws IOException {
  if (expr == null) {
    throw new IOException("Expression is null");
  }
  Class<? extends WritableComparator> cmpcl =
    job.getClass("mapred.join.keycomparator", null, WritableComparator.class);
  Stack<Token> stack = new Stack<Token>();
  Lexer lexer = new Lexer(expr);
  // Shift-reduce: every token is shifted except ')', which reduces the
  // stack back to the matching open paren.
  for (Token token = lexer.next(); token != null; token = lexer.next()) {
    if (TType.RPAREN.equals(token.getType())) {
      stack.push(reduce(stack, job));
    } else {
      stack.push(token);
    }
  }
  // A well-formed expression reduces to exactly one composite node.
  if (stack.size() == 1 && TType.CIF.equals(stack.peek().getType())) {
    Node root = stack.pop().getNode();
    if (cmpcl != null) {
      root.setKeyComparator(cmpcl);
    }
    return root;
  }
  throw new IOException("Missing ')'");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:Parser.java

示例5: CompositeRecordReader

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Create a RecordReader with <tt>capacity</tt> children to position
 * <tt>id</tt> in the parent reader.
 * The id of a root CompositeRecordReader is -1 by convention, but relying
 * on this is not recommended.
 */
@SuppressWarnings("unchecked") // Generic array assignment
public CompositeRecordReader(int id, int capacity,
    Class<? extends WritableComparator> cmpcl)
    throws IOException {
  assert capacity > 0 : "Invalid capacity";
  this.id = id;
  if (cmpcl != null) {
    cmp = ReflectionUtils.newInstance(cmpcl, null);
    // Order child readers by their current key so the reader with the
    // smallest key is always drawn first.
    q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
        new Comparator<ComposableRecordReader<K,?>>() {
          public int compare(ComposableRecordReader<K,?> lhs,
                             ComposableRecordReader<K,?> rhs) {
            return cmp.compare(lhs.key(), rhs.key());
          }
        });
  }
  jc = new JoinCollector(capacity);
  kids = new ComposableRecordReader[capacity];
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:CompositeRecordReader.java

示例6: add

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Add a RecordReader to this collection.
 * The id() of a RecordReader determines where in the Tuple its
 * entry will appear. Adding RecordReaders with the same id has
 * undefined behavior.
 */
public void add(ComposableRecordReader<K,? extends V> rr) throws IOException {
  kids[rr.id()] = rr;
  if (q == null) {
    // Lazily build the priority queue on the first add, ordering child
    // readers by their current key via the key class's comparator.
    cmp = WritableComparator.get(rr.createKey().getClass(), conf);
    q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
        new Comparator<ComposableRecordReader<K,?>>() {
          public int compare(ComposableRecordReader<K,?> first,
                             ComposableRecordReader<K,?> second) {
            return cmp.compare(first.key(), second.key());
          }
        });
  }
  // Only readers that actually have records participate in the merge.
  if (rr.hasNext()) {
    q.add(rr);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:CompositeRecordReader.java

示例7: parse

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Given an expression and an optional comparator, build a tree of
 * InputFormats using the comparator to sort keys.
 *
 * @throws IOException if the expression is null or unbalanced
 */
static Node parse(String expr, Configuration conf) throws IOException {
  if (expr == null) {
    throw new IOException("Expression is null");
  }
  Class<? extends WritableComparator> cmpcl = conf.getClass(
    CompositeInputFormat.JOIN_COMPARATOR, null, WritableComparator.class);
  Stack<Token> stack = new Stack<Token>();
  Lexer lexer = new Lexer(expr);
  // Shift-reduce over the token stream: ')' reduces everything back to
  // the matching open paren, all other tokens are shifted.
  for (Token token = lexer.next(); token != null; token = lexer.next()) {
    if (TType.RPAREN.equals(token.getType())) {
      stack.push(reduce(stack, conf));
    } else {
      stack.push(token);
    }
  }
  // A well-formed expression reduces to a single composite node.
  if (stack.size() == 1 && TType.CIF.equals(stack.peek().getType())) {
    Node root = stack.pop().getNode();
    if (cmpcl != null) {
      root.setKeyComparator(cmpcl);
    }
    return root;
  }
  throw new IOException("Missing ')'");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:Parser.java

示例8: CompositeRecordReader

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Create a RecordReader with <tt>capacity</tt> children to position
 * <tt>id</tt> in the parent reader.
 * The id of a root CompositeRecordReader is -1 by convention, but relying
 * on this is not recommended.
 */
@SuppressWarnings("unchecked") // Generic array assignment
public CompositeRecordReader(int id, int capacity,
    Class<? extends WritableComparator> cmpcl)
    throws IOException {
  assert capacity > 0 : "Invalid capacity";
  this.id = id;
  if (cmpcl != null) {
    cmp = ReflectionUtils.newInstance(cmpcl, null);
    // Keep children ordered by current key; the head of the queue is the
    // reader whose key compares smallest.
    q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
          new Comparator<ComposableRecordReader<K,?>>() {
            public int compare(ComposableRecordReader<K,?> left,
                               ComposableRecordReader<K,?> right) {
              return cmp.compare(left.key(), right.key());
            }
          });
  }
  jc = new JoinCollector(capacity);
  kids = new ComposableRecordReader[capacity];
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:CompositeRecordReader.java

示例9: fillKey

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Fill {@code o} with a randomly sized key: dictionary words laid down
 * after a fixed-length prefix. When sorted output is requested, the
 * prefix is incremented whenever the freshly generated suffix would
 * compare below the previous key's suffix, keeping keys non-decreasing.
 *
 * Uses {@code BytesWritable.getBytes()}/{@code getLength()} instead of
 * the deprecated {@code get()}/{@code getSize()} aliases, matching the
 * other KVGenerator variant of this method.
 */
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  // Append random dictionary words until the suffix region is full.
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.getBytes(), n, l);
    n += l;
  }
  // Compare only the suffixes (bytes after the prefix); if the new key
  // would sort before the last one, advance the prefix to compensate.
  if (sorted
      && WritableComparator.compareBytes(
          lastKey.getBytes(), MIN_KEY_LEN, lastKey.getLength() - MIN_KEY_LEN,
          o.getBytes(), MIN_KEY_LEN, o.getLength() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.getBytes(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:KVGenerator.java

示例10: compareTo

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Compare bytes from {#getBytes()}.
 * @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)
 */
public int compareTo(BinaryComparable other) {
  // Identity short-circuit avoids a byte-wise scan against ourselves.
  return this == other
      ? 0
      : WritableComparator.compareBytes(getBytes(), 0, getLength(),
            other.getBytes(), 0, other.getLength());
}
 
开发者ID:spafka,项目名称:spark_deep,代码行数:11,代码来源:BinaryComparable.java

示例11: prefixMatches

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Returns true if the byte array begins with the specified prefix.
 */
public static boolean prefixMatches(byte[] prefix, int prefixlen,
    byte[] b) {
  if (prefixlen > b.length) {
    // b is too short to contain the prefix at all.
    return false;
  }
  int cmp = WritableComparator.compareBytes(prefix, 0, prefixlen,
      b, 0, prefixlen);
  return cmp == 0;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:LeveldbUtils.java

示例12: testBakedUserComparator

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Test a user comparator that relies on deserializing both arguments
 * for each compare.
 */
@Test
public void testBakedUserComparator() throws Exception {
  MyWritable first = new MyWritable(8, 8);
  MyWritable second = new MyWritable(7, 9);
  // Natural ordering and the registered comparator deliberately disagree:
  // compareTo says first > second, the baked comparator says the opposite.
  assertTrue(first.compareTo(second) > 0);
  assertTrue(WritableComparator.get(MyWritable.class).compare(first, second) < 0);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:TestComparators.java

示例13: getOutputKeyComparator

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/**
 * Get the {@link RawComparator} comparator used to compare keys.
 *
 * @return the configured {@link RawComparator} if one is set via
 *     {@code JobContext.KEY_COMPARATOR}, otherwise the registered
 *     {@link WritableComparator} for the map output key class.
 */
public RawComparator getOutputKeyComparator() {
  Class<? extends RawComparator> theClass = getClass(
    JobContext.KEY_COMPARATOR, null, RawComparator.class);
  if (theClass != null) {
    return ReflectionUtils.newInstance(theClass, this);
  }
  // No explicit comparator configured: fall back to the comparator
  // registered for the map output key type.
  return WritableComparator.get(
      getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:JobConf.java

示例14: getPartition

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
/** 
 * Use (the specified slice of the array returned by) 
 * {@link BinaryComparable#getBytes()} to partition. 
 *
 * Zero-length keys are routed to partition 0: the offset normalization
 * below computes {@code % length}, which would otherwise throw
 * {@link ArithmeticException} for an empty key.
 */
@Override
public int getPartition(BinaryComparable key, V value, int numPartitions) {
  int length = key.getLength();
  if (length == 0) {
    // No bytes to hash; avoid modulo-by-zero in the index normalization.
    return 0;
  }
  // Normalize the configured offsets (which may be negative, meaning
  // "from the end") into [0, length).
  int leftIndex = (leftOffset + length) % length;
  int rightIndex = (rightOffset + length) % length;
  int hash = WritableComparator.hashBytes(key.getBytes(), 
    leftIndex, rightIndex - leftIndex + 1);
  // Mask the sign bit so the partition index is always non-negative.
  return (hash & Integer.MAX_VALUE) % numPartitions;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:BinaryPartitioner.java

示例15: initialize

import org.apache.hadoop.io.WritableComparator; //导入依赖的package包/类
// Initialize the wrapped reader and advance to the first record so the
// key/value classes can be inspected; `empty` is presumably set by
// nextKeyValue() when the split has no records — TODO confirm against
// the rest of WrappedRecordReader.
public void initialize(InputSplit split,
                       TaskAttemptContext context)
throws IOException, InterruptedException {
  rr.initialize(split, context);
  conf = context.getConfiguration();
  nextKeyValue(); // prime the reader before inspecting key/value types
  if (!empty) {
    keyclass = key.getClass().asSubclass(WritableComparable.class);
    valueclass = value.getClass();
    if (cmp == null) {
      // No comparator supplied: use the one registered for the key type.
      cmp = WritableComparator.get(keyclass, conf);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:WrappedRecordReader.java


注:本文中的org.apache.hadoop.io.WritableComparator类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。