This article collects typical usage examples of the Java class org.apache.hadoop.io.WritableComparator. If you are unsure what WritableComparator does or how to use it, the curated examples below should help.
The WritableComparator class belongs to the org.apache.hadoop.io package. 14 code examples are shown below, sorted by popularity by default.
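Before diving in, here is a minimal, self-contained sketch (not taken from the examples below; class and variable names are illustrative) of the two WritableComparator entry points used throughout: get(), which returns the comparator registered for a WritableComparable type, and compareBytes(), which compares raw serialized bytes.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

public class WritableComparatorDemo {
  public static void main(String[] args) {
    Text a = new Text("apple");
    Text b = new Text("banana");

    // Object-level comparison via the comparator registered for Text.
    WritableComparator cmp = WritableComparator.get(Text.class);
    System.out.println(cmp.compare(a, b) < 0); // true: "apple" < "banana"

    // Byte-level comparison of the backing UTF-8 bytes.
    int c = WritableComparator.compareBytes(a.getBytes(), 0, a.getLength(),
                                            b.getBytes(), 0, b.getLength());
    System.out.println(c < 0); // true as well
  }
}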
Example 1: WritableSortable
import org.apache.hadoop.io.WritableComparator; // import the required package/class
public WritableSortable(int j) throws IOException {
  seed = r.nextLong();
  r.setSeed(seed);
  Text t = new Text();
  StringBuilder sb = new StringBuilder();
  indices = new int[j];
  offsets = new int[j];
  check = new String[j];
  DataOutputBuffer dob = new DataOutputBuffer();
  for (int i = 0; i < j; ++i) {
    indices[i] = i;
    offsets[i] = dob.getLength();
    genRandom(t, r.nextInt(15) + 1, sb);
    t.write(dob);
    check[i] = t.toString();
  }
  eob = dob.getLength();
  bytes = dob.getData();
  comparator = WritableComparator.get(Text.class);
}
Example 2: fillKey
import org.apache.hadoop.io.WritableComparator; // import the required package/class
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.getBytes(), n, l);
    n += l;
  }
  if (sorted && WritableComparator.compareBytes(
      lastKey.getBytes(), MIN_KEY_LEN, lastKey.getLength() - MIN_KEY_LEN,
      o.getBytes(), MIN_KEY_LEN, o.getLength() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }
  System.arraycopy(prefix, 0, o.getBytes(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
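This snippet compares only the portion of each key after the fixed MIN_KEY_LEN prefix. A standalone sketch of that slice-comparison idiom (the arrays and prefix length here are illustrative, not taken from the snippet):

import org.apache.hadoop.io.WritableComparator;

public class SliceCompareDemo {
  public static void main(String[] args) {
    byte[] k1 = {0, 0, 'a', 'b'}; // 2-byte prefix, then payload "ab"
    byte[] k2 = {0, 0, 'a', 'c'}; // same prefix, then payload "ac"
    int prefixLen = 2;
    // Compare only the payload bytes that follow the fixed prefix.
    int c = WritableComparator.compareBytes(
        k1, prefixLen, k1.length - prefixLen,
        k2, prefixLen, k2.length - prefixLen);
    System.out.println(c < 0); // true: "ab" sorts before "ac"
  }
}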
Example 3: testEncoding
import org.apache.hadoop.io.WritableComparator; // import the required package/class
private static void testEncoding(long l) {
  byte[] b = GenericObjectMapper.writeReverseOrderedLong(l);
  assertEquals("error decoding", l,
      GenericObjectMapper.readReverseOrderedLong(b, 0));
  byte[] buf = new byte[16];
  System.arraycopy(b, 0, buf, 5, 8);
  assertEquals("error decoding at offset", l,
      GenericObjectMapper.readReverseOrderedLong(buf, 5));
  if (l > Long.MIN_VALUE) {
    byte[] a = GenericObjectMapper.writeReverseOrderedLong(l - 1);
    assertEquals("error preserving ordering", 1,
        WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length));
  }
  if (l < Long.MAX_VALUE) {
    byte[] c = GenericObjectMapper.writeReverseOrderedLong(l + 1);
    assertEquals("error preserving ordering", 1,
        WritableComparator.compareBytes(b, 0, b.length, c, 0, c.length));
  }
}
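The point of the test: writeReverseOrderedLong produces encodings whose lexicographic byte order is the reverse of the numeric order, so the encoding of a smaller long always compares greater. A minimal sketch of that property (assuming GenericObjectMapper from the YARN timeline service is on the classpath; its package path varies across Hadoop versions):

import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;

public class ReverseOrderDemo {
  public static void main(String[] args) {
    byte[] five = GenericObjectMapper.writeReverseOrderedLong(5L);
    byte[] six = GenericObjectMapper.writeReverseOrderedLong(6L);
    // Reverse ordering: the encoding of the smaller value sorts later.
    int c = WritableComparator.compareBytes(five, 0, five.length,
                                            six, 0, six.length);
    System.out.println(c > 0); // true
  }
}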
Example 4: parse
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Given an expression and an optional comparator, build a tree of
 * InputFormats using the comparator to sort keys.
 */
static Node parse(String expr, JobConf job) throws IOException {
  if (null == expr) {
    throw new IOException("Expression is null");
  }
  Class<? extends WritableComparator> cmpcl = job.getClass(
      "mapred.join.keycomparator", null, WritableComparator.class);
  Lexer lex = new Lexer(expr);
  Stack<Token> st = new Stack<Token>();
  Token tok;
  while ((tok = lex.next()) != null) {
    if (TType.RPAREN.equals(tok.getType())) {
      st.push(reduce(st, job));
    } else {
      st.push(tok);
    }
  }
  if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) {
    Node ret = st.pop().getNode();
    if (cmpcl != null) {
      ret.setKeyComparator(cmpcl);
    }
    return ret;
  }
  throw new IOException("Missing ')'");
}
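This parser belongs to the old-API composite join (org.apache.hadoop.mapred.join). A sketch of how a job might feed it an expression and the optional comparator (the paths and the Text.Comparator choice are illustrative):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.join.CompositeInputFormat;

public class OldApiJoinSetup {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    job.setInputFormat(CompositeInputFormat.class);
    // The expression that parse() above will turn into a tree of InputFormats.
    job.set("mapred.join.expr", CompositeInputFormat.compose(
        "inner", KeyValueTextInputFormat.class, "/data/a", "/data/b"));
    // Optional key comparator, read back via mapred.join.keycomparator.
    job.setClass("mapred.join.keycomparator",
        Text.Comparator.class, WritableComparator.class);
  }
}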
Example 5: CompositeRecordReader
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Create a RecordReader with <tt>capacity</tt> children to position
 * <tt>id</tt> in the parent reader.
 * The id of a root CompositeRecordReader is -1 by convention, but relying
 * on this is not recommended.
 */
@SuppressWarnings("unchecked") // Generic array assignment
public CompositeRecordReader(int id, int capacity,
    Class<? extends WritableComparator> cmpcl)
    throws IOException {
  assert capacity > 0 : "Invalid capacity";
  this.id = id;
  if (null != cmpcl) {
    cmp = ReflectionUtils.newInstance(cmpcl, null);
    q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
        new Comparator<ComposableRecordReader<K,?>>() {
          public int compare(ComposableRecordReader<K,?> o1,
                             ComposableRecordReader<K,?> o2) {
            return cmp.compare(o1.key(), o2.key());
          }
        });
  }
  jc = new JoinCollector(capacity);
  kids = new ComposableRecordReader[capacity];
}
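The constructor instantiates the supplied comparator class reflectively. A minimal sketch of that ReflectionUtils idiom, using Text.Comparator as a stand-in:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.ReflectionUtils;

public class NewInstanceDemo {
  public static void main(String[] args) {
    Class<? extends WritableComparator> cmpcl = Text.Comparator.class;
    // A null Configuration works here because the comparator is not Configurable.
    WritableComparator cmp = ReflectionUtils.newInstance(cmpcl, null);
    System.out.println(cmp.compare(new Text("a"), new Text("b")) < 0); // true
  }
}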
Example 6: add
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Add a RecordReader to this collection.
 * The id() of a RecordReader determines where in the Tuple its
 * entry will appear. Adding RecordReaders with the same id has
 * undefined behavior.
 */
public void add(ComposableRecordReader<K,? extends V> rr) throws IOException {
  kids[rr.id()] = rr;
  if (null == q) {
    cmp = WritableComparator.get(rr.createKey().getClass(), conf);
    q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
        new Comparator<ComposableRecordReader<K,?>>() {
          public int compare(ComposableRecordReader<K,?> o1,
                             ComposableRecordReader<K,?> o2) {
            return cmp.compare(o1.key(), o2.key());
          }
        });
  }
  if (rr.hasNext()) {
    q.add(rr);
  }
}
Example 7: parse
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Given an expression and an optional comparator, build a tree of
 * InputFormats using the comparator to sort keys.
 */
static Node parse(String expr, Configuration conf) throws IOException {
  if (null == expr) {
    throw new IOException("Expression is null");
  }
  Class<? extends WritableComparator> cmpcl = conf.getClass(
      CompositeInputFormat.JOIN_COMPARATOR, null, WritableComparator.class);
  Lexer lex = new Lexer(expr);
  Stack<Token> st = new Stack<Token>();
  Token tok;
  while ((tok = lex.next()) != null) {
    if (TType.RPAREN.equals(tok.getType())) {
      st.push(reduce(st, conf));
    } else {
      st.push(tok);
    }
  }
  if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) {
    Node ret = st.pop().getNode();
    if (cmpcl != null) {
      ret.setKeyComparator(cmpcl);
    }
    return ret;
  }
  throw new IOException("Missing ')'");
}
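This is the new-API (org.apache.hadoop.mapreduce.lib.join) counterpart of Example 4. A sketch of the corresponding job setup (the paths and the comparator choice are illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;

public class NewApiJoinSetup {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance();
    job.setInputFormatClass(CompositeInputFormat.class);
    // The expression parsed by the method above.
    job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR,
        CompositeInputFormat.compose("outer", KeyValueTextInputFormat.class,
            new Path("/data/a"), new Path("/data/b")));
    // Optional key comparator, read back via JOIN_COMPARATOR.
    job.getConfiguration().setClass(CompositeInputFormat.JOIN_COMPARATOR,
        Text.Comparator.class, WritableComparator.class);
  }
}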
Example 8: fillKey
import org.apache.hadoop.io.WritableComparator; // import the required package/class
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    // get()/getSize() are the older, deprecated forms of getBytes()/getLength().
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
  if (sorted && WritableComparator.compareBytes(
      lastKey.get(), MIN_KEY_LEN, lastKey.getSize() - MIN_KEY_LEN,
      o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }
  System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
Example 9: compareTo
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Compare bytes from {@link #getBytes()}.
 * @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)
 */
public int compareTo(BinaryComparable other) {
  if (this == other)
    return 0;
  return WritableComparator.compareBytes(getBytes(), 0, getLength(),
      other.getBytes(), 0, other.getLength());
}
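Text and BytesWritable both extend BinaryComparable, so their natural ordering is exactly this byte-wise comparison. A quick check:

import org.apache.hadoop.io.Text;

public class NaturalOrderDemo {
  public static void main(String[] args) {
    Text a = new Text("abc");
    Text b = new Text("abd");
    // Delegates to WritableComparator.compareBytes on the UTF-8 bytes.
    System.out.println(a.compareTo(b) < 0); // true
  }
}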
Example 10: prefixMatches
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Returns true if the byte array begins with the specified prefix.
 */
public static boolean prefixMatches(byte[] prefix, int prefixlen,
    byte[] b) {
  if (b.length < prefixlen) {
    return false;
  }
  return WritableComparator.compareBytes(prefix, 0, prefixlen, b, 0,
      prefixlen) == 0;
}
Example 11: testBakedUserComparator
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Test a user comparator that relies on deserializing both arguments
 * for each compare.
 */
@Test
public void testBakedUserComparator() throws Exception {
  MyWritable a = new MyWritable(8, 8);
  MyWritable b = new MyWritable(7, 9);
  assertTrue(a.compareTo(b) > 0);
  assertTrue(WritableComparator.get(MyWritable.class).compare(a, b) < 0);
}
Example 12: getOutputKeyComparator
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Get the {@link RawComparator} comparator used to compare keys.
 *
 * @return the {@link RawComparator} comparator used to compare keys.
 */
public RawComparator getOutputKeyComparator() {
  Class<? extends RawComparator> theClass = getClass(
      JobContext.KEY_COMPARATOR, null, RawComparator.class);
  if (theClass != null)
    return ReflectionUtils.newInstance(theClass, this);
  return WritableComparator.get(
      getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
Example 13: getPartition
import org.apache.hadoop.io.WritableComparator; // import the required package/class
/**
 * Use (the specified slice of the array returned by)
 * {@link BinaryComparable#getBytes()} to partition.
 */
@Override
public int getPartition(BinaryComparable key, V value, int numPartitions) {
  int length = key.getLength();
  int leftIndex = (leftOffset + length) % length;
  int rightIndex = (rightOffset + length) % length;
  int hash = WritableComparator.hashBytes(key.getBytes(),
      leftIndex, rightIndex - leftIndex + 1);
  return (hash & Integer.MAX_VALUE) % numPartitions;
}
Example 14: initialize
import org.apache.hadoop.io.WritableComparator; // import the required package/class
public void initialize(InputSplit split,
    TaskAttemptContext context)
    throws IOException, InterruptedException {
  rr.initialize(split, context);
  conf = context.getConfiguration();
  nextKeyValue();
  if (!empty) {
    keyclass = key.getClass().asSubclass(WritableComparable.class);
    valueclass = value.getClass();
    if (cmp == null) {
      // Resolve a comparator for the key type lazily, once the first key is read.
      cmp = WritableComparator.get(keyclass, conf);
    }
  }
}