This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription. If you are unsure what KeyDescription is for or how to use it, the curated code examples below may help.
KeyDescription is a nested class of KeyFieldHelper, which lives in the org.apache.hadoop.mapreduce.lib.partition package. Three code examples of the KeyDescription class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
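For orientation (a summary from Hadoop's source, not from the examples below): KeyDescription is the parsed form of one sort(1)-style key spec such as -k2,2nr; it records the begin/end field and character positions plus the numeric and reverse flags, and KeyFieldHelper.parseOption() turns an option string into a List<KeyDescription>. A minimal sketch, assuming the code sits in the same org.apache.hadoop.mapreduce.lib.partition package, since KeyFieldHelper and its nested KeyDescription are not part of Hadoop's public API:

import java.util.List;

// Minimal sketch; assumes same-package access, because KeyFieldHelper
// and KeyDescription are not public API.
static void keySpecSketch() {
  KeyFieldHelper helper = new KeyFieldHelper();
  helper.setKeyFieldSeparator("\t");   // split keys into fields on tabs
  helper.parseOption("-k2,2nr");       // field 2, numeric, reverse order
  List<KeyFieldHelper.KeyDescription> specs = helper.keySpecs();
  // each spec carries the numeric/reverse flags consulted by the
  // comparison code in the examples below
}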
Example 1: getPartition
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription; // import the required package/class
public int getPartition(K2 key, V2 value, int numReduceTasks) {
  byte[] keyBytes;

  List<KeyDescription> allKeySpecs = keyFieldHelper.keySpecs();
  if (allKeySpecs.size() == 0) {
    // no key specs configured: partition on a hash of the whole key
    return getPartition(key.toString().hashCode(), numReduceTasks);
  }

  try {
    keyBytes = key.toString().getBytes("UTF-8");
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not " +
        "support UTF-8 encoding!", e);
  }
  // return 0 if the key is empty
  if (keyBytes.length == 0) {
    return 0;
  }

  // offsets of the field boundaries within the key
  int[] lengthIndicesFirst = keyFieldHelper.getWordLengths(keyBytes, 0,
      keyBytes.length);
  // hash each configured key field and fold the results together
  int currentHash = 0;
  for (KeyDescription keySpec : allKeySpecs) {
    int startChar = keyFieldHelper.getStartOffset(keyBytes, 0,
        keyBytes.length, lengthIndicesFirst, keySpec);
    // no key found! continue
    if (startChar < 0) {
      continue;
    }
    int endChar = keyFieldHelper.getEndOffset(keyBytes, 0, keyBytes.length,
        lengthIndicesFirst, keySpec);
    currentHash = hashCode(keyBytes, startChar, endChar, currentHash);
  }
  return getPartition(currentHash, numReduceTasks);
}
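For context, this getPartition implementation is the core of Hadoop's KeyFieldBasedPartitioner. The sketch below is not part of the example above; it shows one plausible way a job driver would enable it, where the "-k1,2" option string and the configurePartitioner helper are assumptions for illustration:

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner;

// Hedged sketch: partition on a hash of fields 1 through 2 of the key.
public static void configurePartitioner(Job job) {
  job.setPartitionerClass(KeyFieldBasedPartitioner.class);
  // "-k1,2" is an example sort(1)-style key spec, not a required value
  job.getConfiguration().set(
      KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,2");
  // separator used to split the key into fields (tab is the default)
  job.getConfiguration().set(
      "mapreduce.map.output.key.field.separator", "\t");
}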
Example 2: compare
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription; // import the required package/class
public int compare(byte[] b1, int s1, int l1,
                   byte[] b2, int s2, int l2) {
  // skip the leading vint that encodes each serialized key's length
  int n1 = WritableUtils.decodeVIntSize(b1[s1]);
  int n2 = WritableUtils.decodeVIntSize(b2[s2]);

  List<KeyDescription> allKeySpecs = keyFieldHelper.keySpecs();
  if (allKeySpecs.size() == 0) {
    // no key specs configured: fall back to a plain byte-wise comparison
    return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
  }

  int[] lengthIndicesFirst =
      keyFieldHelper.getWordLengths(b1, s1 + n1, s1 + l1);
  int[] lengthIndicesSecond =
      keyFieldHelper.getWordLengths(b2, s2 + n2, s2 + l2);

  // compare field by field; the first non-equal key spec decides the order
  for (KeyDescription keySpec : allKeySpecs) {
    int startCharFirst = keyFieldHelper.getStartOffset(b1, s1 + n1, s1 + l1,
        lengthIndicesFirst, keySpec);
    int endCharFirst = keyFieldHelper.getEndOffset(b1, s1 + n1, s1 + l1,
        lengthIndicesFirst, keySpec);
    int startCharSecond = keyFieldHelper.getStartOffset(b2, s2 + n2, s2 + l2,
        lengthIndicesSecond, keySpec);
    int endCharSecond = keyFieldHelper.getEndOffset(b2, s2 + n2, s2 + l2,
        lengthIndicesSecond, keySpec);
    int result;
    if ((result = compareByteSequence(b1, startCharFirst, endCharFirst, b2,
        startCharSecond, endCharSecond, keySpec)) != 0) {
      return result;
    }
  }
  return 0;
}
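This compare method is the raw-bytes comparator from Hadoop's KeyFieldBasedComparator, which sorts serialized map output keys with sort(1)-like options. As a hedged driver-side sketch (the "-k2,2nr" option string and the configureSort helper are assumptions for illustration):

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator;

// Hedged sketch: sort reducer input on field 2, numerically and in
// descending order ("-k2,2nr" is only an example option).
public static void configureSort(Job job) {
  job.setSortComparatorClass(KeyFieldBasedComparator.class);
  job.getConfiguration().set(
      KeyFieldBasedComparator.COMPARATOR_OPTIONS, "-k2,2nr");
}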
Example 3: compareByteSequence
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription; // import the required package/class
private int compareByteSequence(byte[] first, int start1, int end1,
    byte[] second, int start2, int end2, KeyDescription key) {
  // a missing field (start offset -1) sorts before a present one,
  // unless the spec asks for reverse order
  if (start1 == -1) {
    return key.reverse ? 1 : -1;
  }
  if (start2 == -1) {
    return key.reverse ? -1 : 1;
  }

  int compareResult = 0;
  if (!key.numeric) {
    // lexicographic byte-wise comparison (end offsets are inclusive)
    compareResult = compareBytes(first, start1, end1 - start1 + 1, second,
        start2, end2 - start2 + 1);
  }
  if (key.numeric) {
    compareResult = numericalCompare(first, start1, end1, second, start2,
        end2);
  }
  // the 'r' flag flips the comparison result
  if (key.reverse) {
    return -compareResult;
  }
  return compareResult;
}
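To make the numeric and reverse flags concrete, here is a small standalone illustration of the same semantics. This is hypothetical demo code, not Hadoop code, and it compares strings rather than raw byte ranges:

import java.util.Objects;

// Standalone illustration (hypothetical): 'n' compares numerically,
// 'r' flips the outcome, mirroring compareByteSequence above.
public class KeyFlagDemo {
  static int compare(String a, String b, boolean numeric, boolean reverse) {
    int result = numeric
        ? Long.compare(Long.parseLong(a), Long.parseLong(b))
        : a.compareTo(b);                 // byte-wise for ASCII strings
    return reverse ? -result : result;    // the 'r' flag negates the result
  }

  public static void main(String[] args) {
    System.out.println(compare("9", "10", false, false)); // > 0: "9" sorts after "10" lexicographically
    System.out.println(compare("9", "10", true, false));  // < 0: 9 sorts before 10 numerically
    System.out.println(compare("9", "10", true, true));   // > 0: reversed numeric order
  }
}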