This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.ImmutableBytesWritable.equals. If you are unsure how ImmutableBytesWritable.equals is used in practice, the selected code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.io.ImmutableBytesWritable.
Two code examples of the ImmutableBytesWritable.equals method are shown below, sorted by popularity by default.
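Before the examples, here is a minimal standalone sketch (the class name EqualsDemo and the row keys are made up for illustration) of how ImmutableBytesWritable.equals behaves: equality is decided by the wrapped byte contents, and an overload also accepts a raw byte[] to compare against.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class EqualsDemo {
  public static void main(String[] args) {
    // Two distinct wrappers around identical byte content compare equal.
    ImmutableBytesWritable a = new ImmutableBytesWritable(Bytes.toBytes("row-001"));
    ImmutableBytesWritable b = new ImmutableBytesWritable(Bytes.toBytes("row-001"));
    System.out.println(a.equals(b));                        // true

    // equals(byte[]) compares directly against a raw byte array.
    System.out.println(a.equals(Bytes.toBytes("row-002"))); // false
  }
}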
Example 1: finishBatchAndCompareHashes
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the package/class the method depends on
/**
* Finish the currently open hash batch.
* Compare the target hash to the given source hash.
* If they do not match, then sync the covered key range.
*/
private void finishBatchAndCompareHashes(Context context)
    throws IOException, InterruptedException {
  targetHasher.finishBatch();
  context.getCounter(Counter.BATCHES).increment(1);
  if (targetHasher.getBatchSize() == 0) {
    context.getCounter(Counter.EMPTY_BATCHES).increment(1);
  }
  ImmutableBytesWritable targetHash = targetHasher.getBatchHash();
  if (targetHash.equals(currentSourceHash)) {
    context.getCounter(Counter.HASHES_MATCHED).increment(1);
  } else {
    context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1);
    ImmutableBytesWritable stopRow = nextSourceKey == null
        ? new ImmutableBytesWritable(sourceTableHash.stopRow)
        : nextSourceKey;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey())
          + " to " + toHex(stopRow)
          + " sourceHash: " + toHex(currentSourceHash)
          + " targetHash: " + toHex(targetHash));
    }
    syncRange(context, targetHasher.getBatchStartKey(), stopRow);
  }
}
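Example 1 appears to come from HBase's SyncTable mapper; the equals call decides whether a batch of target rows hashes to the same value as the source side. Below is a minimal sketch of just that comparison, not the actual ResultHasher implementation (the MD5 digesting and the class name HashCompareSketch are assumptions for illustration):

import java.security.MessageDigest;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class HashCompareSketch {
  // Wrap an MD5 digest of some batch payload, standing in for getBatchHash().
  static ImmutableBytesWritable hashOf(String payload) throws Exception {
    return new ImmutableBytesWritable(
        MessageDigest.getInstance("MD5").digest(Bytes.toBytes(payload)));
  }

  public static void main(String[] args) throws Exception {
    ImmutableBytesWritable currentSourceHash = hashOf("batch-payload");
    ImmutableBytesWritable targetHash = hashOf("batch-payload");
    // Equal bytes -> the key range would be counted as HASHES_MATCHED;
    // otherwise the mapper would sync the covered key range.
    System.out.println(targetHash.equals(currentSourceHash)); // true
  }
}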
Example 2: writePartitions
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the package/class the method depends on
/**
* Write out a {@link SequenceFile} that can be read by
* {@link TotalOrderPartitioner} that contains the split points in startKeys.
*/
@SuppressWarnings("deprecation")
private static void writePartitions(Configuration conf, Path partitionsPath,
List<ImmutableBytesWritable> startKeys) throws IOException {
LOG.info("Writing partition information to " + partitionsPath);
if (startKeys.isEmpty()) {
throw new IllegalArgumentException("No regions passed");
}
// We're generating a list of split points, and we don't ever
// have keys < the first region (which has an empty start key)
// so we need to remove it. Otherwise we would end up with an
// empty reducer with index 0
TreeSet<ImmutableBytesWritable> sorted =
new TreeSet<ImmutableBytesWritable>(startKeys);
ImmutableBytesWritable first = sorted.first();
if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
throw new IllegalArgumentException(
"First region of table should have empty start key. Instead has: "
+ Bytes.toStringBinary(first.get()));
}
sorted.remove(first);
// Write the actual file
FileSystem fs = partitionsPath.getFileSystem(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(
fs, conf, partitionsPath, ImmutableBytesWritable.class,
NullWritable.class);
try {
for (ImmutableBytesWritable startKey : sorted) {
writer.append(startKey, NullWritable.get());
}
} finally {
writer.close();
}
}
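As a caller-side illustration of the check in example 2, the sketch below builds a start-key list the way region start keys would look (the class name StartKeysSketch and the row keys are invented): the first region's start key is empty, so the equals(byte[]) test against HConstants.EMPTY_BYTE_ARRAY passes, and that key is then dropped from the split points.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class StartKeysSketch {
  public static void main(String[] args) {
    List<ImmutableBytesWritable> startKeys = new ArrayList<>();
    // The first region of a table always has an empty start key.
    startKeys.add(new ImmutableBytesWritable(HConstants.EMPTY_BYTE_ARRAY));
    startKeys.add(new ImmutableBytesWritable(Bytes.toBytes("row-5000")));
    startKeys.add(new ImmutableBytesWritable(Bytes.toBytes("row-9000")));

    ImmutableBytesWritable first = startKeys.get(0);
    // equals(byte[]) lets the wrapper be compared against the raw empty key.
    System.out.println(first.equals(HConstants.EMPTY_BYTE_ARRAY)); // true
  }
}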