This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.ImmutableBytesWritable. If you are wondering what ImmutableBytesWritable is for and how to use it, the curated class examples below may help.
The ImmutableBytesWritable class belongs to the org.apache.hadoop.hbase.io package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
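Before the examples, here is a minimal standalone sketch of the class itself: ImmutableBytesWritable wraps a byte[] (or a slice of one) so it can serve as a sortable, serializable MapReduce key or value. The methods shown are the real HBase API; the row values are made up for illustration.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class ImmutableBytesWritableBasics {
  public static void main(String[] args) {
    // Wrap an existing byte array; the wrapper does not copy it.
    ImmutableBytesWritable key = new ImmutableBytesWritable(Bytes.toBytes("row-001"));

    // get() returns the backing array; honor getOffset()/getLength(),
    // since a writable may wrap only a slice of a larger buffer.
    System.out.println(Bytes.toString(key.get(), key.getOffset(), key.getLength()));

    // copyBytes() materializes exactly the wrapped range as a fresh array.
    byte[] copy = key.copyBytes();
    System.out.println(copy.length); // 7 bytes for "row-001"

    // Keys are comparable, which is what lets the MapReduce framework
    // sort and partition them.
    ImmutableBytesWritable other = new ImmutableBytesWritable(Bytes.toBytes("row-002"));
    System.out.println(key.compareTo(other) < 0); // prints true
  }
}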
Example 1: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<NullWritable,NullWritable> output,
Reporter reporter) throws IOException {
for (Cell cell : value.listCells()) {
reporter.getCounter(TestTableInputFormat.class.getName() + ":row",
Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()))
.increment(1L);
reporter.getCounter(TestTableInputFormat.class.getName() + ":family",
Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()))
.increment(1L);
reporter.getCounter(TestTableInputFormat.class.getName() + ":value",
Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))
.increment(1L);
}
}
Example 2: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
public void map(LongWritable key, SqoopRecord val, Context context)
throws IOException, InterruptedException {
try {
// Loading of LOBs was delayed until we have a Context.
val.loadLargeObjects(lobLoader);
} catch (SQLException sqlE) {
throw new IOException(sqlE);
}
Map<String, Object> fields = val.getFieldMap();
List<Put> putList = putTransformer.getPutCommand(fields);
for(Put put: putList){
context.write(new ImmutableBytesWritable(put.getRow()), put);
}
}
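Note the output contract here: TableOutputFormat expects ImmutableBytesWritable map output keys, which is why the mapper wraps put.getRow() before emitting; the Put itself already carries the row and all column data.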
Example 3: getPartition
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) {
byte[] region = null;
// With only one region, everything maps to partition 0
if (this.startKeys.length == 1){
return 0;
}
try {
// Not sure if this is cached after a split so we could have problems
// here if a region splits while mapping
region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
} catch (IOException e) {
LOG.error(e);
}
if (region == null) {
// The region lookup failed; fall back to partition 0 rather than
// hit a NullPointerException in the comparison below.
return 0;
}
for (int i = 0; i < this.startKeys.length; i++){
if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){
if (i >= numPartitions-1){
// cover the case where we have fewer reducers than regions.
return (Integer.toString(i).hashCode()
& Integer.MAX_VALUE) % numPartitions;
}
return i;
}
}
// if the above fails to find a matching start key, fall back to partition 0
return 0;
}
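Note the fallback behavior: when there are fewer reducers than regions, hashing the region index (rather than the row key) keeps the assignment deterministic, so every row of a given region still lands in the same reducer, even though several regions may share one.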
Example 4: createSubmittableJob
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* @param args command-line arguments: the output directory, the table name,
* then one or more space-delimited column names
* @return the configured JobConf
* @throws IOException when the job cannot be set up
*/
public JobConf createSubmittableJob(String[] args) throws IOException {
JobConf c = new JobConf(getConf(), getClass());
c.setJobName(NAME);
// Columns are space delimited
StringBuilder sb = new StringBuilder();
final int columnoffset = 2;
for (int i = columnoffset; i < args.length; i++) {
if (i > columnoffset) {
sb.append(" ");
}
sb.append(args[i]);
}
// Second argument is the table name.
TableMapReduceUtil.initTableMapJob(args[1], sb.toString(),
RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c);
c.setNumReduceTasks(0);
// First arg is the output directory.
FileOutputFormat.setOutputPath(c, new Path(args[0]));
return c;
}
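With this argument layout the tool is invoked as <outputdir> <tablename> <column> [<column>...]: args[0] becomes the output path, args[1] the table, and the remaining arguments are joined with spaces because initTableMapJob expects the columns as a single space-delimited string.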
Example 5: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Pass the key, and the reversed value, to reduce.
*
* @param key the row key
* @param value the columns scanned for this row
* @param context the context to write the reversed value to
* @throws IOException when the input does not have exactly one column
*/
public void map(ImmutableBytesWritable key, Result value,
Context context)
throws IOException, InterruptedException {
if (value.size() != 1) {
throw new IOException("There should only be one input column");
}
Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
cf = value.getMap();
if(!cf.containsKey(INPUT_FAMILY)) {
throw new IOException("Wrong input columns. Missing: '" +
Bytes.toString(INPUT_FAMILY) + "'.");
}
// Get the original value and reverse it
String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, null));
StringBuilder newValue = new StringBuilder(originalValue);
newValue.reverse();
// Now set the value to be collected
Put outval = new Put(key.get());
outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
context.write(key, outval);
}
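One API note: Put.add(family, qualifier, value) as used here has since been deprecated in favor of Put.addColumn(family, qualifier, value) with the same argument order; the examples on this page reflect the older API.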
Example 6: initTableReduceJob
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Use this before submitting a TableReduce job. It will
* appropriately set up the JobConf.
*
* @param table The output table.
* @param reducer The reducer class to use.
* @param job The current job configuration to adjust.
* @param partitioner Partitioner to use. Pass <code>null</code> to use
* default partitioner.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @throws IOException When determining the region count fails.
*/
public static void initTableReduceJob(String table,
Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
boolean addDependencyJars) throws IOException {
job.setOutputFormat(TableOutputFormat.class);
job.setReducerClass(reducer);
job.set(TableOutputFormat.OUTPUT_TABLE, table);
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Put.class);
job.setStrings("io.serializations", job.get("io.serializations"),
MutationSerialization.class.getName(), ResultSerialization.class.getName());
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
int regions =
MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
} else if (partitioner != null) {
job.setPartitionerClass(partitioner);
}
if (addDependencyJars) {
addDependencyJars(job);
}
initCredentials(job);
}
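For context, here is a hedged driver sketch that calls the helper above. IdentityTableReduce and the table name "output-table" are placeholders, and the commented-out submission line assumes the map side is configured elsewhere.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class ReduceJobDriver {

  // Minimal pass-through reducer: forwards pre-built Puts to the output table.
  public static class IdentityTableReduce extends MapReduceBase
      implements TableReduce<ImmutableBytesWritable, Put> {
    @Override
    public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
        OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter)
        throws IOException {
      while (values.hasNext()) {
        output.collect(key, values.next());
      }
    }
  }

  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create(), ReduceJobDriver.class);
    job.setJobName("example-table-reduce");
    // Pass null as the partitioner to keep the default hash partitioner.
    TableMapReduceUtil.initTableReduceJob("output-table", IdentityTableReduce.class, job, null);
    // Configure the map side and input here, then submit with JobClient.runJob(job).
  }
}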
Example 7: writeRandomKeyValues
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Write random values to the writer assuming a table created using
* {@link #FAMILIES} as column family descriptors
*/
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
TaskAttemptContext context, Set<byte[]> families, int numRows)
throws IOException, InterruptedException {
byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
int valLength = 10;
byte[] valBytes = new byte[valLength];
int taskId = context.getTaskAttemptID().getTaskID().getId();
assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";
final byte[] qualifier = Bytes.toBytes("data");
Random random = new Random();
for (int i = 0; i < numRows; i++) {
Bytes.putInt(keyBytes, 0, i);
random.nextBytes(valBytes);
ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
for (byte[] family : families) {
KeyValue kv = new KeyValue(keyBytes, family, qualifier, valBytes);
writer.write(key, kv);
}
}
}
Example 8: next
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Read the next key/hash pair.
* Returns true if such a pair exists and false when at the end of the data.
*/
public boolean next() throws IOException {
if (cachedNext) {
cachedNext = false;
return true;
}
key = new ImmutableBytesWritable();
hash = new ImmutableBytesWritable();
while (true) {
boolean hasNext = mapFileReader.next(key, hash);
if (hasNext) {
return true;
}
hashFileIndex++;
if (hashFileIndex < TableHash.this.numHashFiles) {
mapFileReader.close();
openHashFile();
} else {
key = null;
hash = null;
return false;
}
}
}
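A hedged consumption sketch for this reader: it walks every key/hash pair from a start key onward. It assumes the enclosing TableHash exposes a newReader(conf, startKey) factory and getCurrentKey()/getCurrentHash() accessors for the fields that next() populates; treat those names as assumptions rather than confirmed API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HashTable;

public class HashReaderScan {
  // Walks every key/hash pair from startKey onward. getCurrentKey() and
  // getCurrentHash() are assumed accessors for the fields next() populates.
  static void scan(HashTable.TableHash tableHash, Configuration conf,
      ImmutableBytesWritable startKey) throws IOException {
    HashTable.TableHash.Reader reader = tableHash.newReader(conf, startKey);
    try {
      while (reader.next()) {
        ImmutableBytesWritable key = reader.getCurrentKey();
        ImmutableBytesWritable hash = reader.getCurrentHash();
        // e.g. recompute the hash for this key range and compare, to find drift
      }
    } finally {
      reader.close();
    }
  }
}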
Example 9: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
throws IOException, InterruptedException {
for(java.util.Map.Entry<byte[], ImmutableBytesWritable> index : indexes.entrySet()) {
byte[] qualifier = index.getKey();
ImmutableBytesWritable tableName = index.getValue();
byte[] value = result.getValue(family, qualifier);
if (value != null) {
// original: row 123 attribute:phone 555-1212
// index: row 555-1212 INDEX:ROW 123
Put put = new Put(value);
put.add(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
context.write(tableName, put);
}
}
}
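Writing the index table's name as the map output key follows the contract of MultiTableOutputFormat, which routes each Put to the table named by its ImmutableBytesWritable key; that is how a single mapper can feed several index tables at once.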
Example 10: setup
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
protected void setup(Context context) throws IOException,
InterruptedException {
Configuration configuration = context.getConfiguration();
String tableName = configuration.get("index.tablename");
String[] fields = configuration.getStrings("index.fields");
String familyName = configuration.get("index.familyname");
family = Bytes.toBytes(familyName);
indexes = new TreeMap<byte[], ImmutableBytesWritable>(Bytes.BYTES_COMPARATOR);
for(String field : fields) {
// if the table is "people" and the field to index is "email", then the
// index table will be called "people-email"
indexes.put(Bytes.toBytes(field),
new ImmutableBytesWritable(Bytes.toBytes(tableName + "-" + field)));
}
}
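The three configuration keys read in setup() must be populated by the driver before the job is submitted. A minimal sketch, with placeholder table/family/field names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class IndexJobConfig {
  // Populates the keys that the mapper's setup() reads.
  public static Configuration configure(String table, String family, String... fields) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("index.tablename", table);      // source table, e.g. "people"
    conf.set("index.familyname", family);    // family holding the indexed columns
    conf.setStrings("index.fields", fields); // one index table per listed field
    return conf;
  }
}

With table "people" and field "email", the mapper then targets an index table named "people-email", exactly as the comment in setup() describes.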
Example 11: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
BytesWritable bwKey = new BytesWritable(key.get());
BytesWritable bwVal = new BytesWritable();
for (Cell kv : value.listCells()) {
if (Bytes.compareTo(TEST_QUALIFIER, 0, TEST_QUALIFIER.length,
kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0) {
context.write(bwKey, EMPTY);
} else {
bwVal.set(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
context.write(bwVal, bwKey);
}
}
}
Example 12: map
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
try {
// first, finish any hash batches that end before the scanned row
while (nextSourceKey != null && key.compareTo(nextSourceKey) >= 0) {
moveToNextBatch(context);
}
// next, add the scanned row (as long as we've reached the first batch)
if (targetHasher.isBatchStarted()) {
targetHasher.hashResult(value);
}
} catch (Throwable t) {
mapperException = t;
Throwables.propagateIfInstanceOf(t, IOException.class);
Throwables.propagateIfInstanceOf(t, InterruptedException.class);
Throwables.propagate(t);
}
}
Example 13: writeRandomKeyValues
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Write random values to the writer assuming a table created using
* {@link #FAMILIES} as column family descriptors
*/
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
TaskAttemptContext context, Set<byte[]> families, int numRows)
throws IOException, InterruptedException {
byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
int valLength = 10;
byte[] valBytes = new byte[valLength];
int taskId = context.getTaskAttemptID().getTaskID().getId();
assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";
final byte[] qualifier = Bytes.toBytes("data");
Random random = new Random();
for (int i = 0; i < numRows; i++) {
Bytes.putInt(keyBytes, 0, i);
random.nextBytes(valBytes);
ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
for (byte[] family : families) {
Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
writer.write(key, kv);
}
}
}
Example 14: hasCoprocessor
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
/**
* Check if the table has an attached co-processor represented by the given
* class name.
*
* @param classNameToMatch - Class name of the co-processor
* @return true if the table has a co-processor with that class name
*/
public boolean hasCoprocessor(String classNameToMatch) {
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
this.values.entrySet()) {
keyMatcher =
HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
continue;
}
String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
if (className == null) continue;
if (className.equals(classNameToMatch.trim())) {
return true;
}
}
return false;
}
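A hedged usage sketch for this check: guard against registering the same coprocessor twice before modifying a table. The client calls are standard HBase 1.x API; the table and coprocessor class names are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CoprocessorCheck {
  public static void main(String[] args) throws Exception {
    String coprocessor = "org.example.MyObserver"; // placeholder class name
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("t1"));
      if (!desc.hasCoprocessor(coprocessor)) {
        desc.addCoprocessor(coprocessor);
        admin.modifyTable(desc.getTableName(), desc);
      }
    }
  }
}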
Example 15: testWithMockedMapReduce
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; // import the required package/class
@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
int numRegions, int expectedNumSplits) throws Exception {
setupCluster();
TableName tableName = TableName.valueOf("testWithMockedMapReduce");
try {
createTableAndSnapshot(
util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);
JobConf job = new JobConf(util.getConfiguration());
Path tmpTableDir = util.getRandomDir();
TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
NullWritable.class, job, false, tmpTableDir);
// mapred doesn't support start and end keys? o.O
verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
} finally {
util.getHBaseAdmin().deleteSnapshot(snapshotName);
util.deleteTable(tableName);
tearDownCluster();
}
}