This article collects typical usage examples of the Java method org.apache.hadoop.util.ReflectionUtils.newInstance. If you are wondering what ReflectionUtils.newInstance does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the enclosing class, org.apache.hadoop.util.ReflectionUtils, for more context.
The following presents 15 code examples of ReflectionUtils.newInstance, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
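Before diving into the examples, here is a minimal, self-contained sketch of the call itself (using org.apache.hadoop.io.Text purely for illustration; it is not taken from any example below). ReflectionUtils.newInstance instantiates the given class through its no-argument constructor, even if that constructor is not public, and injects the supplied Configuration when the new object is Configurable:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ReflectionUtils;

public class NewInstanceSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Reflectively create a Text; conf would be injected if Text implemented Configurable.
    Text value = ReflectionUtils.newInstance(Text.class, conf);
    value.set("hello");
    System.out.println(value); // prints "hello"
  }
}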
Example 1: testCustomOffsets
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
public void testCustomOffsets() {
  Configuration conf = new Configuration();
  BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
  BinaryComparable key2 = new BytesWritable(new byte[] { 6, 2, 3, 7, 8 });

  BinaryPartitioner.setOffsets(conf, 1, -3);
  BinaryPartitioner<?> partitioner =
      ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  int partition1 = partitioner.getPartition(key1, null, 10);
  int partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);

  BinaryPartitioner.setOffsets(conf, 1, 2);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);

  BinaryPartitioner.setOffsets(conf, -4, -3);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
}
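Note on the assertions above: all three setOffsets calls select byte indexes 1 through 2 of the two five-byte keys (negative offsets count back from the end of the key), and both keys contain {2, 3} at those positions, so each pair of keys lands in the same partition.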
Example 2: setup
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Override
protected void setup(Context context)
    throws IOException, InterruptedException {
  this.conf = context.getConfiguration();
  this.lobLoader = new LargeObjectLoader(this.conf,
      new Path(this.conf.get("sqoop.hbase.lob.extern.dir",
          "/tmp/sqoop-hbase-" + context.getTaskAttemptID())));

  // Get the implementation of PutTransformer to use.
  // By default, we call toString() on every non-null field.
  Class<? extends PutTransformer> xformerClass =
      (Class<? extends PutTransformer>)
      this.conf.getClass(TRANSFORMER_CLASS_KEY, ToStringPutTransformer.class);
  this.putTransformer = (PutTransformer)
      ReflectionUtils.newInstance(xformerClass, this.conf);
  if (null == putTransformer) {
    throw new RuntimeException("Could not instantiate PutTransformer.");
  }

  this.putTransformer.setColumnFamily(conf.get(COL_FAMILY_KEY, null));
  this.putTransformer.setRowKeyColumn(conf.get(ROW_KEY_COLUMN_KEY, null));
}
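A hedged configuration sketch for the setup method above (MyPutTransformer is a hypothetical PutTransformer subclass; TRANSFORMER_CLASS_KEY is the same constant the method reads):

Configuration conf = new Configuration();
// Select a hypothetical custom transformer; setup() will instantiate it via ReflectionUtils.
conf.setClass(TRANSFORMER_CLASS_KEY, MyPutTransformer.class, PutTransformer.class);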
Example 3: readFields
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
  super.readFields(in);

  // Read the number of entries in the map
  int entries = in.readInt();

  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    K key =
        (K) ReflectionUtils.newInstance(getClass(in.readByte()), getConf());
    key.readFields(in);
    Writable value = (Writable) ReflectionUtils.newInstance(
        getClass(in.readByte()), getConf());
    value.readFields(in);
    instance.put(key, value);
  }
}
Example 4: configure
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Override // Mapper
public void configure(JobConf conf) {
  super.configure(conf);
  // grab compression
  String compression = getConf().get("test.io.compression.class", null);
  Class<? extends CompressionCodec> codec;

  // try to initialize codec
  try {
    codec = (compression == null) ? null
        : Class.forName(compression).asSubclass(CompressionCodec.class);
  } catch (Exception e) {
    throw new RuntimeException("Compression codec not found: ", e);
  }

  if (codec != null) {
    compressionCodec =
        (CompressionCodec) ReflectionUtils.newInstance(codec, getConf());
  }
}
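For completeness, a minimal sketch of how the codec read above could be selected in the job configuration (leaving the property unset disables compression; GzipCodec is used here only as an example):

JobConf conf = new JobConf();
// Ask the mapper above to use gzip; the class is loaded by name in configure().
conf.set("test.io.compression.class", GzipCodec.class.getName());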
Example 5: testWritable
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/** Utility method for testing writables. */
public static Writable testWritable(Writable before,
    Configuration conf) throws Exception {
  DataOutputBuffer dob = new DataOutputBuffer();
  before.write(dob);

  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), dob.getLength());

  Writable after = (Writable) ReflectionUtils.newInstance(
      before.getClass(), conf);
  after.readFields(dib);
  assertEquals(before, after);
  return after;
}
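A hedged usage sketch of the helper above (the Text value and the wrapping test method are illustrative assumptions):

@Test
public void roundTripsText() throws Exception {
  Configuration conf = new Configuration();
  // Serializes the Text, reflectively creates a fresh instance, deserializes into it,
  // and asserts it equals the original.
  Writable copy = testWritable(new Text("hello"), conf);
  assertEquals(new Text("hello"), copy);
}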
Example 6: readFields
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // First clear the map. Otherwise we will just accumulate
  // entries every time this method is called.
  this.instance.clear();

  // Read the number of entries in the map
  int entries = in.readInt();

  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    byte[] key = Bytes.readByteArray(in);
    byte id = in.readByte();
    Class clazz = getClass(id);
    V value = null;
    if (clazz.equals(byte[].class)) {
      byte[] bytes = Bytes.readByteArray(in);
      value = (V) bytes;
    } else {
      Writable w = (Writable) ReflectionUtils.newInstance(clazz, getConf());
      w.readFields(in);
      value = (V) w;
    }
    this.instance.put(key, value);
  }
}
Example 7: writeBadOutput
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
    throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
      TypeConverter.fromYarn(attempt.getID()));

  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);

  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }

  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Example 8: writeToQueue
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private void writeToQueue(KEYOUT key, VALUEOUT value) throws IOException,
    InterruptedException {
  this.keyout = (KEYOUT) ReflectionUtils.newInstance(keyClass, conf);
  this.valueout = (VALUEOUT) ReflectionUtils.newInstance(valueClass, conf);
  ReflectionUtils.copy(conf, key, this.keyout);
  ReflectionUtils.copy(conf, value, this.valueout);

  // wait to write output to queue
  outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(keyout, valueout));
}
Example 9: combineAndSpill
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator =
      (RawComparator<K>) job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
          Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
Example 10: newInstance
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c, Configuration conf) {
  WritableFactory factory = WritableFactories.getFactory(c);
  if (factory != null) {
    Writable result = factory.newInstance();
    if (result instanceof Configurable) {
      ((Configurable) result).setConf(conf);
    }
    return result;
  } else {
    return ReflectionUtils.newInstance(c, conf);
  }
}
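A hedged sketch of the factory path taken above (MyWritable is a hypothetical Writable; registration would typically happen in a static initializer of that class):

// With a factory registered, newInstance(MyWritable.class, conf) skips reflection entirely.
WritableFactories.setFactory(MyWritable.class, new WritableFactory() {
  @Override
  public Writable newInstance() {
    return new MyWritable();
  }
});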
Example 11: decodeIdentifier
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/**
 * Get the token identifier object, or null if it could not be constructed
 * (because the class could not be loaded, for example).
 * @return the token identifier, or null
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
  Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
  if (cls == null) {
    return null;
  }
  TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
  ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
  DataInputStream in = new DataInputStream(buf);
  tokenIdentifier.readFields(in);
  in.close();
  return (T) tokenIdentifier;
}
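A hedged usage sketch (token is assumed to be an existing Token<? extends TokenIdentifier>; the null check mirrors the contract described in the javadoc above):

TokenIdentifier id = token.decodeIdentifier();
if (id == null) {
  // The identifier class for token.getKind() could not be loaded.
  System.out.println("Unknown token kind: " + token.getKind());
} else {
  System.out.println("Token issued to " + id.getUser());
}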
Example 12: createDistributedHBaseCluster
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
public void createDistributedHBaseCluster() throws IOException {
  Configuration conf = getConfiguration();
  Class<? extends ClusterManager> clusterManagerClass = conf.getClass(
      HBASE_CLUSTER_MANAGER_CLASS, DEFAULT_HBASE_CLUSTER_MANAGER_CLASS,
      ClusterManager.class);
  ClusterManager clusterManager = ReflectionUtils.newInstance(
      clusterManagerClass, conf);
  setHBaseCluster(new DistributedHBaseCluster(conf, clusterManager));
  getHBaseAdmin();
}
Example 13: createOutputReader
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
OutputReader createOutputReader(Class<? extends OutputReader> outputReaderClass)
    throws IOException {
  OutputReader outputReader =
      ReflectionUtils.newInstance(outputReaderClass, job_);
  outputReader.initialize(this);
  return outputReader;
}
Example 14: testThreeUnbalancedVolumes
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Test(timeout = 60000)
public void testThreeUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked")
  final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);

  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();

  // First volume with 1MB free space
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);

  // Second volume with 3MB free space, which is a difference of 2MB, more
  // than the threshold of 1MB.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);

  // Third volume, again with 3MB free space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);

  // We should alternate assigning between the two volumes with a lot of free
  // space.
  initPolicy(policy, 1.0f);
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));

  // All writes should be assigned to the volume with the least free space.
  initPolicy(policy, 0.0f);
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
}
Example 15: testGzipCompatibility
import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Test
public void testGzipCompatibility() throws IOException {
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("seed: " + seed);

  DataOutputBuffer dflbuf = new DataOutputBuffer();
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  gzout.write(b);
  gzout.close();

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  InputStream gzin = codec.createInputStream(gzbuf, decom);
  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}