This article compiles typical usage examples of the Java class org.apache.hadoop.util.ReflectionUtils. If you are wondering what ReflectionUtils is for, or how to use it, the curated class examples below may help.
ReflectionUtils belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
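
Before diving in, a minimal sketch of the pattern every example below shares: ReflectionUtils.newInstance(Class, Configuration) reflectively invokes the class's no-arg constructor (even a non-public one) and, if the new object implements Configurable, injects the given Configuration. The sketch instantiates a stock org.apache.hadoop.io.Text just to show the call shape.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ReflectionUtils;

Configuration conf = new Configuration();
// Reflectively invokes the no-arg constructor; if the instance
// implemented Configurable, setConf(conf) would be called on it.
Text text = ReflectionUtils.newInstance(Text.class, conf);
text.set("hello");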
Example 1: getPossiblyCompressedOutputStream

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/**
 * Returns an {@link OutputStream} for a file that might need
 * compression.
 */
static OutputStream getPossiblyCompressedOutputStream(Path file,
                                                      Configuration conf)
    throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  JobConf jConf = new JobConf(conf);
  if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
    // get the codec class
    Class<? extends CompressionCodec> codecClass =
        org.apache.hadoop.mapred.FileOutputFormat
            .getOutputCompressorClass(jConf, GzipCodec.class);
    // get the codec implementation
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
    // add the appropriate extension
    file = file.suffix(codec.getDefaultExtension());
    if (isCompressionEmulationEnabled(conf)) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new DataOutputStream(codec.createOutputStream(fileOut));
    }
  }
  return fs.create(file, false);
}
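
A hedged usage sketch for Example 1: the helper only picks a codec when output compression is enabled on the job, so a caller would flip that switch first. JobConf extends Configuration and can be passed straight through; the path is illustrative and the helper itself is package-private, so this is a sketch, not a drop-in call site.

import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

JobConf conf = new JobConf();
// Enable output compression so the helper resolves a codec and extension.
FileOutputFormat.setCompressOutput(conf, true);
FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
OutputStream out =
    getPossiblyCompressedOutputStream(new Path("/tmp/sample.txt"), conf);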
Example 2: initReader

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

@SuppressWarnings({"rawtypes", "unchecked"})
public void initReader() throws IOException {
  try {
    Configuration conf = WorkerContext.get().getConf();
    String inputFormatClassName =
        conf.get(AngelConf.ANGEL_INPUTFORMAT_CLASS,
            AngelConf.DEFAULT_ANGEL_INPUTFORMAT_CLASS);
    Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormatClass =
        (Class<? extends org.apache.hadoop.mapred.InputFormat>) Class
            .forName(inputFormatClassName);
    org.apache.hadoop.mapred.InputFormat inputFormat =
        ReflectionUtils.newInstance(inputFormatClass, new JobConf(conf));
    org.apache.hadoop.mapred.RecordReader<KEY, VALUE> recordReader =
        inputFormat.getRecordReader(split, new JobConf(conf), Reporter.NULL);
    setReader(new DFSReaderOldAPI(recordReader));
  } catch (Exception x) {
    LOG.error("init reader error ", x);
    throw new IOException(x);
  }
}
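
A hedged configuration sketch for Example 2, reusing the key constant already referenced in the code; the value shown is the standard old-API TextInputFormat, and whether that suits a given Angel job is an assumption.

// Point the Angel worker at a concrete old-API input format.
conf.set(AngelConf.ANGEL_INPUTFORMAT_CLASS,
    org.apache.hadoop.mapred.TextInputFormat.class.getName());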
Example 3: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/**
 * {@inheritDoc}
 * @throws IOException If the child InputSplit cannot be read, typically
 * for failing access checks.
 */
@SuppressWarnings("unchecked") // Generic array assignment
public void readFields(DataInput in) throws IOException {
  int card = WritableUtils.readVInt(in);
  if (splits == null || splits.length != card) {
    splits = new InputSplit[card];
  }
  Class<? extends InputSplit>[] cls = new Class[card];
  try {
    for (int i = 0; i < card; ++i) {
      cls[i] =
          Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
    }
    for (int i = 0; i < card; ++i) {
      splits[i] = ReflectionUtils.newInstance(cls[i], null);
      splits[i].readFields(in);
    }
  } catch (ClassNotFoundException e) {
    throw (IOException) new IOException("Failed split init").initCause(e);
  }
}
Example 4: createScheduler

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

protected ResourceScheduler createScheduler() {
  String schedulerClassName = conf.get(YarnConfiguration.RM_SCHEDULER,
      YarnConfiguration.DEFAULT_RM_SCHEDULER);
  LOG.info("Using Scheduler: " + schedulerClassName);
  try {
    Class<?> schedulerClazz = Class.forName(schedulerClassName);
    if (ResourceScheduler.class.isAssignableFrom(schedulerClazz)) {
      return (ResourceScheduler) ReflectionUtils.newInstance(schedulerClazz,
          this.conf);
    } else {
      throw new YarnRuntimeException("Class: " + schedulerClassName
          + " not instance of " + ResourceScheduler.class.getCanonicalName());
    }
  } catch (ClassNotFoundException e) {
    throw new YarnRuntimeException("Could not instantiate Scheduler: "
        + schedulerClassName, e);
  }
}
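
A hedged configuration sketch for Example 4: selecting the scheduler class that createScheduler() will reflectively instantiate. The class name shown is the stock CapacityScheduler.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

Configuration conf = new Configuration();
// Key resolves to "yarn.resourcemanager.scheduler.class".
conf.set(YarnConfiguration.RM_SCHEDULER,
    "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");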
Example 5: getFilterInitializers

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }
  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }
  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for (int i = 0; i < classes.length; i++) {
    initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(
        classes[i], conf);
  }
  return initializers;
}
Example 6: NewOutputCollector

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

@SuppressWarnings("unchecked")
NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext,
                   JobConf job,
                   TaskUmbilicalProtocol umbilical,
                   TaskReporter reporter
                   ) throws IOException, ClassNotFoundException {
  collector = createSortingCollector(job, reporter);
  partitions = jobContext.getNumReduceTasks();
  if (partitions > 1) {
    partitioner = (org.apache.hadoop.mapreduce.Partitioner<K,V>)
        ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job);
  } else {
    partitioner = new org.apache.hadoop.mapreduce.Partitioner<K,V>() {
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return partitions - 1;
      }
    };
  }
}
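
Example 6 falls back to a fixed single-partition Partitioner when there is at most one reduce task; otherwise it reflectively instantiates whatever partitioner the job configured. A minimal sketch of setting that class on a new-API job (HashPartitioner also happens to be the default):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

Job job = Job.getInstance(new Configuration());
job.setNumReduceTasks(4);                       // partitions > 1
job.setPartitionerClass(HashPartitioner.class); // what getPartitionerClass() returns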
Example 7: CompressionCodecFactory

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/**
 * Find the codecs specified in the config value io.compression.codecs
 * and register them. Defaults to gzip and deflate.
 */
public CompressionCodecFactory(Configuration conf) {
  codecs = new TreeMap<String, CompressionCodec>();
  codecsByClassName = new HashMap<String, CompressionCodec>();
  codecsByName = new HashMap<String, CompressionCodec>();
  List<Class<? extends CompressionCodec>> codecClasses =
      getCodecClasses(conf);
  if (codecClasses == null || codecClasses.isEmpty()) {
    addCodec(new GzipCodec());
    addCodec(new DefaultCodec());
  } else {
    for (Class<? extends CompressionCodec> codecClass : codecClasses) {
      addCodec(ReflectionUtils.newInstance(codecClass, conf));
    }
  }
}
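
A minimal usage sketch for Example 7: once built, the factory maps a file path to its codec by extension, returning null when no registered codec matches. The path and FileSystem handle are illustrative.

import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

Configuration conf = new Configuration();
CompressionCodecFactory factory = new CompressionCodecFactory(conf);
Path p = new Path("/logs/events.gz");          // illustrative path
CompressionCodec codec = factory.getCodec(p);  // null if no codec matches
if (codec != null) {
  InputStream in = codec.createInputStream(FileSystem.get(conf).open(p));
}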
Example 8: get

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/** Get a comparator for a {@link WritableComparable} implementation. */
public static WritableComparator get(
    Class<? extends WritableComparable> c, Configuration conf) {
  WritableComparator comparator = comparators.get(c);
  if (comparator == null) {
    // force the static initializers to run
    forceInit(c);
    // look to see if it is defined now
    comparator = comparators.get(c);
    // if not, use the generic one
    if (comparator == null) {
      comparator = new WritableComparator(c, conf, true);
    }
  }
  // Newly passed Configuration objects should be used.
  ReflectionUtils.setConf(comparator, conf);
  return comparator;
}
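
A hedged usage sketch for Example 8: fetching the registered comparator for a stock WritableComparable and comparing two values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparator;

WritableComparator cmp =
    WritableComparator.get(IntWritable.class, new Configuration());
int order = cmp.compare(new IntWritable(1), new IntWritable(2)); // negative: 1 < 2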
Example 9: makeCopyForPassByValue

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

private <E> E makeCopyForPassByValue(Serialization<E> serialization,
                                     E obj) throws IOException {
  Serializer<E> ser =
      serialization.getSerializer(GenericsUtil.getClass(obj));
  Deserializer<E> deser =
      serialization.getDeserializer(GenericsUtil.getClass(obj));
  DataOutputBuffer dof = threadLocalDataOutputBuffer.get();
  dof.reset();
  ser.open(dof);
  ser.serialize(obj);
  ser.close();
  obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
      getChainJobConf());
  ByteArrayInputStream bais =
      new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
  deser.open(bais);
  deser.deserialize(obj);
  deser.close();
  return obj;
}
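
Worth noting alongside Example 9: ReflectionUtils ships its own serialize-then-deserialize deep copy, copy(Configuration, src, dst), which performs essentially the same round trip via the configured serialization factory. A minimal sketch (the call throws IOException):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ReflectionUtils;

Text src = new Text("pass-by-value");
// Deep-copies src into the supplied destination object and returns it.
Text dst = ReflectionUtils.copy(new Configuration(), src, new Text());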
Example 10: getKeyProvider

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

public static KeyProvider getKeyProvider(Configuration conf) {
  String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
      KeyStoreKeyProvider.class.getName());
  String providerParameters = conf.get(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "");
  try {
    Pair<String, String> providerCacheKey = new Pair<String, String>(providerClassName,
        providerParameters);
    KeyProvider provider = keyProviderCache.get(providerCacheKey);
    if (provider != null) {
      return provider;
    }
    provider = (KeyProvider) ReflectionUtils.newInstance(
        getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName),
        conf);
    provider.init(providerParameters);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Installed " + providerClassName + " into key provider cache");
    }
    keyProviderCache.put(providerCacheKey, provider);
    return provider;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
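
A hedged usage sketch for Example 10, reusing the constants from the code above; a real KeyStoreKeyProvider would additionally need its parameters key pointing at a keystore URI, which is elided here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
    KeyStoreKeyProvider.class.getName());
KeyProvider provider = getKeyProvider(conf); // cached on subsequent calls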
Example 11: testWritable

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

/** Utility method for testing writables. */
public static Writable testWritable(Writable before,
                                    Configuration conf) throws Exception {
  DataOutputBuffer dob = new DataOutputBuffer();
  before.write(dob);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), dob.getLength());
  Writable after = (Writable) ReflectionUtils.newInstance(
      before.getClass(), conf);
  after.readFields(dib);
  assertEquals(before, after);
  return after;
}
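
A hedged usage sketch for Example 11: round-tripping a stock Writable through the utility to check that write() and readFields() are symmetric.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

Writable restored = testWritable(new IntWritable(42), new Configuration());
// The assertEquals inside testWritable already verified restored equals the original.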
Example 12: createPlanFollower

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

private PlanFollower createPlanFollower() {
  String planFollowerPolicyClassName =
      conf.get(YarnConfiguration.RM_RESERVATION_SYSTEM_PLAN_FOLLOWER,
          getDefaultPlanFollower());
  if (planFollowerPolicyClassName == null) {
    return null;
  }
  LOG.info("Using PlanFollowerPolicy: " + planFollowerPolicyClassName);
  try {
    Class<?> planFollowerPolicyClazz =
        conf.getClassByName(planFollowerPolicyClassName);
    if (PlanFollower.class.isAssignableFrom(planFollowerPolicyClazz)) {
      return (PlanFollower) ReflectionUtils.newInstance(
          planFollowerPolicyClazz, conf);
    } else {
      throw new YarnRuntimeException("Class: " + planFollowerPolicyClassName
          + " not instance of " + PlanFollower.class.getCanonicalName());
    }
  } catch (ClassNotFoundException e) {
    throw new YarnRuntimeException(
        "Could not instantiate PlanFollowerPolicy: "
            + planFollowerPolicyClassName, e);
  }
}
Example 13: processData

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

private void processData(byte[] buf) throws IOException, InterruptedException {
  DataInputStream dis =
      new DataInputStream(new ByteArrayInputStream(buf));
  int id = dis.readInt(); // try to read an id
  if (LOG.isDebugEnabled())
    LOG.debug(" got #" + id);
  Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param
  ((RPC.Invocation) param).setConf(conf);
  param.readFields(dis);
  Call call = new Call(id, param, this);
  callQueue.put(call); // queue the call; may block here
  incRpcCount();       // increment the RPC count
}
Example 14: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // First clear the map. Otherwise we will just accumulate
  // entries every time this method is called.
  this.instance.clear();
  // Read the number of entries in the map
  int entries = in.readInt();
  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    byte[] key = Bytes.readByteArray(in);
    byte id = in.readByte();
    Class clazz = getClass(id);
    V value = null;
    if (clazz.equals(byte[].class)) {
      byte[] bytes = Bytes.readByteArray(in);
      value = (V) bytes;
    } else {
      Writable w = (Writable) ReflectionUtils.newInstance(clazz, getConf());
      w.readFields(in);
      value = (V) w;
    }
    this.instance.put(key, value);
  }
}
Example 15: createSCMStoreService

import org.apache.hadoop.util.ReflectionUtils; // import the dependent package/class

@SuppressWarnings("unchecked")
private static SCMStore createSCMStoreService(Configuration conf) {
  Class<? extends SCMStore> defaultStoreClass;
  try {
    defaultStoreClass =
        (Class<? extends SCMStore>) Class
            .forName(YarnConfiguration.DEFAULT_SCM_STORE_CLASS);
  } catch (Exception e) {
    throw new YarnRuntimeException("Invalid default scm store class: "
        + YarnConfiguration.DEFAULT_SCM_STORE_CLASS, e);
  }
  SCMStore store =
      ReflectionUtils.newInstance(conf.getClass(
          YarnConfiguration.SCM_STORE_CLASS,
          defaultStoreClass, SCMStore.class), conf);
  return store;
}