This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getClassByName. If you are unsure what Configuration.getClassByName does, how to call it, or want to see it in real code, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.conf.Configuration.
Below are 12 code examples of Configuration.getClassByName, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
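Before turning to the examples, here is a minimal self-contained sketch (not drawn from any project below) of the method's basic contract: it resolves a class name through the Configuration's class loader instead of the caller's, which is why Hadoop code prefers it over Class.forName for user-supplied class names.
import org.apache.hadoop.conf.Configuration;

public class GetClassByNameDemo {
  public static void main(String[] args) throws ClassNotFoundException {
    Configuration conf = new Configuration();
    // Resolves the name through the Configuration's class loader, so classes
    // added to the classpath at runtime (e.g. job jars) are visible too.
    Class<?> textClass = conf.getClassByName("org.apache.hadoop.io.Text");
    System.out.println("Loaded: " + textClass.getName());
  }
}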
Example 1: getSocketFactoryFromProperty
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
* Instantiate the SocketFactory whose class name is given in the
* property value. Throws a RuntimeException if the class cannot be found.
*
* @param propValue the property value, i.e. the class name of the
* SocketFactory to instantiate; assumed non-null and non-empty.
* @return a socket factory as defined in the property value.
*/
public static SocketFactory getSocketFactoryFromProperty(
Configuration conf, String propValue) {
try {
Class<?> theClass = conf.getClassByName(propValue);
return (SocketFactory) ReflectionUtils.newInstance(theClass, conf);
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException("Socket Factory class not found: " + propValue, cnfe);
}
}
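A possible call site, shown as a hedged sketch; the property name hadoop.rpc.socket.factory.class.default is assumed here for illustration:
Configuration conf = new Configuration();
String propValue = conf.get("hadoop.rpc.socket.factory.class.default");
if (propValue != null && !propValue.isEmpty()) {
  // propValue names a javax.net.SocketFactory subclass to instantiate.
  SocketFactory factory = getSocketFactoryFromProperty(conf, propValue);
}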
Example 2: run
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public void run() {
Configuration conf = WorkerContext.get().getConf();
LOG.info("task " + taskId + " is running.");
try {
userTaskClass =
conf.getClassByName(conf.get(AngelConf.ANGEL_TASK_USER_TASKCLASS,
AngelConf.DEFAULT_ANGEL_TASK_USER_TASKCLASS));
LOG.info("userTaskClass = " + userTaskClass);
BaseTask userTask = newBaseTask(userTaskClass);
this.userTask = userTask;
runUser(userTask);
} catch (Throwable e) {
LOG.error("task runner error", e);
diagnostics.add("task runner error: " + e.getMessage());
setState(TaskState.FAILED);
}
taskExit();
}
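For context, the task class resolved above is supplied through the job configuration. A hedged sketch of the client side (the task class name is hypothetical):
Configuration conf = new Configuration();
// ANGEL_TASK_USER_TASKCLASS names the user's BaseTask subclass to run.
conf.set(AngelConf.ANGEL_TASK_USER_TASKCLASS, "com.example.MyTrainingTask");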
Example 3: getCodec
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
* Given a codec name, instantiate the concrete implementation
* class that implements it.
* @throws com.cloudera.sqoop.io.UnsupportedCodecException if a codec cannot
* be found with the supplied name.
*/
public static CompressionCodec getCodec(String codecName,
Configuration conf) throws com.cloudera.sqoop.io.UnsupportedCodecException {
// Try standard Hadoop mechanism first
CompressionCodec codec = getCodecByName(codecName, conf);
if (codec != null) {
return codec;
}
// Fall back to Sqoop mechanism
String codecClassName = null;
try {
codecClassName = getCodecClassName(codecName);
if (null == codecClassName) {
return null;
}
Class<? extends CompressionCodec> codecClass =
(Class<? extends CompressionCodec>)
conf.getClassByName(codecClassName);
return (CompressionCodec) ReflectionUtils.newInstance(
codecClass, conf);
} catch (ClassNotFoundException cnfe) {
throw new com.cloudera.sqoop.io.UnsupportedCodecException(
"Cannot find codec class "
+ codecClassName + " for codec " + codecName);
}
}
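A hedged usage sketch, assuming "gzip" is one of the short codec names that getCodecClassName can map to a class:
Configuration conf = new Configuration();
CompressionCodec codec = getCodec("gzip", conf);
if (codec != null) {
  System.out.println("Resolved codec: " + codec.getClass().getName());
}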
Example 4: getProtocolClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
static Class<?> getProtocolClass(String protocolName, Configuration conf)
throws ClassNotFoundException {
Class<?> protocol = PROTOCOL_CACHE.get(protocolName);
if (protocol == null) {
protocol = conf.getClassByName(protocolName);
PROTOCOL_CACHE.put(protocolName, protocol);
}
return protocol;
}
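PROTOCOL_CACHE itself is not part of the excerpt; a plausible declaration (an assumption, not the original source) is a concurrent map so lookups stay thread-safe. Imports of java.util.Map and java.util.concurrent.ConcurrentHashMap are assumed:
// Assumed declaration: protocol names mapped to their resolved classes.
// A duplicate resolve under contention is harmless; the map stays consistent.
private static final Map<String, Class<?>> PROTOCOL_CACHE =
    new ConcurrentHashMap<String, Class<?>>();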
Example 5: getCodecClasses
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
* Get the list of codecs discovered via a Java ServiceLoader, or
* listed in the configuration. Codecs specified in configuration come
* later in the returned list, and are considered to override those
* from the ServiceLoader.
* @param conf the configuration to look in
* @return a list of the {@link CompressionCodec} classes
*/
public static List<Class<? extends CompressionCodec>> getCodecClasses(
Configuration conf) {
List<Class<? extends CompressionCodec>> result
= new ArrayList<Class<? extends CompressionCodec>>();
// Add codec classes discovered via service loading
synchronized (CODEC_PROVIDERS) {
// CODEC_PROVIDERS is a lazy collection. Synchronize so it is
// thread-safe. See HADOOP-8406.
for (CompressionCodec codec : CODEC_PROVIDERS) {
result.add(codec.getClass());
}
}
// Add codec classes from configuration
String codecsString = conf.get(
CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY);
if (codecsString != null) {
StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
while (codecSplit.hasMoreElements()) {
String codecSubstring = codecSplit.nextToken().trim();
if (codecSubstring.length() != 0) {
try {
Class<?> cls = conf.getClassByName(codecSubstring);
if (!CompressionCodec.class.isAssignableFrom(cls)) {
throw new IllegalArgumentException("Class " + codecSubstring +
" is not a CompressionCodec");
}
result.add(cls.asSubclass(CompressionCodec.class));
} catch (ClassNotFoundException ex) {
throw new IllegalArgumentException("Compression codec " +
codecSubstring + " not found.",
ex);
}
}
}
}
return result;
}
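The configuration half of this lookup is driven by the io.compression.codecs key. A short sketch using two standard Hadoop codec classes:
Configuration conf = new Configuration();
// Entries listed here come later in the result and override
// ServiceLoader-discovered codecs.
conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY,
    "org.apache.hadoop.io.compress.GzipCodec,"
    + "org.apache.hadoop.io.compress.DefaultCodec");
List<Class<? extends CompressionCodec>> codecs = getCodecClasses(conf);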
Example 6: getClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/** Return the class for a name. Default is {@link Class#forName(String)}.*/
public static synchronized Class<?> getClass(String name, Configuration conf
) throws IOException {
Class<?> writableClass = NAME_TO_CLASS.get(name);
if (writableClass != null)
return writableClass.asSubclass(Writable.class);
try {
return conf.getClassByName(name);
} catch (ClassNotFoundException e) {
IOException newE = new IOException("WritableName can't load class: " + name);
newE.initCause(e);
throw newE;
}
}
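NAME_TO_CLASS is outside the excerpt; a plausible declaration (an assumption about the surrounding class, guarded by the method's synchronized keyword) would be:
// Assumed declaration: logical names registered for Writable classes.
private static final HashMap<String, Class<?>> NAME_TO_CLASS =
    new HashMap<String, Class<?>>();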
Example 7: add
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private void add(Configuration conf, String serializationName) {
try {
Class<? extends Serialization> serializationClass =
(Class<? extends Serialization>) conf.getClassByName(serializationName);
serializations.add((Serialization)
ReflectionUtils.newInstance(serializationClass, getConf()));
} catch (ClassNotFoundException e) {
LOG.warn("Serialization class not found: ", e);
}
}
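A hedged sketch of the constructor that typically drives add(), feeding it each class name listed under the io.serializations key (the default shown is Hadoop's WritableSerialization; the surrounding class details are assumed):
public SerializationFactory(Configuration conf) {
  super(conf);
  // Each comma-separated entry names a Serialization implementation.
  for (String serializationName : conf.getTrimmedStrings("io.serializations",
      "org.apache.hadoop.io.serializer.WritableSerialization")) {
    add(conf, serializationName);
  }
}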
Example 8: add
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private void add(Configuration conf, String serializationName) {
try {
Class<? extends Serialization> serializationClass =
(Class<? extends Serialization>) conf.getClassByName(serializationName);
serializations.add((Serialization)
ReflectionUtils.newInstance(serializationClass, getConf()));
} catch (ClassNotFoundException e) {
LOG.warn("Serilization class not found: " +
StringUtils.stringifyException(e));
}
}
Example 9: getAccountKeyFromConfiguration
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@VisibleForTesting
public static String getAccountKeyFromConfiguration(String accountName,
Configuration conf) throws KeyProviderException {
String key = null;
String keyProviderClass = conf.get(KEY_ACCOUNT_KEYPROVIDER_PREFIX
+ accountName);
KeyProvider keyProvider = null;
if (keyProviderClass == null) {
// No key provider was provided so use the provided key as is.
keyProvider = new SimpleKeyProvider();
} else {
// create an instance of the key provider class and verify it
// implements KeyProvider
Object keyProviderObject = null;
try {
Class<?> clazz = conf.getClassByName(keyProviderClass);
keyProviderObject = clazz.newInstance();
} catch (Exception e) {
throw new KeyProviderException("Unable to load key provider class.", e);
}
if (!(keyProviderObject instanceof KeyProvider)) {
throw new KeyProviderException(keyProviderClass
+ " specified in config is not a valid KeyProvider class.");
}
keyProvider = (KeyProvider) keyProviderObject;
}
key = keyProvider.getStorageAccountKey(accountName, conf);
return key;
}
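A hedged usage sketch, assuming the hadoop-azure property prefix fs.azure.account.keyprovider. and a hypothetical provider class:
Configuration conf = new Configuration();
// Point the account at a custom KeyProvider implementation (hypothetical).
conf.set("fs.azure.account.keyprovider.myaccount", "com.example.MyKeyProvider");
String key = getAccountKeyFromConfiguration("myaccount", conf);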
Example 10: getScanLabelGenerators
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
* @param conf The configuration to use
* @return Stack of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in
* Configuration as a comma-separated list using the key
* "hbase.regionserver.scan.visibility.label.generator.class"
* @throws IllegalArgumentException
* when any of the specified ScanLabelGenerator classes cannot be loaded.
*/
public static List<ScanLabelGenerator> getScanLabelGenerators(Configuration conf) {
// There can be n SLGs specified as a comma-separated list in conf
String slgClassesCommaSeparated = conf.get(VISIBILITY_LABEL_GENERATOR_CLASS);
// We have only System level SLGs now. The order of execution will be the same as the order in
// the comma-separated config value
List<ScanLabelGenerator> slgs = new ArrayList<ScanLabelGenerator>();
if (StringUtils.isNotEmpty(slgClassesCommaSeparated)) {
String[] slgClasses = slgClassesCommaSeparated.split(COMMA);
for (String slgClass : slgClasses) {
Class<? extends ScanLabelGenerator> slgKlass;
try {
slgKlass = (Class<? extends ScanLabelGenerator>) conf.getClassByName(slgClass.trim());
slgs.add(ReflectionUtils.newInstance(slgKlass, conf));
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Unable to find " + slgClass, e);
}
}
}
// If no SLG is specified in conf, by default we'll add two SLGs
// 1. FeedUserAuthScanLabelGenerator
// 2. DefinedSetFilterScanLabelGenerator
// This stacking will achieve the following default behavior:
// 1. If there is no Auths in the scan, we will obtain the global defined set for the user
// from the labels table.
// 2. If there is Auths in the scan, we will examine the passed in Auths and filter out the
// labels that the user is not entitled to. Then use the resulting label set.
if (slgs.isEmpty()) {
slgs.add(ReflectionUtils.newInstance(FeedUserAuthScanLabelGenerator.class, conf));
slgs.add(ReflectionUtils.newInstance(DefinedSetFilterScanLabelGenerator.class, conf));
}
return slgs;
}
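Wiring in a custom generator is then a one-line configuration change; the generator class below is hypothetical:
// VISIBILITY_LABEL_GENERATOR_CLASS resolves to the key quoted in the javadoc.
conf.set("hbase.regionserver.scan.visibility.label.generator.class",
    "com.example.MyScanLabelGenerator");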
Example 11: getClassByName
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private static Class<?> getClassByName(Configuration conf, String className)
throws ClassNotFoundException {
if (conf != null) {
return conf.getClassByName(className);
}
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = HbaseObjectWritableFor96Migration.class.getClassLoader();
}
return Class.forName(className, true, cl);
}
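A minimal sketch of calling the helper with and without a Configuration (the class name is just an example):
// With a Configuration: delegates to conf.getClassByName().
Class<?> c1 = getClassByName(conf, "org.apache.hadoop.io.Text");
// Without one: tries the context class loader, then this class's own loader.
Class<?> c2 = getClassByName(null, "org.apache.hadoop.io.Text");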
Example 12: runTest
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private void runTest(final JobClient jc, final Configuration conf,
final String jobClass, final String[] args, KillTaskThread killTaskThread,
KillTrackerThread killTrackerThread) throws Exception {
Thread t = new Thread("Job Test") {
public void run() {
try {
Class<?> jobClassObj = conf.getClassByName(jobClass);
int status = ToolRunner.run(conf, (Tool)(jobClassObj.newInstance()),
args);
checkJobExitStatus(status, jobClass);
} catch (Exception e) {
LOG.fatal("JOB " + jobClass + " failed to run", e);
System.exit(-1);
}
}
};
t.setDaemon(true);
t.start();
JobStatus[] jobs;
// get the job ID. This is the job that we just submitted
while ((jobs = jc.jobsToComplete()).length == 0) {
LOG.info("Waiting for the job " + jobClass +" to start");
Thread.sleep(1000);
}
JobID jobId = jobs[jobs.length - 1].getJobID();
RunningJob rJob = jc.getJob(jobId);
if(rJob.isComplete()) {
LOG.error("The last job returned by the querying JobTracker is complete :" +
rJob.getJobID() + " .Exiting the test");
System.exit(-1);
}
while (rJob.getJobState() == JobStatus.PREP) {
LOG.info("JobID : " + jobId + " not started RUNNING yet");
Thread.sleep(1000);
rJob = jc.getJob(jobId);
}
if (killTaskThread != null) {
killTaskThread.setRunningJob(rJob);
killTaskThread.start();
killTaskThread.join();
LOG.info("DONE WITH THE TASK KILL/FAIL TESTS");
}
if (killTrackerThread != null) {
killTrackerThread.setRunningJob(rJob);
killTrackerThread.start();
killTrackerThread.join();
LOG.info("DONE WITH THE TESTS TO DO WITH LOST TASKTRACKERS");
}
t.join();
}