This page collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getClass. If you have been wondering what Configuration.getClass does, how to call it, or what real-world usages look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.
The following 15 code examples of Configuration.getClass are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
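Before diving in, here is a minimal, self-contained sketch of the three-argument overload that most of the examples below rely on. The key my.codec.impl and the Codec types are invented for illustration; only Configuration.getClass and Configuration.setClass are real Hadoop API:

import org.apache.hadoop.conf.Configuration;

public class GetClassDemo {
  public interface Codec {}
  public static class GzipishCodec implements Codec {}   // hypothetical default
  public static class SnappyishCodec implements Codec {} // hypothetical override

  public static void main(String[] args) {
    // false = skip loading core-default.xml / core-site.xml
    Configuration conf = new Configuration(false);

    // Key unset: the supplied default class is returned.
    Class<? extends Codec> c1 =
        conf.getClass("my.codec.impl", GzipishCodec.class, Codec.class);
    System.out.println(c1.getSimpleName()); // GzipishCodec

    // setClass stores the class name; getClass resolves it and verifies
    // that it is assignable to the third (interface) argument.
    conf.setClass("my.codec.impl", SnappyishCodec.class, Codec.class);
    Class<? extends Codec> c2 =
        conf.getClass("my.codec.impl", GzipishCodec.class, Codec.class);
    System.out.println(c2.getSimpleName()); // SnappyishCodec
  }
}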
Example 1: SSLFactory
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Creates an SSLFactory.
 *
 * @param mode SSLFactory mode, client or server.
 * @param conf Hadoop configuration from where the SSLFactory configuration
 *             will be read.
 */
public SSLFactory(Mode mode, Configuration conf) {
  this.conf = conf;
  if (mode == null) {
    throw new IllegalArgumentException("mode cannot be NULL");
  }
  this.mode = mode;
  requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
      DEFAULT_SSL_REQUIRE_CLIENT_CERT);
  Configuration sslConf = readSSLConfiguration(mode);
  Class<? extends KeyStoresFactory> klass
      = conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
          FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
  keystoresFactory = ReflectionUtils.newInstance(klass, sslConf);
  enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
      DEFAULT_SSL_ENABLED_PROTOCOLS);
}
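To plug in a custom factory, the corresponding key is set before constructing the SSLFactory. A hedged sketch, assuming SSLFactory.KEYSTORES_FACTORY_CLASS_KEY is the public constant for hadoop.ssl.keystores.factory.class and MyKeyStoresFactory is a hypothetical KeyStoresFactory implementation:

Configuration conf = new Configuration();
// MyKeyStoresFactory is hypothetical; it must implement KeyStoresFactory.
conf.setClass(SSLFactory.KEYSTORES_FACTORY_CLASS_KEY,
    MyKeyStoresFactory.class, KeyStoresFactory.class);
SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);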
Example 2: getJournalClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Retrieve the implementation class for a journal scheme.
 * @param conf The configuration to retrieve the information from
 * @param uriScheme The URI scheme to look up.
 * @return the class of the journal implementation
 * @throws IllegalArgumentException if no class is configured for the scheme
 */
static Class<? extends JournalManager> getJournalClass(Configuration conf,
    String uriScheme) {
  String key
      = DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + "." + uriScheme;
  Class<? extends JournalManager> clazz = null;
  try {
    clazz = conf.getClass(key, null, JournalManager.class);
  } catch (RuntimeException re) {
    throw new IllegalArgumentException(
        "Invalid class specified for " + uriScheme, re);
  }
  if (clazz == null) {
    LOG.warn("No class configured for " + uriScheme
        + ", " + key + " is empty");
    throw new IllegalArgumentException(
        "No class configured for " + uriScheme);
  }
  return clazz;
}
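Because the lookup key embeds the URI scheme, each scheme can carry its own journal implementation. A sketch, assuming DFS_NAMENODE_EDITS_PLUGIN_PREFIX resolves to dfs.namenode.edits.journal-plugin and MyJournalManager is a hypothetical JournalManager:

Configuration conf = new Configuration();
// For edit-log URIs such as myjournal://host/path:
conf.setClass("dfs.namenode.edits.journal-plugin.myjournal",
    MyJournalManager.class, JournalManager.class);
// getJournalClass(conf, "myjournal") now returns MyJournalManager;
// with the key unset it throws IllegalArgumentException instead.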
Example 3: getFileSystemClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static Class<? extends FileSystem> getFileSystemClass(String scheme,
    Configuration conf) throws IOException {
  if (!FILE_SYSTEMS_LOADED) {
    loadFileSystems();
  }
  Class<? extends FileSystem> clazz = null;
  if (conf != null) {
    clazz = (Class<? extends FileSystem>) conf.getClass(
        "fs." + scheme + ".impl", null);
  }
  if (clazz == null) {
    clazz = SERVICE_FILE_SYSTEMS.get(scheme);
  }
  if (clazz == null) {
    throw new IOException("No FileSystem for scheme: " + scheme);
  }
  return clazz;
}
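The same per-scheme convention drives FileSystem resolution: an explicit fs.<scheme>.impl entry wins, otherwise the ServiceLoader-discovered implementations are consulted. A sketch with a hypothetical MyFileSystem for myfs:// URIs:

Configuration conf = new Configuration();
conf.setClass("fs.myfs.impl", MyFileSystem.class, FileSystem.class);
// getFileSystemClass("myfs", conf) now returns MyFileSystem; if neither the
// key nor a service registration exists, it throws
// IOException("No FileSystem for scheme: myfs").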
Example 4: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  final Class<? extends Random> klass = conf.getClass(
      HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class,
      Random.class);
  try {
    random = ReflectionUtils.newInstance(klass, conf);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Using " + klass.getName() + " as random number generator.");
    }
  } catch (Exception e) {
    LOG.info("Unable to use " + klass.getName() + ". Falling back to " +
        "Java SecureRandom.", e);
    this.random = new SecureRandom();
  }
}
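This is the resolve-then-fall-back pattern: the class is read from configuration, instantiated reflectively, and replaced by a safe default if instantiation fails. A sketch of overriding the generator, assuming the constant HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY resolves to hadoop.security.secure.random.impl:

Configuration conf = new Configuration();
// Any java.util.Random subclass instantiable by ReflectionUtils works here;
// java.security.SecureRandom is used purely as an illustration.
conf.setClass("hadoop.security.secure.random.impl",
    java.security.SecureRandom.class, java.util.Random.class);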
Example 5: addMapper
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Add a mapper that reads from and writes to the queues.
 */
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
    ChainBlockingQueue<KeyValuePair<?, ?>> output,
    TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  Configuration conf = getConf(index);
  Class<?> keyClass = conf.getClass(MAPPER_INPUT_KEY_CLASS, Object.class);
  Class<?> valueClass = conf.getClass(MAPPER_INPUT_VALUE_CLASS, Object.class);
  Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
  Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS,
      Object.class);
  RecordReader rr = new ChainRecordReader(keyClass, valueClass, input, conf);
  RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output,
      conf);
  MapRunner runner = new MapRunner(mappers.get(index), createMapContext(rr,
      rw, context, getConf(index)), rr, rw);
  threads.add(runner);
}
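Note that this example uses the two-argument getClass overload, which performs no interface check and returns a raw Class<?>. A minimal sketch of the difference (the key chain.demo.class is invented):

Configuration conf = new Configuration(false);
conf.set("chain.demo.class", "java.lang.String");

// Two-argument form: returns whatever class the key names, unchecked.
Class<?> untyped = conf.getClass("chain.demo.class", Object.class);

// Three-argument form: would throw a RuntimeException here, because
// java.lang.String is not assignable to java.lang.Number.
// conf.getClass("chain.demo.class", Integer.class, Number.class);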
Example 6: getInstance
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
  String scheme = fs.getUri().getScheme();
  if (scheme == null) {
    LOG.warn("Could not find scheme for uri " +
        fs.getUri() + ", default to hdfs");
    scheme = "hdfs";
  }
  Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
      scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
  FSUtils fsUtils = (FSUtils) ReflectionUtils.newInstance(fsUtilsClass, conf);
  return fsUtils;
}
Example 7: getInstance
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Get an instance of the configured TrashPolicy based on the value
 * of the configuration parameter fs.trash.classname.
 *
 * @param conf the configuration to be used
 * @param fs the file system to be used
 * @return an instance of TrashPolicy
 */
public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
    throws IOException {
  Class<? extends TrashPolicy> trashClass = conf.getClass(
      "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
  TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
  trash.initialize(conf, fs); // initialize TrashPolicy
  return trash;
}
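Swapping in a different policy is then a one-line configuration change. A sketch, assuming a hypothetical AuditingTrashPolicy that extends TrashPolicy:

Configuration conf = new Configuration();
conf.setClass("fs.trash.classname",
    AuditingTrashPolicy.class, TrashPolicy.class); // hypothetical subclass
FileSystem fs = FileSystem.get(conf);
TrashPolicy policy = TrashPolicy.getInstance(conf, fs); // AuditingTrashPolicy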
Example 8: getLoadBalancer
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a load balancer from the given configuration.
 * @param conf the configuration to read the balancer class from
 * @return a {@link LoadBalancer} instance
 */
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Create the balancer
  Class<? extends LoadBalancer> balancerKlass =
      conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
          getDefaultLoadBalancerClass(), LoadBalancer.class);
  return ReflectionUtils.newInstance(balancerKlass, conf);
}
Example 9: HPCApplicationMasterProtocolImpl
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public HPCApplicationMasterProtocolImpl(Configuration conf,
    AllocatedContainersInfo containersInfo) {
  Class<? extends HPCApplicationMaster> appMasterClass = conf.getClass(
      HPCConfiguration.YARN_APPLICATION_HPC_APPLICATIONMASTER_CLASS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_APPLICATIONMASTER_CLASS,
      HPCApplicationMaster.class);
  applicationMaster = ReflectionUtils.newInstance(appMasterClass, conf);
  applicationMaster.setContainersInfo(containersInfo);
}
Author: intel-hpdd | Project: scheduling-connector-for-hadoop | Lines: 10 | Source: HPCApplicationMasterProtocolImpl.java
Example 10: HPCApplicationClientProtocolImpl
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public HPCApplicationClientProtocolImpl(Configuration conf) {
  Class<? extends HPCApplicationClient> appClientClass = conf.getClass(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_CLASS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_CLASS,
      HPCApplicationClient.class);
  appClient = ReflectionUtils.newInstance(appClientClass, conf);
}
Author: intel-hpdd | Project: scheduling-connector-for-hadoop | Lines: 8 | Source: HPCApplicationClientProtocolImpl.java
Example 11: importTable
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Allow the user to inject custom mapper, input, and output formats
 * into the importTable() process.
 */
@Override
@SuppressWarnings("unchecked")
public void importTable(ImportJobContext context)
    throws IOException, ImportException {
  SqoopOptions options = context.getOptions();
  Configuration conf = options.getConf();
  Class<? extends Mapper> mapperClass = (Class<? extends Mapper>)
      conf.getClass(MAPPER_KEY, Mapper.class);
  Class<? extends InputFormat> ifClass = (Class<? extends InputFormat>)
      conf.getClass(INPUT_FORMAT_KEY, TextInputFormat.class);
  Class<? extends OutputFormat> ofClass = (Class<? extends OutputFormat>)
      conf.getClass(OUTPUT_FORMAT_KEY, TextOutputFormat.class);
  Class<? extends ImportJobBase> jobClass = (Class<? extends ImportJobBase>)
      conf.getClass(IMPORT_JOB_KEY, ImportJobBase.class);

  String tableName = context.getTableName();

  // Instantiate the user's chosen ImportJobBase instance.
  ImportJobBase importJob = ReflectionUtils.newInstance(jobClass, conf);

  // And configure the dependencies to inject.
  importJob.setOptions(options);
  importJob.setMapperClass(mapperClass);
  importJob.setInputFormatClass(ifClass);
  importJob.setOutputFormatClass(ofClass);

  importJob.runImport(tableName, context.getJarFile(),
      getSplitColumn(options, tableName), conf);
}
Example 12: getCopyListing
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Public factory method with which the appropriate CopyListing
 * implementation may be retrieved.
 * @param configuration The input configuration.
 * @param credentials Credentials object on which the FS delegation tokens
 *                    are cached.
 * @param options The input options, to help choose the appropriate
 *                CopyListing implementation.
 * @return An instance of the appropriate CopyListing implementation.
 * @throws java.io.IOException if the listing cannot be instantiated
 */
public static CopyListing getCopyListing(Configuration configuration,
    Credentials credentials,
    DistCpOptions options)
    throws IOException {
  if (options.shouldUseDiff()) {
    return new GlobbedCopyListing(configuration, credentials);
  }
  String copyListingClassName = configuration.get(
      DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS, "");
  Class<? extends CopyListing> copyListingClass;
  try {
    if (!copyListingClassName.isEmpty()) {
      copyListingClass = configuration.getClass(
          DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
          GlobbedCopyListing.class, CopyListing.class);
    } else {
      if (options.getSourceFileListing() == null) {
        copyListingClass = GlobbedCopyListing.class;
      } else {
        copyListingClass = FileBasedCopyListing.class;
      }
    }
    copyListingClassName = copyListingClass.getName();
    Constructor<? extends CopyListing> constructor = copyListingClass
        .getDeclaredConstructor(Configuration.class, Credentials.class);
    return constructor.newInstance(configuration, credentials);
  } catch (Exception e) {
    throw new IOException("Unable to instantiate " + copyListingClassName, e);
  }
}
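Unlike most examples on this page, the instantiation here bypasses ReflectionUtils and goes through a (Configuration, Credentials) constructor. A sketch of plugging in a custom listing, assuming DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS resolves to distcp.copy.listing.class and MyCopyListing is a hypothetical CopyListing subclass:

Configuration conf = new Configuration();
// MyCopyListing must extend CopyListing and declare a
// (Configuration, Credentials) constructor for the reflective call above.
conf.setClass(DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
    MyCopyListing.class, CopyListing.class);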
Example 13: getInputPathFilter
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static PathFilter getInputPathFilter(JobContext context) {
  Configuration conf = context.getConfiguration();
  Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null,
      PathFilter.class);
  return (filterClass != null)
      ? (PathFilter) ReflectionUtils.newInstance(filterClass, conf)
      : null;
}
Example 14: getQueueClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
static Class<? extends BlockingQueue<Call>> getQueueClass(
    String prefix, Configuration conf) {
  String name = prefix + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
  Class<?> queueClass = conf.getClass(name, LinkedBlockingQueue.class);
  return CallQueueManager.convertQueueClass(queueClass, Call.class);
}
Example 15: getStrategy
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Returns the class that implements a copy strategy. Looks up the
 * implementation for a particular strategy from s3mapreducecp-default.xml.
 *
 * @param conf Configuration object
 * @param options Handle to input options
 * @return Class implementing the strategy specified in options
 */
public static Class<? extends InputFormat> getStrategy(Configuration conf,
    S3MapReduceCpOptions options) {
  String confLabel = "com.hotels.bdp.circustrain.s3mapreducecp."
      + options.getCopyStrategy().toLowerCase(Locale.getDefault())
      + ".strategy.impl";
  return conf.getClass(confLabel, UniformSizeInputFormat.class,
      InputFormat.class);
}