This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getClass. If you have been wondering what Configuration.getClass does, how to call it, or what real code that uses it looks like, the curated method examples here should help. You can also explore further usage examples of its containing class, org.apache.hadoop.conf.Configuration.
The following shows 15 code examples of Configuration.getClass, sorted by popularity by default.
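All fifteen examples share the same pattern: read a Class object from configuration, usually with a default and an upper-bound interface, then instantiate it reflectively. Here is a minimal, self-contained sketch of that pattern; the property name my.plugin.class and the MyPlugin/DefaultPlugin types are hypothetical placeholders, not part of any Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class GetClassSketch {
  // Hypothetical plugin interface and default implementation.
  public interface MyPlugin { void run(); }
  public static class DefaultPlugin implements MyPlugin {
    @Override
    public void run() { System.out.println("default"); }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // setClass validates that DefaultPlugin is assignable to MyPlugin
    // before storing its class name under the key.
    conf.setClass("my.plugin.class", DefaultPlugin.class, MyPlugin.class);
    // getClass(name, default, xface) falls back to the default when the key
    // is unset, and throws a RuntimeException if the configured class does
    // not implement the given interface.
    Class<? extends MyPlugin> klass =
        conf.getClass("my.plugin.class", DefaultPlugin.class, MyPlugin.class);
    // ReflectionUtils.newInstance needs a (possibly non-public) no-arg
    // constructor and calls setConf(conf) if the object is Configurable.
    MyPlugin plugin = ReflectionUtils.newInstance(klass, conf);
    plugin.run();
  }
}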
Example 1: SSLFactory
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Creates an SSLFactory.
 *
 * @param mode SSLFactory mode, client or server.
 * @param conf Hadoop configuration from where the SSLFactory configuration
 *             will be read.
 */
public SSLFactory(Mode mode, Configuration conf) {
  this.conf = conf;
  if (mode == null) {
    throw new IllegalArgumentException("mode cannot be NULL");
  }
  this.mode = mode;
  requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
      DEFAULT_SSL_REQUIRE_CLIENT_CERT);
  Configuration sslConf = readSSLConfiguration(mode);
  Class<? extends KeyStoresFactory> klass
      = conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
          FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
  keystoresFactory = ReflectionUtils.newInstance(klass, sslConf);
  enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
      DEFAULT_SSL_ENABLED_PROTOCOLS);
}
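In the Hadoop source these constants resolve, to the best of my reading, to the properties hadoop.ssl.require.client.cert and hadoop.ssl.keystores.factory.class, so a custom keystores factory could be selected along these lines; MyKeyStoresFactory is a hypothetical implementation:

// MyKeyStoresFactory is hypothetical; it must implement KeyStoresFactory.
conf.setClass("hadoop.ssl.keystores.factory.class",
    MyKeyStoresFactory.class, KeyStoresFactory.class);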
Example 2: getJournalClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Retrieve the implementation class for a Journal scheme.
 * @param conf The configuration to retrieve the information from
 * @param uriScheme The uri scheme to look up.
 * @return the class of the journal implementation
 * @throws IllegalArgumentException if no class is configured for uri
 */
static Class<? extends JournalManager> getJournalClass(Configuration conf,
    String uriScheme) {
  String key
      = DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + "." + uriScheme;
  Class<? extends JournalManager> clazz = null;
  try {
    clazz = conf.getClass(key, null, JournalManager.class);
  } catch (RuntimeException re) {
    throw new IllegalArgumentException(
        "Invalid class specified for " + uriScheme, re);
  }
  if (clazz == null) {
    LOG.warn("No class configured for " + uriScheme
        + ", " + key + " is empty");
    throw new IllegalArgumentException(
        "No class configured for " + uriScheme);
  }
  return clazz;
}
Example 3: getFileSystemClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static Class<? extends FileSystem> getFileSystemClass(String scheme,
    Configuration conf) throws IOException {
  if (!FILE_SYSTEMS_LOADED) {
    loadFileSystems();
  }
  Class<? extends FileSystem> clazz = null;
  if (conf != null) {
    clazz = (Class<? extends FileSystem>) conf.getClass(
        "fs." + scheme + ".impl", null);
  }
  if (clazz == null) {
    clazz = SERVICE_FILE_SYSTEMS.get(scheme);
  }
  if (clazz == null) {
    throw new IOException("No FileSystem for scheme: " + scheme);
  }
  return clazz;
}
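The lookup key is derived from the URI scheme, so registering a custom file system follows the fs.<scheme>.impl pattern visible in the code above; MyFileSystem below is a hypothetical FileSystem subclass.

// Hypothetical: make URIs like myfs://host/path resolve to MyFileSystem.
conf.setClass("fs.myfs.impl", MyFileSystem.class, FileSystem.class);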
Example 4: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  final Class<? extends Random> klass = conf.getClass(
      HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class,
      Random.class);
  try {
    random = ReflectionUtils.newInstance(klass, conf);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Using " + klass.getName() + " as random number generator.");
    }
  } catch (Exception e) {
    LOG.info("Unable to use " + klass.getName() + ". Falling back to " +
        "Java SecureRandom.", e);
    this.random = new SecureRandom();
  }
}
Example 5: addMapper
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Adds a mapper that reads from and writes to the queues.
 */
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
    ChainBlockingQueue<KeyValuePair<?, ?>> output,
    TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  Configuration conf = getConf(index);
  Class<?> keyClass = conf.getClass(MAPPER_INPUT_KEY_CLASS, Object.class);
  Class<?> valueClass = conf.getClass(MAPPER_INPUT_VALUE_CLASS, Object.class);
  Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
  Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS,
      Object.class);
  RecordReader rr = new ChainRecordReader(keyClass, valueClass, input, conf);
  RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output,
      conf);
  MapRunner runner = new MapRunner(mappers.get(index), createMapContext(rr,
      rw, context, getConf(index)), rr, rw);
  threads.add(runner);
}
Example 6: getInstance
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
  String scheme = fs.getUri().getScheme();
  if (scheme == null) {
    LOG.warn("Could not find scheme for uri " +
        fs.getUri() + ", default to hdfs");
    scheme = "hdfs";
  }
  Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
      scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
  FSUtils fsUtils = (FSUtils) ReflectionUtils.newInstance(fsUtilsClass, conf);
  return fsUtils;
}
Example 7: getInstance
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Get an instance of the configured TrashPolicy based on the value
 * of the configuration parameter fs.trash.classname.
 *
 * @param conf the configuration to be used
 * @param fs the file system to be used
 * @return an instance of TrashPolicy
 */
public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
    throws IOException {
  Class<? extends TrashPolicy> trashClass = conf.getClass(
      "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
  TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
  trash.initialize(conf, fs); // initialize TrashPolicy
  return trash;
}
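Wiring a custom policy into this factory only requires setting the key the example reads. A minimal sketch, where MyTrashPolicy stands in for a hypothetical TrashPolicy subclass:

Configuration conf = new Configuration();
conf.setClass("fs.trash.classname", MyTrashPolicy.class, TrashPolicy.class);
TrashPolicy policy = TrashPolicy.getInstance(conf, FileSystem.get(conf));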
Example 8: getLoadBalancer
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a loadbalancer from the given conf.
 * @param conf the configuration to read the balancer class from
 * @return A {@link LoadBalancer}
 */
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Create the balancer
  Class<? extends LoadBalancer> balancerKlass =
      conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
          getDefaultLoadBalancerClass(), LoadBalancer.class);
  return ReflectionUtils.newInstance(balancerKlass, conf);
}
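If I read the HBase constant correctly, HConstants.HBASE_MASTER_LOADBALANCER_CLASS resolves to the property hbase.master.loadbalancer.class, so a custom balancer would be selected roughly like this; MyLoadBalancer is hypothetical:

// Hypothetical balancer override (key name assumed from HConstants).
conf.setClass("hbase.master.loadbalancer.class",
    MyLoadBalancer.class, LoadBalancer.class);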
Example 9: HPCApplicationMasterProtocolImpl
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public HPCApplicationMasterProtocolImpl(Configuration conf,
    AllocatedContainersInfo containersInfo) {
  Class<? extends HPCApplicationMaster> appMasterClass = conf.getClass(
      HPCConfiguration.YARN_APPLICATION_HPC_APPLICATIONMASTER_CLASS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_APPLICATIONMASTER_CLASS,
      HPCApplicationMaster.class);
  applicationMaster = ReflectionUtils.newInstance(appMasterClass, conf);
  applicationMaster.setContainersInfo(containersInfo);
}
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 10, Source: HPCApplicationMasterProtocolImpl.java
Example 10: HPCApplicationClientProtocolImpl
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public HPCApplicationClientProtocolImpl(Configuration conf) {
  Class<? extends HPCApplicationClient> appClientClass = conf.getClass(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_CLASS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_CLASS,
      HPCApplicationClient.class);
  appClient = ReflectionUtils.newInstance(appClientClass, conf);
}
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 8, Source: HPCApplicationClientProtocolImpl.java
Example 11: importTable
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Allow the user to inject custom mapper, input, and output formats
 * into the importTable() process.
 */
@Override
@SuppressWarnings("unchecked")
public void importTable(ImportJobContext context)
    throws IOException, ImportException {
  SqoopOptions options = context.getOptions();
  Configuration conf = options.getConf();
  Class<? extends Mapper> mapperClass = (Class<? extends Mapper>)
      conf.getClass(MAPPER_KEY, Mapper.class);
  Class<? extends InputFormat> ifClass = (Class<? extends InputFormat>)
      conf.getClass(INPUT_FORMAT_KEY, TextInputFormat.class);
  Class<? extends OutputFormat> ofClass = (Class<? extends OutputFormat>)
      conf.getClass(OUTPUT_FORMAT_KEY, TextOutputFormat.class);
  Class<? extends ImportJobBase> jobClass = (Class<? extends ImportJobBase>)
      conf.getClass(IMPORT_JOB_KEY, ImportJobBase.class);
  String tableName = context.getTableName();
  // Instantiate the user's chosen ImportJobBase instance.
  ImportJobBase importJob = ReflectionUtils.newInstance(jobClass, conf);
  // And configure the dependencies to inject
  importJob.setOptions(options);
  importJob.setMapperClass(mapperClass);
  importJob.setInputFormatClass(ifClass);
  importJob.setOutputFormatClass(ofClass);
  importJob.runImport(tableName, context.getJarFile(),
      getSplitColumn(options, tableName), conf);
}
Example 12: getCopyListing
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Public factory method with which the appropriate CopyListing implementation may be retrieved.
 * @param configuration The input configuration.
 * @param credentials Credentials object on which the FS delegation tokens are cached
 * @param options The input Options, to help choose the appropriate CopyListing implementation.
 * @return An instance of the appropriate CopyListing implementation.
 * @throws java.io.IOException Exception, if any
 */
public static CopyListing getCopyListing(Configuration configuration,
    Credentials credentials,
    DistCpOptions options)
    throws IOException {
  if (options.shouldUseDiff()) {
    return new GlobbedCopyListing(configuration, credentials);
  }
  String copyListingClassName = configuration.get(
      DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS, "");
  Class<? extends CopyListing> copyListingClass;
  try {
    if (!copyListingClassName.isEmpty()) {
      copyListingClass = configuration.getClass(
          DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
          GlobbedCopyListing.class, CopyListing.class);
    } else {
      if (options.getSourceFileListing() == null) {
        copyListingClass = GlobbedCopyListing.class;
      } else {
        copyListingClass = FileBasedCopyListing.class;
      }
    }
    copyListingClassName = copyListingClass.getName();
    Constructor<? extends CopyListing> constructor =
        copyListingClass.getDeclaredConstructor(
            Configuration.class, Credentials.class);
    return constructor.newInstance(configuration, credentials);
  } catch (Exception e) {
    throw new IOException("Unable to instantiate " + copyListingClassName, e);
  }
}
Example 13: getInputPathFilter
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static PathFilter getInputPathFilter(JobContext context) {
  Configuration conf = context.getConfiguration();
  Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null, PathFilter.class);
  return (filterClass != null)
      ? (PathFilter) ReflectionUtils.newInstance(filterClass, conf)
      : null;
}
Example 14: getQueueClass
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
static Class<? extends BlockingQueue<Call>> getQueueClass(
    String prefix, Configuration conf) {
  String name = prefix + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
  Class<?> queueClass = conf.getClass(name, LinkedBlockingQueue.class);
  return CallQueueManager.convertQueueClass(queueClass, Call.class);
}
Example 15: getStrategy
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Returns the class that implements a copy strategy. Looks up the implementation
 * for a particular strategy from s3mapreducecp-default.xml.
 *
 * @param conf - Configuration object
 * @param options - Handle to input options
 * @return Class implementing the strategy specified in options.
 */
public static Class<? extends InputFormat> getStrategy(Configuration conf,
    S3MapReduceCpOptions options) {
  String confLabel = "com.hotels.bdp.circustrain.s3mapreducecp."
      + options.getCopyStrategy().toLowerCase(Locale.getDefault())
      + ".strategy.impl";
  return conf.getClass(confLabel, UniformSizeInputFormat.class, InputFormat.class);
}
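As a concrete illustration of the label construction, a copy strategy named "custom" would be looked up under com.hotels.bdp.circustrain.s3mapreducecp.custom.strategy.impl, so an override could be registered as below; MyInputFormat is a hypothetical InputFormat implementation.

// Hypothetical: register an InputFormat for a strategy named "custom".
conf.setClass("com.hotels.bdp.circustrain.s3mapreducecp.custom.strategy.impl",
    MyInputFormat.class, InputFormat.class);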