This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getStrings. If you are wondering what Configuration.getStrings does, how to call it, or where to find sample code for it, the curated examples below should help. You may also want to explore further usage of its enclosing class, org.apache.hadoop.conf.Configuration.
The 15 code examples of Configuration.getStrings listed below are sorted by popularity by default.
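Before the examples, a minimal sketch of the method's contract may help: getStrings splits a comma-separated property value into a trimmed String[], and the single-argument form returns null when the property is unset, while the vararg form falls back to the supplied defaults. The property names demo.hosts and demo.unset below are hypothetical.

import org.apache.hadoop.conf.Configuration;

public class GetStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // setStrings stores the values as one comma-separated string.
    conf.setStrings("demo.hosts", "a.example.com", "b.example.com");

    String[] hosts = conf.getStrings("demo.hosts");         // ["a.example.com", "b.example.com"]
    String[] missing = conf.getStrings("demo.unset");       // null: unset key, no default supplied
    String[] fallback = conf.getStrings("demo.unset", "x"); // ["x"]: the vararg defaults kick in

    System.out.println(hosts.length + ", " + missing + ", " + fallback[0]);
  }
}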
Example 1: SSLFactory
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Creates an SSLFactory.
 *
 * @param mode SSLFactory mode, client or server.
 * @param conf Hadoop configuration from where the SSLFactory configuration
 *             will be read.
 */
public SSLFactory(Mode mode, Configuration conf) {
  this.conf = conf;
  if (mode == null) {
    throw new IllegalArgumentException("mode cannot be NULL");
  }
  this.mode = mode;
  requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
      DEFAULT_SSL_REQUIRE_CLIENT_CERT);
  Configuration sslConf = readSSLConfiguration(mode);

  Class<? extends KeyStoresFactory> klass
      = conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
          FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
  keystoresFactory = ReflectionUtils.newInstance(klass, sslConf);

  enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
      DEFAULT_SSL_ENABLED_PROTOCOLS);
}
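A hedged usage sketch for the constructor above: in stock Hadoop the SSL_ENABLED_PROTOCOLS key should be "hadoop.ssl.enabled.protocols", and the factory still needs init() before use. The protocol choice below is an assumption about your environment, not part of the original example.

Configuration conf = new Configuration();
conf.set("hadoop.ssl.enabled.protocols", "TLSv1.2"); // read back via conf.getStrings(...)
SSLFactory clientFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
clientFactory.init(); // fails on bad keystore/truststore configuration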
Example 2: instantiateFactories
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Create the ManagerFactory instances that should populate
 * the factories list.
 */
private void instantiateFactories(Configuration conf) {
  loadManagersFromConfDir(conf);
  String [] classNameArray =
      conf.getStrings(FACTORY_CLASS_NAMES_KEY,
          DEFAULT_FACTORY_CLASS_NAMES_ARR);

  for (String className : classNameArray) {
    try {
      className = className.trim(); // Ignore leading/trailing whitespace.
      ManagerFactory factory = ReflectionUtils.newInstance(
          (Class<? extends ManagerFactory>)
              conf.getClassByName(className), conf);
      LOG.debug("Loaded manager factory: " + className);
      factories.add(factory);
    } catch (ClassNotFoundException cnfe) {
      LOG.error("Could not load ManagerFactory " + className
          + " (not found)");
    }
  }
}
Example 3: getKeyValues
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Retrieve a list of key-value pairs from the configuration, stored under the provided key.
 *
 * @param conf configuration to retrieve kvps from
 * @param key key under which the key-value pairs are stored
 * @param delimiter character used to separate the key from the value within each pair
 * @return the list of kvps stored under key in conf, or null if the key isn't present.
 * @see #setKeyValues(Configuration, String, Collection, char)
 */
public static List<Map.Entry<String, String>> getKeyValues(Configuration conf, String key,
    char delimiter) {
  String[] kvps = conf.getStrings(key);
  if (kvps == null) {
    return null;
  }

  List<Map.Entry<String, String>> rtn = Lists.newArrayList();
  for (String kvp : kvps) {
    String[] splitKvp = StringUtils.split(kvp, delimiter);
    if (splitKvp.length != 2) {
      throw new IllegalArgumentException(
          "Expected key value pair for configuration key '" + key + "' to be of form '<key>"
              + delimiter + "<value>'; was " + kvp + " instead");
    }
    rtn.add(new AbstractMap.SimpleImmutableEntry<String, String>(splitKvp[0], splitKvp[1]));
  }
  return rtn;
}
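A small usage sketch, assuming the method above is in scope; demo.kvps is a hypothetical key, and '=' serves as the key/value delimiter:

Configuration conf = new Configuration();
conf.setStrings("demo.kvps", "user=alice", "role=admin"); // hypothetical pairs
List<Map.Entry<String, String>> pairs = getKeyValues(conf, "demo.kvps", '=');
// pairs -> [user=alice, role=admin]; a malformed entry such as "oops"
// would trigger the IllegalArgumentException above.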
Example 4: getNodeHealthScriptRunner
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static NodeHealthScriptRunner getNodeHealthScriptRunner(Configuration conf) {
  String nodeHealthScript =
      conf.get(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_PATH);
  if (!NodeHealthScriptRunner.shouldRun(nodeHealthScript)) {
    // Message retained verbatim from the Hadoop source; it signals that
    // no usable health-check script is configured.
    LOG.info("Abey khali");
    return null;
  }
  long nmCheckintervalTime = conf.getLong(
      YarnConfiguration.NM_HEALTH_CHECK_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS);
  long scriptTimeout = conf.getLong(
      YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS,
      YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS);
  String[] scriptArgs = conf.getStrings(
      YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_OPTS, new String[] {});
  return new NodeHealthScriptRunner(nodeHealthScript,
      nmCheckintervalTime, scriptTimeout, scriptArgs);
}
Example 5: testDistCacheEmulator
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Tests the {@code configureDistCacheFiles} method.
 */
@Test (timeout=2000)
public void testDistCacheEmulator() throws Exception {
  Configuration conf = new Configuration();
  configureDummyDistCacheFiles(conf);
  File ws = new File("target" + File.separator + this.getClass().getName());
  Path ioPath = new Path(ws.getAbsolutePath());

  DistributedCacheEmulator dce = new DistributedCacheEmulator(conf, ioPath);
  JobConf jobConf = new JobConf(conf);
  jobConf.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
  File fin = new File("src" + File.separator + "test" + File.separator
      + "resources" + File.separator + "data" + File.separator
      + "wordcount.json");
  dce.init(fin.getAbsolutePath(), JobCreator.LOADJOB, true);
  dce.configureDistCacheFiles(conf, jobConf);

  String[] caches = conf.getStrings(MRJobConfig.CACHE_FILES);
  String[] tmpfiles = conf.getStrings("tmpfiles");
  // configureDistCacheFiles should fill both caches and tmpfiles
  // from the MRJobConfig.CACHE_FILES property.
  assertEquals(6, (caches == null ? 0 : caches.length)
      + (tmpfiles == null ? 0 : tmpfiles.length));
}
Example 6: registerCustomFilter
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void registerCustomFilter(Configuration conf) {
  String[] filterList = conf.getStrings(Constants.CUSTOM_FILTERS);
  if (filterList != null) {
    for (String filterClass : filterList) {
      String[] filterPart = filterClass.split(":");
      if (filterPart.length != 2) {
        LOG.warn(
            "Invalid filter specification " + filterClass + " - skipping");
      } else {
        ParseFilter.registerFilter(filterPart[0], filterPart[1]);
      }
    }
  }
}
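A hedged wiring example for the method above: as the parsing code implies, each entry under Constants.CUSTOM_FILTERS takes the form "<filterName>:<filterClassName>". The filter class below is hypothetical.

Configuration conf = HBaseConfiguration.create();
conf.setStrings(Constants.CUSTOM_FILTERS, "MyFilter:com.example.rest.MyFilter");
// registerCustomFilter(conf) would then map the name "MyFilter" to that
// class via ParseFilter.registerFilter(...).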
Example 7: getSaslProperties
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
static Map<String, String> getSaslProperties(Configuration conf) {
  Map<String, String> saslProps = new TreeMap<String, String>();
  String[] qop = conf.getStrings(HADOOP_RPC_PROTECTION_NON_WHITELIST,
      QualityOfProtection.PRIVACY.toString());

  for (int i = 0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }

  saslProps.put(Sasl.QOP, StringUtils.join(",", qop));
  saslProps.put(Sasl.SERVER_AUTH, "true");
  return saslProps;
}
Example 8: getFileSizes
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static long[] getFileSizes(Configuration conf, String key) {
  String[] strs = conf.getStrings(key);
  if (strs == null) {
    return null;
  }
  long[] result = new long[strs.length];
  for (int i = 0; i < strs.length; ++i) {
    result[i] = Long.parseLong(strs[i]);
  }
  return result;
}
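The example stores sizes as decimal strings under a single key; a matching setter, sketched here as a hypothetical helper named setFileSizes, shows the round trip:

private static void setFileSizes(Configuration conf, String key, long[] sizes) {
  String[] strs = new String[sizes.length];
  for (int i = 0; i < sizes.length; i++) {
    strs[i] = Long.toString(sizes[i]); // parsed back by Long.parseLong above
  }
  conf.setStrings(key, strs); // stored comma-separated, read back by getFileSizes
}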
Example 9: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Configure the mapping by extracting any mappings defined in the
 * {@link #KEY_HADOOP_CONFIGURED_NODE_MAPPING} field.
 * @param conf new configuration
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf != null) {
    String[] mappings = conf.getStrings(KEY_HADOOP_CONFIGURED_NODE_MAPPING);
    if (mappings != null) {
      for (String str : mappings) {
        String host = str.substring(0, str.indexOf('='));
        String rack = str.substring(str.indexOf('=') + 1);
        addNodeToRack(host, rack);
      }
    }
  }
}
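A configuration sketch for the mapping above, assuming each list entry is host=rack as the parsing code implies. The hosts and racks are hypothetical, and the literal key behind KEY_HADOOP_CONFIGURED_NODE_MAPPING is not shown in this excerpt.

Configuration conf = new Configuration();
conf.setStrings(KEY_HADOOP_CONFIGURED_NODE_MAPPING,
    "node1.example.com=/rack1", "node2.example.com=/rack2");
mapping.setConf(conf); // mapping: an instance of the surrounding class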
Example 10: SerializationFactory
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  for (String serializerName : conf.getStrings("io.serializations",
      new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"})) {
    add(conf, serializerName);
  }
}
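A hedged example of feeding that property: the two serializer class names below are the stock Hadoop ones, listed in the order the factory should try them.

Configuration conf = new Configuration();
conf.setStrings("io.serializations",
    "org.apache.hadoop.io.serializer.WritableSerialization",
    "org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization");
SerializationFactory factory = new SerializationFactory(conf);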
Example 11: getFileSizes
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static long[] getFileSizes(Configuration conf, String key) {
  String[] strs = conf.getStrings(key);
  if (strs == null) {
    return null;
  }
  long[] result = new long[strs.length];
  for (int i = 0; i < strs.length; ++i) {
    result[i] = Long.parseLong(strs[i]);
  }
  return result;
}
Example 12: registerFilters
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Adds the option to pre-load filters at startup.
 *
 * @param conf The current configuration instance.
 */
protected static void registerFilters(Configuration conf) {
  String[] filters = conf.getStrings("hbase.thrift.filters");
  if (filters != null) {
    for (String filterClass : filters) {
      String[] filterPart = filterClass.split(":");
      if (filterPart.length != 2) {
        log.warn("Invalid filter specification " + filterClass + " - skipping");
      } else {
        ParseFilter.registerFilter(filterPart[0], filterPart[1]);
      }
    }
  }
}
Example 13: getZKQuorumServersStringFromHbaseConfig
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Return the ZK Quorum servers string given the specified configuration.
 *
 * @param conf configuration to read the quorum and client-port settings from
 * @return Quorum servers String
 */
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
  String defaultClientPort = Integer.toString(
      conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT));

  // Build the ZK quorum server string with "server:clientport" list, separated by ','
  final String[] serverHosts =
      conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
  return buildZKQuorumServerString(serverHosts, defaultClientPort);
}
Example 14: checkCodecs
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Run test on configured codecs to make sure supporting libs are in place.
 *
 * @param c configuration listing the codecs to verify
 * @throws IOException if a configured codec is not supported
 */
private static void checkCodecs(final Configuration c) throws IOException {
  // check to see if the codec list is available:
  String[] codecs = c.getStrings("hbase.regionserver.codecs", (String[]) null);
  if (codecs == null) return;
  for (String codec : codecs) {
    if (!CompressionTest.testCompression(codec)) {
      throw new IOException(
          "Compression codec " + codec + " not supported, aborting RS construction");
    }
  }
}
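A usage sketch for the check above: require two codecs at region server startup. Whether snappy and lz4 native support is actually present on your nodes is an assumption; if either compression test fails, checkCodecs throws and construction aborts.

Configuration conf = HBaseConfiguration.create();
conf.setStrings("hbase.regionserver.codecs", "snappy", "lz4");
checkCodecs(conf); // throws IOException when a codec test fails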
Example 15: testCannotCreatePageBlobByDefault
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testCannotCreatePageBlobByDefault() throws Exception {
  // Verify that the page blob directory list configuration setting
  // is not set in the default configuration.
  Configuration conf = new Configuration();
  String[] rawPageBlobDirs =
      conf.getStrings(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES);
  assertTrue(rawPageBlobDirs == null);
}