This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getStringCollection. If you have been wondering what Configuration.getStringCollection does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.conf.Configuration.
Below, 9 code examples of Configuration.getStringCollection are shown, sorted by popularity by default.
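Before the examples, a quick note on the method itself: getStringCollection reads the named configuration value as a comma-separated list and returns it as a Collection<String>; an unset key yields an empty collection rather than null. A minimal sketch (the key name my.plugin.classes is made up purely for illustration):

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;

public class GetStringCollectionDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("my.plugin.classes", "com.example.A,com.example.B");
    Collection<String> classes = conf.getStringCollection("my.plugin.classes");
    System.out.println(classes); // prints [com.example.A, com.example.B]
    // An unset key returns an empty collection, not null.
    System.out.println(conf.getStringCollection("missing.key").isEmpty()); // prints true
  }
}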
Example 1: getProviders
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static List<CredentialProvider> getProviders(Configuration conf) throws IOException {
  List<CredentialProvider> result = new ArrayList<CredentialProvider>();
  for (String path : conf.getStringCollection(CREDENTIAL_PROVIDER_PATH)) {
    try {
      URI uri = new URI(path);
      boolean found = false;
      for (CredentialProviderFactory factory : serviceLoader) {
        CredentialProvider kp = factory.createProvider(uri, conf);
        if (kp != null) {
          result.add(kp);
          found = true;
          break;
        }
      }
      if (!found) {
        throw new IOException("No CredentialProviderFactory for " + uri + " in " +
            CREDENTIAL_PROVIDER_PATH);
      }
    } catch (URISyntaxException error) {
      throw new IOException("Bad configuration of " + CREDENTIAL_PROVIDER_PATH +
          " at " + path, error);
    }
  }
  return result;
}
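For context, CREDENTIAL_PROVIDER_PATH here is the key hadoop.security.credential.provider.path, which holds a comma-separated list of provider URIs; each URI is matched against the ServiceLoader-discovered factories. A hedged usage sketch (the keystore path is made up):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class CredentialProvidersDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The jceks scheme is handled by the built-in JavaKeyStoreProvider factory.
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://file/tmp/test.jceks");
    List<CredentialProvider> providers = CredentialProviderFactory.getProviders(conf);
    System.out.println("Resolved " + providers.size() + " provider(s)");
  }
}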
Example 2: getProviders
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static List<KeyPairProvider> getProviders(Configuration conf) throws IOException {
  List<KeyPairProvider> result = new ArrayList<>();
  for (String path : conf.getStringCollection(KEY_PROVIDER_PATH)) {
    try {
      URI uri = new URI(path);
      KeyPairProvider kp = get(uri, conf);
      if (kp != null) {
        result.add(kp);
      } else {
        throw new IOException("No KeyPairProviderFactory for " + uri + " in " +
            KEY_PROVIDER_PATH);
      }
    } catch (URISyntaxException error) {
      throw new IOException("Bad configuration of " + KEY_PROVIDER_PATH +
          " at " + path, error);
    }
  }
  return result;
}
Example 3: getProviders
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static List<KeyProvider> getProviders(Configuration conf) throws IOException {
  List<KeyProvider> result = new ArrayList<KeyProvider>();
  for (String path : conf.getStringCollection(KEY_PROVIDER_PATH)) {
    try {
      URI uri = new URI(path);
      KeyProvider kp = get(uri, conf);
      if (kp != null) {
        result.add(kp);
      } else {
        throw new IOException("No KeyProviderFactory for " + uri + " in " +
            KEY_PROVIDER_PATH);
      }
    } catch (URISyntaxException error) {
      throw new IOException("Bad configuration of " + KEY_PROVIDER_PATH +
          " at " + path, error);
    }
  }
  return result;
}
Example 4: getProviders
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static List<ReEncryptionKeyProviderInterface> getProviders(Configuration conf)
    throws IOException {
  List<ReEncryptionKeyProviderInterface> result = new ArrayList<>();
  for (String path : conf.getStringCollection(KEY_PROVIDER_PATH)) {
    try {
      URI uri = new URI(path);
      ReEncryptionKeyProviderInterface kp = get(uri, conf);
      if (kp != null) {
        result.add(kp);
      } else {
        throw new IOException("No ReEncryptionKeyProviderFactory for " + uri + " in " +
            KEY_PROVIDER_PATH);
      }
    } catch (URISyntaxException error) {
      throw new IOException("Bad configuration of " + KEY_PROVIDER_PATH +
          " at " + path, error);
    }
  }
  return result;
}
Example 5: buildDependencyClasspath
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}.
 * Also exposed to shell scripts via `bin/hbase mapredcp`.
 */
public static String buildDependencyClasspath(Configuration conf) {
  if (conf == null) {
    throw new IllegalArgumentException("Must provide a configuration object.");
  }
  Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
  if (paths.size() == 0) {
    throw new IllegalArgumentException("Configuration contains no tmpjars.");
  }
  StringBuilder sb = new StringBuilder();
  for (String s : paths) {
    // entries can take the form 'file:/path/to/file.jar'.
    int idx = s.indexOf(":");
    if (idx != -1) s = s.substring(idx + 1);
    if (sb.length() > 0) sb.append(File.pathSeparator);
    sb.append(s);
  }
  return sb.toString();
}
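This example appears to come from HBase's TableMapReduceUtil, as the bin/hbase mapredcp note suggests. A hedged usage sketch (the jar paths are made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

public class DependencyClasspathDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated jar entries, as a job client would stage them.
    conf.set("tmpjars", "file:/opt/libs/a.jar,file:/opt/libs/b.jar");
    // The scheme prefix before the first ':' is stripped and entries are joined
    // with File.pathSeparator; the HashSet means entry order is not guaranteed.
    System.out.println(TableMapReduceUtil.buildDependencyClasspath(conf));
  }
}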
Example 6: initAuditLoggers
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private List<AuditLogger> initAuditLoggers(Configuration conf) {
  // Initialize the custom access loggers if configured.
  Collection<String> alClasses = conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
  List<AuditLogger> auditLoggers = Lists.newArrayList();
  if (alClasses != null && !alClasses.isEmpty()) {
    for (String className : alClasses) {
      try {
        AuditLogger logger;
        if (DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME.equals(className)) {
          logger = new DefaultAuditLogger();
        } else {
          logger = (AuditLogger) Class.forName(className).newInstance();
        }
        logger.initialize(conf);
        auditLoggers.add(logger);
      } catch (RuntimeException re) {
        throw re;
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }
  // Make sure there is at least one logger installed.
  if (auditLoggers.isEmpty()) {
    auditLoggers.add(new DefaultAuditLogger());
  }
  // Add audit logger to calculate top users
  if (topConf.isEnabled) {
    topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
    auditLoggers.add(new TopAuditLogger(topMetrics));
  }
  return Collections.unmodifiableList(auditLoggers);
}
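The key read here, DFS_NAMENODE_AUDIT_LOGGERS_KEY, is dfs.namenode.audit.loggers; the literal name "default" selects the built-in DefaultAuditLogger, while any other entry is loaded reflectively as a class name. A hedged configuration sketch (com.example.MyAuditLogger is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AuditLoggersConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Install both the built-in logger and a custom implementation; the custom
    // class must implement AuditLogger and have a no-arg constructor.
    conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY,
        "default,com.example.MyAuditLogger");
  }
}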
Example 7: init
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable)
    throws IOException {
  Collection<String> operations = conf.getStringCollection(ALLOWED_OPERATIONS);
  if (operations.isEmpty() || operations.contains(AllowedOperations.all.name())) {
    doAppends = doSyncs = true;
  } else if (operations.contains(AllowedOperations.none.name())) {
    doAppends = doSyncs = false;
  } else {
    doAppends = operations.contains(AllowedOperations.append.name());
    doSyncs = operations.contains(AllowedOperations.sync.name());
  }
  LOG.info("IOTestWriter initialized with appends " + (doAppends ? "enabled" : "disabled") +
      " and syncs " + (doSyncs ? "enabled" : "disabled"));
  super.init(fs, path, conf, overwritable);
}
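A hedged sketch of the configuration this init method reacts to, assuming the ALLOWED_OPERATIONS key constant and the AllowedOperations enum from the surrounding test provider are in scope:

// Enable appends but not syncs for the writer above.
Configuration conf = new Configuration();
conf.set(ALLOWED_OPERATIONS, AllowedOperations.append.name());
// After init(fs, path, conf, overwritable): doAppends == true, doSyncs == false.
// Setting the value to "all" (or leaving it unset) enables both; "none" disables both.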
Example 8: IOTestWAL
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Create an edit log at the given <code>dir</code> location.
 *
 * You should never have to load an existing log. If there is a log at
 * startup, it should have already been processed and deleted by the time the
 * WAL object is started up.
 *
 * @param fs filesystem handle
 * @param rootDir path under which logs and oldlogs are stored
 * @param logDir dir where WALs are stored
 * @param archiveDir dir where WALs are archived
 * @param conf configuration to use
 * @param listeners listeners on WAL events. Listeners passed here will
 *        be registered before we do anything else, e.g. before the
 *        constructor calls {@link #rollWriter()}.
 * @param failIfWALExists if true, an IOException will be thrown if files related to this WAL
 *        already exist
 * @param prefix should always be the hostname and port in a distributed environment, and
 *        it will be URL encoded before being used.
 *        If prefix is null, "wal" will be used
 * @param suffix will be URL encoded. null is treated as empty. A non-empty suffix must start
 *        with {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
 * @throws IOException
 */
public IOTestWAL(final FileSystem fs, final Path rootDir, final String logDir,
    final String archiveDir, final Configuration conf,
    final List<WALActionsListener> listeners,
    final boolean failIfWALExists, final String prefix, final String suffix)
    throws IOException {
  super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix);
  Collection<String> operations = conf.getStringCollection(ALLOWED_OPERATIONS);
  doFileRolls = operations.isEmpty() || operations.contains(AllowedOperations.all.name()) ||
      operations.contains(AllowedOperations.fileroll.name());
  initialized = true;
  LOG.info("Initialized with file rolling " + (doFileRolls ? "enabled" : "disabled"));
}
Example 9: getRMHAIds
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * @param conf configuration to read the RM HA IDs from
 * @return the configured RM IDs
 */
public static Collection<String> getRMHAIds(Configuration conf) {
  return conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
}
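YarnConfiguration.RM_HA_IDS is the key yarn.resourcemanager.ha.rm-ids, which lists the logical IDs of the ResourceManagers in an HA setup. A minimal sketch of the round trip:

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmHaIdsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    Collection<String> ids = conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
    System.out.println(ids); // prints [rm1, rm2]
  }
}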