This article compiles typical usages of the Java method org.apache.hadoop.conf.Configuration.getTrimmedStringCollection. If you are wondering what Configuration.getTrimmedStringCollection does, how to use it, or what real-world calls look like, the curated code examples below should help; you can also explore further usage of the enclosing class, org.apache.hadoop.conf.Configuration.
Shown below are 11 code examples of Configuration.getTrimmedStringCollection, sorted by popularity by default.
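Before the examples, a quick self-contained illustration of what the method does: it reads a comma-separated property value, trims whitespace around each element, drops empty entries, and returns an empty collection (never null) when the key is unset. A minimal sketch; the property name my.servers is made up for illustration:

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;

public class TrimmedCollectionDemo {
  public static void main(String[] args) {
    // 'false' skips loading the default resources; enough for a local demo.
    Configuration conf = new Configuration(false);
    // Note the stray spaces and the trailing comma in the raw value.
    conf.set("my.servers", " host1 , host2 ,host3, ");
    Collection<String> servers = conf.getTrimmedStringCollection("my.servers");
    System.out.println(servers);                                      // [host1, host2, host3]
    System.out.println(conf.getTrimmedStringCollection("missing.key")); // []
  }
}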
Example 1: verifyAndSetRMHAIdsList

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Verify that the configuration contains at least two RM-ids
 * and that RPC addresses are specified for each RM-id,
 * then set the RM-ids.
 */
private static void verifyAndSetRMHAIdsList(Configuration conf) {
  Collection<String> ids =
      conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
  if (ids.size() < 2) {
    throwBadConfigurationException(
        getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
            conf.get(YarnConfiguration.RM_HA_IDS) +
            "\nHA mode requires at least two RMs"));
  }

  StringBuilder setValue = new StringBuilder();
  for (String id : ids) {
    // verify the RM service address configuration for every RM-id
    for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      checkAndSetRMRPCAddress(prefix, id, conf);
    }
    setValue.append(id);
    setValue.append(",");
  }
  conf.set(YarnConfiguration.RM_HA_IDS,
      setValue.substring(0, setValue.length() - 1));
}
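As a rough illustration of the configuration this check expects, the sketch below builds a Configuration with two RM ids and a client RPC address for each. The host names are placeholders, and a real HA setup must also provide the remaining per-id service addresses (scheduler, admin, and so on) or a per-id yarn.resourcemanager.hostname:

Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
// At least two RM ids, as verifyAndSetRMHAIdsList requires;
// getTrimmedStringCollection tolerates the space after the comma.
conf.set(YarnConfiguration.RM_HA_IDS, "rm1, rm2");
// One client RPC address per RM id (hosts are placeholders).
conf.set("yarn.resourcemanager.address.rm1", "rm1.example.com:8032");
conf.set("yarn.resourcemanager.address.rm2", "rm2.example.com:8032");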
Example 2: verifyAndSetCurrentRMHAId

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
private static void verifyAndSetCurrentRMHAId(Configuration conf) {
  String rmId = getRMHAId(conf);
  if (rmId == null) {
    StringBuilder msg = new StringBuilder();
    msg.append("Can not find valid RM_HA_ID. None of ");
    for (String id : conf
        .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
      msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
    }
    msg.append(" are matching" +
        " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
        " specified in HA Configuration");
    throwBadConfigurationException(msg.toString());
  } else {
    Collection<String> ids = getRMHAIds(conf);
    if (!ids.contains(rmId)) {
      throwBadConfigurationException(
          getRMHAIdNeedToBeIncludedMessage(ids.toString(), rmId));
    }
  }
  conf.set(YarnConfiguration.RM_HA_ID, rmId);
}
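If matching against the local address is undesirable, for example in tests, the active id can be pinned explicitly so getRMHAId does not have to probe local interfaces; a minimal sketch:

Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
// Pin this process to rm1; it must be one of the ids listed above,
// or the ids.contains(rmId) check in the example will fail.
conf.set(YarnConfiguration.RM_HA_ID, "rm1");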
Example 3: getNNServiceRpcAddressesForCluster

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Returns the InetSocketAddresses corresponding to the namenodes
 * that manage this cluster. Note this is to be used by datanodes to get
 * the list of namenode addresses to talk to.
 *
 * Returns the namenode address specifically configured for datanodes
 * (using service ports), if found. If not, the regular RPC address
 * configured for other clients is returned.
 *
 * @param conf configuration
 * @return map of nameservice id to (namenode id to InetSocketAddress)
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>>
    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
  // Use the default address as a fallback
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  Collection<String> parentNameServices = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

  if (parentNameServices.isEmpty()) {
    parentNameServices = conf.getTrimmedStringCollection(
        DFSConfigKeys.DFS_NAMESERVICES);
  } else {
    // Ensure that the internal service is indeed in the list of all available
    // nameservices.
    Set<String> availableNameServices = Sets.newHashSet(conf
        .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
    for (String nsId : parentNameServices) {
      if (!availableNameServices.contains(nsId)) {
        throw new IOException("Unknown nameservice: " + nsId);
      }
    }
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
      getAddressesForNsIds(conf, parentNameServices, defaultAddress,
          DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
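The interplay between the two keys read above can be sketched as follows: dfs.internal.nameservices must be a subset of dfs.nameservices, and when it is empty all nameservices are used. The nameservice ids are placeholders:

Configuration conf = new HdfsConfiguration();
// All nameservices known to this cluster (federation).
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
// The datanodes of this cluster report only to ns1; listing an id
// absent from dfs.nameservices would trigger the IOException above.
conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");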
Example 4: getStorageDirs

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
private static Collection<URI> getStorageDirs(Configuration conf,
    String propertyName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.IMPORT) {
    // In the case of IMPORT this will get rid of default directories
    // but will retain directories specified in hdfs-site.xml.
    // When importing an image from a checkpoint, the name-node can
    // start with an empty set of storage directories.
    Configuration cE = new HdfsConfiguration(false);
    cE.addResource("core-default.xml");
    cE.addResource("core-site.xml");
    cE.addResource("hdfs-default.xml");
    Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
    dirNames.removeAll(dirNames2);
    if (dirNames.isEmpty())
      LOG.warn("!!! WARNING !!!" +
          "\n\tThe NameNode currently runs without persistent storage." +
          "\n\tAny changes to the file system meta-data may be lost." +
          "\n\tRecommended actions:" +
          "\n\t\t- shutdown and restart NameNode with configured \""
          + propertyName + "\" in hdfs-site.xml;" +
          "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
          "of the file system meta-data.");
  } else if (dirNames.isEmpty()) {
    dirNames = Collections.singletonList(
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example 5: getSharedEditsDirs

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Returns the edit directories that are shared between the primary and the
 * secondary.
 * @param conf configuration
 * @return collection of edit directories from {@code conf}
 */
public static List<URI> getSharedEditsDirs(Configuration conf) {
  // Don't use getStorageDirs here, because we want an empty default
  // rather than the dir in /tmp.
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  return Util.stringCollectionAsURIs(dirNames);
}
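In an HA deployment this key typically holds a quorum-journal URI; a hedged sketch, assuming the method lives on FSNamesystem as in Apache Hadoop (hosts and the nameservice id are placeholders):

Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
    "qjournal://jn1.example.com:8485;jn2.example.com:8485;"
        + "jn3.example.com:8485/mycluster");
// sharedDirs now holds the single qjournal:// URI.
List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);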
Example 6: getCheckpointDirs

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Retrieve checkpoint dirs from the configuration.
 *
 * @param conf the Configuration
 * @param defaultValue the value to use when the property is not set
 * @return a Collection of URIs representing the values of the
 *         dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
    String defaultValue) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  if (dirNames.size() == 0 && defaultValue != null) {
    dirNames.add(defaultValue);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example 7: getCheckpointEditsDirs

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  if (dirNames.size() == 0 && defaultName != null) {
    dirNames.add(defaultName);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
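Examples 6 and 7 share the same fallback shape: an empty trimmed collection is replaced by a caller-supplied default before conversion to URIs. A hedged distillation of the pattern (the helper name dirsWithDefault and its key parameter are made up):

static Collection<String> dirsWithDefault(Configuration conf,
    String key, String defaultName) {
  Collection<String> names = conf.getTrimmedStringCollection(key);
  if (names.isEmpty() && defaultName != null) {
    names.add(defaultName); // the returned collection is mutable
  }
  return names;
}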
Example 8: getStorageLocations

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
public static List<StorageLocation> getStorageLocations(Configuration conf) {
  Collection<String> rawLocations =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  List<StorageLocation> locations =
      new ArrayList<StorageLocation>(rawLocations.size());

  for (String locationString : rawLocations) {
    final StorageLocation location;
    try {
      location = StorageLocation.parse(locationString);
    } catch (IOException ioe) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + ioe);
      // Ignore the exception.
      continue;
    } catch (SecurityException se) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + se);
      // Ignore the exception.
      continue;
    }
    locations.add(location);
  }
  return locations;
}
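The raw strings parsed here come from dfs.datanode.data.dir, where each comma-separated entry may carry an optional [STORAGETYPE] prefix; a hedged sketch, assuming the method is called via DataNode as in Apache Hadoop (paths are placeholders):

Configuration conf = new HdfsConfiguration();
// Entries without a prefix default to the DISK storage type.
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
    "[SSD]/mnt/ssd0/dfs/data, /mnt/disk0/dfs/data, /mnt/disk1/dfs/data");
List<StorageLocation> locations = DataNode.getStorageLocations(conf);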
Example 9: isSameHdfs

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * @param conf the Configuration of HBase
 * @param srcFs the source FileSystem
 * @param desFs the destination FileSystem
 * @return whether srcFs and desFs are on the same HDFS or not
 */
public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) {
  // By using getCanonicalServiceName, we can make sure both srcFs and desFs
  // show a unified format which contains scheme, host and port.
  String srcServiceName = srcFs.getCanonicalServiceName();
  String desServiceName = desFs.getCanonicalServiceName();

  if (srcServiceName == null || desServiceName == null) {
    return false;
  }
  if (srcServiceName.equals(desServiceName)) {
    return true;
  }
  if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) {
    Collection<String> internalNameServices =
        conf.getTrimmedStringCollection("dfs.internal.nameservices");
    if (!internalNameServices.isEmpty()) {
      return internalNameServices.contains(srcServiceName.split(":")[1]);
    }
  }
  if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
    // If one serviceName is in HA format while the other is in non-HA format,
    // they may still refer to the same FileSystem.
    // For example, srcFs is "ha-hdfs://nameservices" and desFs is "hdfs://activeNamenode:port".
    Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf);
    Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf);
    if (Sets.intersection(srcAddrs, desAddrs).size() > 0) {
      return true;
    }
  }
  return false;
}
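A hedged usage sketch, for example to decide whether a copy between two locations stays within one cluster. The URIs are placeholders, and isSameHdfs is assumed to be in scope:

Configuration conf = HBaseConfiguration.create();
FileSystem srcFs = new Path("hdfs://nn1.example.com:8020/data").getFileSystem(conf);
// With HA configured, this FS's canonical service name looks like "ha-hdfs:mycluster".
FileSystem desFs = new Path("hdfs://mycluster/backup").getFileSystem(conf);
if (isSameHdfs(conf, srcFs, desFs)) {
  // A rename or short-circuit copy may be possible instead of a full copy.
}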
Example 10: getNameServiceIds

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Returns the collection of nameservice ids from the configuration.
 * @param conf configuration
 * @return collection of nameservice ids, or an empty collection if none
 *         are specified
 */
public static Collection<String> getNameServiceIds(Configuration conf) {
  return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
}
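For a federated cluster this key simply lists the logical nameservice ids; a minimal sketch, assuming the method is called via DFSUtil as in Apache Hadoop (ids are placeholders):

Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1, ns2");
// Returns [ns1, ns2]; with the key unset, an empty collection is returned.
Collection<String> nsIds = DFSUtil.getNameServiceIds(conf);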
Example 11: getNameNodeIds

import org.apache.hadoop.conf.Configuration; //import the package/class the method depends on
/**
 * Namenode HighAvailability related configuration.
 * Returns the collection of namenode ids from the configuration, one
 * logical id for each namenode in the HA setup.
 *
 * @param conf configuration
 * @param nsId the nameservice ID to look at, or null for non-federated
 * @return collection of namenode Ids
 */
public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
  String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
  return conf.getTrimmedStringCollection(key);
}
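The key being read is the dfs.ha.namenodes prefix suffixed with the nameservice id; a minimal sketch, assuming the method is reachable via DFSUtil as in Apache Hadoop (ids are placeholders):

Configuration conf = new HdfsConfiguration();
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
// Resolves the key "dfs.ha.namenodes.mycluster" and returns [nn1, nn2].
Collection<String> nnIds = DFSUtil.getNameNodeIds(conf, "mycluster");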