This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setIfUnset. If you have been wondering what exactly Configuration.setIfUnset does, how to use it, and where to find examples of it, the curated code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.conf.Configuration.
The following shows 7 code examples of Configuration.setIfUnset, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
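Before the project examples, here is a minimal, self-contained sketch of the method's core semantics (the class name and property keys are invented for illustration): setIfUnset(key, value) writes the value only when the key currently has no value, so anything set earlier, whether explicitly or via loaded resources, always wins.

import org.apache.hadoop.conf.Configuration;

public class SetIfUnsetDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // don't load default resources

    conf.set("my.explicit.key", "user-value");      // explicitly set up front

    conf.setIfUnset("my.explicit.key", "fallback"); // no-op: key already has a value
    conf.setIfUnset("my.missing.key", "fallback");  // takes effect: key was unset

    System.out.println(conf.get("my.explicit.key")); // prints "user-value"
    System.out.println(conf.get("my.missing.key"));  // prints "fallback"
  }
}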
Example 1: setConfiguration
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Update the configuration for the {@link Constraint}; does not change the
 * order in which the constraint is run.
 *
 * @param desc
 *          {@link HTableDescriptor} to update
 * @param clazz
 *          {@link Constraint} to update
 * @param configuration
 *          to update the {@link Constraint} with.
 * @throws IOException
 *           if the Constraint was not stored correctly
 * @throws IllegalArgumentException
 *           if the Constraint was not present on this table.
 */
public static void setConfiguration(HTableDescriptor desc,
    Class<? extends Constraint> clazz, Configuration configuration)
    throws IOException, IllegalArgumentException {
  // get the entry for this class
  Pair<String, String> e = getKeyValueForClass(desc, clazz);
  if (e == null) {
    throw new IllegalArgumentException("Constraint: " + clazz.getName()
        + " is not associated with this table.");
  }
  // clone over the configuration elements
  Configuration conf = new Configuration(configuration);
  // read in the previous info about the constraint
  Configuration internal = readConfiguration(e.getSecond());
  // update the fields based on the previous settings
  conf.setIfUnset(ENABLED_KEY, internal.get(ENABLED_KEY));
  conf.setIfUnset(PRIORITY_KEY, internal.get(PRIORITY_KEY));
  // update the current value
  writeConstraint(desc, e.getFirst(), conf);
}
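Note the merge order here: the caller's configuration takes precedence for every key it actually sets, while setIfUnset carries the previously stored ENABLED_KEY and PRIORITY_KEY values forward when the caller did not supply them, so updating a constraint's options never silently toggles whether it is enabled or changes when it runs.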
Example 2: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
      String.valueOf(DEFAULT_REGION_REPLICATION));
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
      StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));
  conf.setBoolean("hbase.table.sanity.checks", true);
  // enable async wal replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);
  super.setConf(conf);
}
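The two setIfUnset calls build test-scoped keys of the form <TEST_NAME>.<option>, so values supplied externally for this particular test win and the constants here only fill in defaults; the remaining settings use plain set* calls because the test needs them unconditionally.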
Example 3: propagateOptionsToJob
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected void propagateOptionsToJob(Job job) {
  super.propagateOptionsToJob(job);
  SqoopOptions opts = context.getOptions();
  Configuration conf = job.getConfiguration();
  conf.setIfUnset("pgbulkload.bin", "pg_bulkload");
  if (opts.getNullStringValue() != null) {
    conf.set("pgbulkload.null.string", opts.getNullStringValue());
  }
  setDelimiter("pgbulkload.input.field.delim",
               opts.getInputFieldDelim(),
               conf);
  setDelimiter("pgbulkload.input.record.delim",
               opts.getInputRecordDelim(),
               conf);
  setDelimiter("pgbulkload.input.enclosedby",
               opts.getInputEnclosedBy(),
               conf);
  setDelimiter("pgbulkload.input.escapedby",
               opts.getInputEscapedBy(),
               conf);
  conf.setBoolean("pgbulkload.input.encloserequired",
                  opts.isInputEncloseRequired());
  conf.setIfUnset("pgbulkload.check.constraints", "YES");
  conf.setIfUnset("pgbulkload.parse.errors", "INFINITE");
  conf.setIfUnset("pgbulkload.duplicate.errors", "INFINITE");
  conf.set("mapred.jar", context.getJarFile());
  conf.setBoolean("mapred.map.tasks.speculative.execution", false);
  conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
  conf.setInt("mapred.map.max.attempts", 1);
  conf.setInt("mapred.reduce.max.attempts", 1);
}
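Notice the split between setIfUnset for the pg_bulkload knobs, which lets user-supplied values survive, and unconditional set/setBoolean/setInt for the execution settings; speculative execution and retries are forced off, presumably because a duplicate task attempt would load the same rows into PostgreSQL twice.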
Example 4: setupDatanodeAddress
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
    boolean checkDataNodeAddrConfig) throws IOException {
  if (setupHostsFile) {
    String hostsFile = conf.get(DFS_HOSTS, "").trim();
    if (hostsFile.length() == 0) {
      throw new IOException("Parameter dfs.hosts is not setup in conf");
    }
    // Setup datanode in the include file, if it is defined in the conf
    String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, address);
    }
    addToFile(hostsFile, address);
    LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
  } else {
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    }
  }
  if (checkDataNodeAddrConfig) {
    conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  } else {
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  }
}
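The checkDataNodeAddrConfig flag picks between the two write styles: when true, setIfUnset leaves any datanode addresses already present in the configuration alone (useful when the caller pre-assigns ports); when false, set forces the loopback defaults unconditionally.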
Example 5: checkShortCircuitReadBufferSize
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Check if the short-circuit read buffer size is set and, if not, set it to the
 * value HBase wants.
 * @param conf configuration to check and, if needed, update
 */
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
  final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
  final int notSet = -1;
  // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in hadoop-2
  final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
  int size = conf.getInt(dfsKey, notSet);
  // If a size is set, return -- we will use it.
  if (size != notSet) return;
  // But the short-circuit buffer size is normally not set. Put in place the size HBase wants.
  int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
  conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
}
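There is a two-level default at work: an explicitly configured dfs.client.read.shortcircuit.buffer.size always wins via the early return; otherwise HBase injects either the operator-supplied hbase.dfs.client.read.shortcircuit.buffer.size or twice the default block size. The final setIfUnset is effectively a plain set here, since the early return already guarantees the key is unset.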
Example 6: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  // default replication for this test is 3
  String clazz = this.getClass().getSimpleName();
  conf.setIfUnset(String.format("%s.%s", clazz, LoadTestTool.OPT_REGION_REPLICATION),
      Integer.toString(DEFAULT_REGION_REPLICATION));
}
Example 7: addHiveConfigs
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Add Hive configuration properties to a Configuration object without overriding
 * properties that are already set.
 * @param hiveConf source Hive configuration to copy entries from
 * @param conf target configuration to update
 */
public static void addHiveConfigs(Configuration hiveConf, Configuration conf) {
  for (Map.Entry<String, String> item : hiveConf) {
    conf.setIfUnset(item.getKey(), item.getValue());
  }
}
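To close, here is a hypothetical caller (the class name and property keys are assumptions for illustration, with Example 7's helper inlined so the sketch compiles on its own) showing that the merge preserves whatever the target configuration already has:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class AddHiveConfigsDemo {
  // Example 7's helper, inlined so the sketch is self-contained
  public static void addHiveConfigs(Configuration hiveConf, Configuration conf) {
    for (Map.Entry<String, String> item : hiveConf) {
      conf.setIfUnset(item.getKey(), item.getValue());
    }
  }

  public static void main(String[] args) {
    Configuration hiveConf = new Configuration(false);
    hiveConf.set("hive.exec.scratchdir", "/tmp/hive"); // hypothetical Hive-side value
    hiveConf.set("mapreduce.job.queuename", "hive");   // collides with the job's own setting

    Configuration jobConf = new Configuration(false);
    jobConf.set("mapreduce.job.queuename", "etl");     // already set on the job

    addHiveConfigs(hiveConf, jobConf);

    System.out.println(jobConf.get("mapreduce.job.queuename")); // prints "etl" (kept)
    System.out.println(jobConf.get("hive.exec.scratchdir"));    // prints "/tmp/hive" (merged in)
  }
}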