

Java Configuration.setIfUnset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setIfUnset. If you are wondering what Configuration.setIfUnset does, how to use it, or where to find examples, the curated code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 7 code examples of the Configuration.setIfUnset method, sorted by popularity.
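Before the examples, a minimal self-contained sketch of the method's contract may help: setIfUnset writes a value only when the key does not already have one. The demo class and keys below are illustrative, not taken from any of the projects that follow.

import org.apache.hadoop.conf.Configuration;

public class SetIfUnsetDemo {
  public static void main(String[] args) {
    // loadDefaults=false starts from an empty configuration, ignoring *-site.xml resources
    Configuration conf = new Configuration(false);

    conf.set("demo.key", "explicit");           // explicitly set
    conf.setIfUnset("demo.key", "default");     // no-op: the key already has a value
    conf.setIfUnset("demo.other", "default");   // takes effect: the key was unset

    System.out.println(conf.get("demo.key"));   // prints "explicit"
    System.out.println(conf.get("demo.other")); // prints "default"
  }
}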

Example 1: setConfiguration

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Update the configuration for the {@link Constraint}; does not change the
 * order in which the constraint is run.
 * 
 * @param desc
 *          {@link HTableDescriptor} to update
 * @param clazz
 *          {@link Constraint} to update
 * @param configuration
 *          to update the {@link Constraint} with.
 * @throws IOException
 *           if the Constraint was not stored correctly
 * @throws IllegalArgumentException
 *           if the Constraint was not present on this table.
 */
public static void setConfiguration(HTableDescriptor desc,
    Class<? extends Constraint> clazz, Configuration configuration)
    throws IOException, IllegalArgumentException {
  // get the entry for this class
  Pair<String, String> e = getKeyValueForClass(desc, clazz);

  if (e == null) {
    throw new IllegalArgumentException("Constraint: " + clazz.getName()
        + " is not associated with this table.");
  }

  // clone over the configuration elements
  Configuration conf = new Configuration(configuration);

  // read in the previous info about the constraint
  Configuration internal = readConfiguration(e.getSecond());

  // update the fields based on the previous settings
  conf.setIfUnset(ENABLED_KEY, internal.get(ENABLED_KEY));
  conf.setIfUnset(PRIORITY_KEY, internal.get(PRIORITY_KEY));

  // update the current value
  writeConstraint(desc, e.getFirst(), conf);
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: Constraints.java
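For context, here is a hedged usage sketch of Example 1. MyConstraint is a hypothetical Constraint subclass and the threshold key is made up; the sketch assumes HBase's org.apache.hadoop.hbase.constraint.Constraints API as used above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintConfigDemo {
  static void updateConstraint() throws java.io.IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
    Constraints.add(desc, MyConstraint.class); // register the constraint first

    Configuration constraintConf = new Configuration(false);
    constraintConf.set("myconstraint.threshold", "10"); // hypothetical constraint setting
    // the setIfUnset calls inside setConfiguration preserve the stored enabled/priority values
    Constraints.setConfiguration(desc, MyConstraint.class, constraintConf);
  }
}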

Example 2: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
    String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
    String.valueOf(DEFAULT_REGION_REPLICATION));

  conf.setIfUnset(
    String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
    StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));

  conf.setBoolean("hbase.table.sanity.checks", true);

  // enable async wal replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);

  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);

  super.setConf(conf);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: IntegrationTestRegionReplicaReplication.java

Example 3: propagateOptionsToJob

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected void propagateOptionsToJob(Job job) {
  super.propagateOptionsToJob(job);
  SqoopOptions opts = context.getOptions();
  Configuration conf = job.getConfiguration();
  conf.setIfUnset("pgbulkload.bin", "pg_bulkload");
  if (opts.getNullStringValue() != null) {
    conf.set("pgbulkload.null.string", opts.getNullStringValue());
  }
  setDelimiter("pgbulkload.input.field.delim",
               opts.getInputFieldDelim(),
               conf);
  setDelimiter("pgbulkload.input.record.delim",
               opts.getInputRecordDelim(),
               conf);
  setDelimiter("pgbulkload.input.enclosedby",
               opts.getInputEnclosedBy(),
               conf);
  setDelimiter("pgbulkload.input.escapedby",
               opts.getInputEscapedBy(),
               conf);
  conf.setBoolean("pgbulkload.input.encloserequired",
                  opts.isInputEncloseRequired());
  conf.setIfUnset("pgbulkload.check.constraints", "YES");
  conf.setIfUnset("pgbulkload.parse.errors", "INFINITE");
  conf.setIfUnset("pgbulkload.duplicate.errors", "INFINITE");
  conf.set("mapred.jar", context.getJarFile());
  conf.setBoolean("mapred.map.tasks.speculative.execution", false);
  conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
  conf.setInt("mapred.map.max.attempts", 1);
  conf.setInt("mapred.reduce.max.attempts", 1);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 33, Source: PGBulkloadExportJob.java
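Note the division of labor in Example 3: the pg_bulkload defaults go in with setIfUnset, so a value the user supplies up front (for example, -Dpgbulkload.bin=/usr/local/bin/pg_bulkload on the command line; the path is illustrative) survives, while the set/setBoolean/setInt calls unconditionally pin values the job cannot tolerate changing, such as disabling speculative execution.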

Example 4: setupDatanodeAddress

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                         boolean checkDataNodeAddrConfig) throws IOException {
  if (setupHostsFile) {
    String hostsFile = conf.get(DFS_HOSTS, "").trim();
    if (hostsFile.length() == 0) {
      throw new IOException("Parameter dfs.hosts is not setup in conf");
    }
    // Setup datanode in the include file, if it is defined in the conf
    String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, address);
    }
    addToFile(hostsFile, address);
    LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
  } else {
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    }
  }
  if (checkDataNodeAddrConfig) {
    conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  } else {
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: MiniDFSCluster.java
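In Example 4, the checkDataNodeAddrConfig flag selects between the two write styles: setIfUnset honors any datanode address the caller pre-configured, while plain set forces the loopback defaults. The same helper thus serves both user-configured and fully managed mini-cluster tests.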

Example 5: checkShortCircuitReadBufferSize

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Check if the short-circuit read buffer size is set and, if not, set it to the HBase value.
 * @param conf configuration to check and update if needed
 */
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
  final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
  final int notSet = -1;
  // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
  final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
  int size = conf.getInt(dfsKey, notSet);
  // If a size is set, return -- we will use it.
  if (size != notSet) return;
  // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
  int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
  conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: FSUtils.java

Example 6: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  // default replication for this test is 3
  String clazz = this.getClass().getSimpleName();
  conf.setIfUnset(String.format("%s.%s", clazz, LoadTestTool.OPT_REGION_REPLICATION),
    Integer.toString(DEFAULT_REGION_REPLICATION));
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java

Example 7: addHiveConfigs

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Copy Hive configuration entries into the target configuration without overriding
 * properties that are already set.
 * @param hiveConf source Hive configuration to copy from
 * @param conf target configuration to update
 */
public static void addHiveConfigs(Configuration hiveConf, Configuration conf) {
  for (Map.Entry<String, String> item : hiveConf) {
    conf.setIfUnset(item.getKey(), item.getValue());
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 11, Source: HiveConfig.java
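To round out Example 7, a hedged usage sketch follows. The hive-site.xml path and the override key are illustrative, and it assumes the HiveConfig class above is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddHiveConfigsDemo {
  public static void main(String[] args) {
    Configuration hiveConf = new Configuration(false);
    hiveConf.addResource(new Path("/etc/hive/conf/hive-site.xml")); // illustrative path

    Configuration jobConf = new Configuration();
    jobConf.set("hive.metastore.uris", "thrift://custom-host:9083"); // pre-set by the job

    HiveConfig.addHiveConfigs(hiveConf, jobConf);
    // jobConf keeps "thrift://custom-host:9083"; every other hive-site.xml
    // entry is copied in, because those keys were previously unset
  }
}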


Note: The org.apache.hadoop.conf.Configuration.setIfUnset method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation hosting platforms. The snippets were selected from open-source projects contributed by the community, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not repost without permission.