

Java Configuration.setIfUnset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setIfUnset. If you are wondering what Configuration.setIfUnset does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 7 code examples of the Configuration.setIfUnset method, sorted by popularity by default.
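Before the examples, a minimal sketch of the method's semantics: setIfUnset(name, value) writes a value only if the key has not been set yet. The key names below (example.key, example.other) are illustrative only, not taken from any project featured in this article.

import org.apache.hadoop.conf.Configuration;

public class SetIfUnsetDemo {
  public static void main(String[] args) {
    // start from an empty configuration (skip loading default resources)
    Configuration conf = new Configuration(false);

    conf.set("example.key", "explicit");
    conf.setIfUnset("example.key", "fallback");   // no-op: key already has a value
    conf.setIfUnset("example.other", "fallback"); // takes effect: key was absent

    System.out.println(conf.get("example.key"));   // prints "explicit"
    System.out.println(conf.get("example.other")); // prints "fallback"
  }
}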

Example 1: setConfiguration

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Update the configuration for the {@link Constraint}; does not change the
 * order in which the constraint is run.
 * 
 * @param desc
 *          {@link HTableDescriptor} to update
 * @param clazz
 *          {@link Constraint} to update
 * @param configuration
 *          to update the {@link Constraint} with.
 * @throws IOException
 *           if the Constraint was not stored correctly
 * @throws IllegalArgumentException
 *           if the Constraint was not present on this table.
 */
public static void setConfiguration(HTableDescriptor desc,
    Class<? extends Constraint> clazz, Configuration configuration)
    throws IOException, IllegalArgumentException {
  // get the entry for this class
  Pair<String, String> e = getKeyValueForClass(desc, clazz);

  if (e == null) {
    throw new IllegalArgumentException("Constraint: " + clazz.getName()
        + " is not associated with this table.");
  }

  // clone over the configuration elements
  Configuration conf = new Configuration(configuration);

  // read in the previous info about the constraint
  Configuration internal = readConfiguration(e.getSecond());

  // update the fields based on the previous settings
  conf.setIfUnset(ENABLED_KEY, internal.get(ENABLED_KEY));
  conf.setIfUnset(PRIORITY_KEY, internal.get(PRIORITY_KEY));

  // update the current value
  writeConstraint(desc, e.getFirst(), conf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: Constraints.java
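As a follow-up, a hypothetical usage sketch of the method above. MyConstraint stands in for any org.apache.hadoop.hbase.constraint.Constraint implementation, and the configuration key is made up; this is a sketch, not code from the ditb project.

static void configureMyConstraint(HTableDescriptor desc) throws IOException {
  // register the constraint first so getKeyValueForClass finds an entry
  Constraints.add(desc, MyConstraint.class);
  // start from an empty configuration and set only what we need
  Configuration constraintConf = new Configuration(false);
  constraintConf.set("my.constraint.threshold", "10"); // hypothetical key
  // setConfiguration merges in any previously stored ENABLED/PRIORITY
  // settings via setIfUnset before writing the constraint back
  Constraints.setConfiguration(desc, MyConstraint.class, constraintConf);
}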

Example 2: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
    String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
    String.valueOf(DEFAULT_REGION_REPLICATION));

  conf.setIfUnset(
    String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
    StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));

  conf.setBoolean("hbase.table.sanity.checks", true);

  // enable async wal replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);

  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);

  super.setConf(conf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: IntegrationTestRegionReplicaReplication.java

Example 3: propagateOptionsToJob

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected void propagateOptionsToJob(Job job) {
  super.propagateOptionsToJob(job);
  SqoopOptions opts = context.getOptions();
  Configuration conf = job.getConfiguration();
  conf.setIfUnset("pgbulkload.bin", "pg_bulkload");
  if (opts.getNullStringValue() != null) {
    conf.set("pgbulkload.null.string", opts.getNullStringValue());
  }
  setDelimiter("pgbulkload.input.field.delim",
               opts.getInputFieldDelim(),
               conf);
  setDelimiter("pgbulkload.input.record.delim",
               opts.getInputRecordDelim(),
               conf);
  setDelimiter("pgbulkload.input.enclosedby",
               opts.getInputEnclosedBy(),
               conf);
  setDelimiter("pgbulkload.input.escapedby",
               opts.getInputEscapedBy(),
               conf);
  conf.setBoolean("pgbulkload.input.encloserequired",
                  opts.isInputEncloseRequired());
  conf.setIfUnset("pgbulkload.check.constraints", "YES");
  conf.setIfUnset("pgbulkload.parse.errors", "INFINITE");
  conf.setIfUnset("pgbulkload.duplicate.errors", "INFINITE");
  conf.set("mapred.jar", context.getJarFile());
  conf.setBoolean("mapred.map.tasks.speculative.execution", false);
  conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
  conf.setInt("mapred.map.max.attempts", 1);
  conf.setInt("mapred.reduce.max.attempts", 1);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 33, Source: PGBulkloadExportJob.java

Example 4: setupDatanodeAddress

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                         boolean checkDataNodeAddrConfig) throws IOException {
  if (setupHostsFile) {
    String hostsFile = conf.get(DFS_HOSTS, "").trim();
    if (hostsFile.length() == 0) {
      throw new IOException("Parameter dfs.hosts is not setup in conf");
    }
    // Setup datanode in the include file, if it is defined in the conf
    String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, address);
    }
    addToFile(hostsFile, address);
    LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
  } else {
    if (checkDataNodeAddrConfig) {
      conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    } else {
      conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
    }
  }
  if (checkDataNodeAddrConfig) {
    conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  } else {
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source: MiniDFSCluster.java

Example 5: checkShortCircuitReadBufferSize

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Check if the short-circuit read buffer size is set and, if not, set it to the
 * HBase-preferred value.
 * @param conf configuration to check and, if needed, update
 */
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
  final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
  final int notSet = -1;
  // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
  final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
  int size = conf.getInt(dfsKey, notSet);
  // If a size is set, return -- we will use it.
  if (size != notSet) return;
  // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
  int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
  conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source: FSUtils.java

Example 6: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  // default replication for this test is 3
  String clazz = this.getClass().getSimpleName();
  conf.setIfUnset(String.format("%s.%s", clazz, LoadTestTool.OPT_REGION_REPLICATION),
    Integer.toString(DEFAULT_REGION_REPLICATION));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java

Example 7: addHiveConfigs

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Add Hive configuration to a configuration object without overriding
 * properties that are already set.
 * @param hiveConf Hive configuration to copy from
 * @param conf configuration to copy into
 */
public static void addHiveConfigs(Configuration hiveConf, Configuration conf) {
  for (Map.Entry<String, String> item : hiveConf) {
    conf.setIfUnset(item.getKey(), item.getValue());
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 11, Source: HiveConfig.java
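A brief usage sketch for this helper; it assumes hive-site.xml is available on the classpath and that HiveConfig is the enclosing class shown above:

// load Hive settings into a standalone Configuration, then merge them into a
// job configuration without clobbering anything the job has already set
Configuration hiveConf = new Configuration(false);
hiveConf.addResource("hive-site.xml"); // assumes the file is on the classpath
Configuration jobConf = new Configuration();
HiveConfig.addHiveConfigs(hiveConf, jobConf);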


Note: The org.apache.hadoop.conf.Configuration.setIfUnset examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license.