This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getResource. If you have been wondering what Configuration.getResource does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the other members of the enclosing class, org.apache.hadoop.conf.Configuration.
Three code examples of Configuration.getResource are shown below, ordered by popularity by default.
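As a quick orientation before the three examples, here is a minimal standalone sketch of the method itself: getResource(String name) searches the CLASSPATH for the named resource and returns its URL, or null when it cannot be found. The class name GetResourceDemo is just an illustrative choice.

import java.net.URL;
import org.apache.hadoop.conf.Configuration;

public class GetResourceDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Returns the URL of the named resource as located on the CLASSPATH,
    // or null if no such resource exists
    URL url = conf.getResource("core-site.xml");
    System.out.println(url == null
        ? "core-site.xml not found on CLASSPATH"
        : "core-site.xml found at " + url);
  }
}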
Example 1: getConfiguration
import java.net.URL;
import org.apache.hadoop.conf.Configuration; // import the packages/classes the method depends on

static Configuration getConfiguration(String jobTrackerSpec)
{
  Configuration conf = new Configuration();
  if (jobTrackerSpec != null) {
    if (jobTrackerSpec.indexOf(":") >= 0) {
      // A spec containing ':' is treated as a host:port job tracker address
      conf.set("mapred.job.tracker", jobTrackerSpec);
    } else {
      // Otherwise, look for hadoop-<spec>.xml on the CLASSPATH; getResource
      // returns null when the resource cannot be found, so fail fast here
      String classpathFile = "hadoop-" + jobTrackerSpec + ".xml";
      URL validate = conf.getResource(classpathFile);
      if (validate == null) {
        throw new RuntimeException(classpathFile + " not found on CLASSPATH");
      }
      conf.addResource(classpathFile);
    }
  }
  return conf;
}
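A hypothetical invocation might look like this; the spec values "tracker-host:8021" and "local" are illustrative assumptions, not part of the original example:

// A spec containing ':' is taken as a host:port job tracker address
Configuration clusterConf = getConfiguration("tracker-host:8021");
// Any other non-null spec is treated as a suffix: this looks for
// hadoop-local.xml on the CLASSPATH and fails fast if it is missing
Configuration localConf = getConfiguration("local");

Validating with getResource before calling addResource converts a missing config file into an immediate, descriptive error instead of a silently incomplete Configuration.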
Example 2: addNewConfigResource
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.net.URLDecoder;
import org.apache.hadoop.conf.Configuration; // import the packages/classes the method depends on
import org.apache.hadoop.fs.Path;

// tempResource is a field of the enclosing test class
private void addNewConfigResource(String rsrcName, String keyGroup,
    String groups, String keyHosts, String hosts)
    throws FileNotFoundException, UnsupportedEncodingException {
  // The location for the temp resource must be on the CLASSPATH, so place it
  // in the directory where getResource finds hdfs-site.xml
  Configuration conf = new Configuration();
  URL url = conf.getResource("hdfs-site.xml");
  String urlPath = URLDecoder.decode(url.getPath(), "UTF-8");
  Path p = new Path(urlPath);
  Path dir = p.getParent();
  tempResource = dir.toString() + "/" + rsrcName;
  String newResource =
      "<configuration>" +
      "<property><name>" + keyGroup + "</name><value>" + groups + "</value></property>" +
      "<property><name>" + keyHosts + "</name><value>" + hosts + "</value></property>" +
      "</configuration>";
  PrintWriter writer = new PrintWriter(new FileOutputStream(tempResource));
  writer.println(newResource);
  writer.close();
  // Register the new file as a default resource for all Configuration objects
  Configuration.addDefaultResource(rsrcName);
}
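Here getResource serves a different purpose: locating a directory that is known to be on the CLASSPATH, so that a freshly generated XML resource written there is visible to Configuration.addDefaultResource. A hypothetical call might be (the resource name, property keys, and values are illustrative assumptions):

// Writes temp-hosts.xml next to hdfs-site.xml on the CLASSPATH and
// registers it as a default resource for all new Configuration objects
addNewConfigResource("temp-hosts.xml",
    "dfs.hosts.group", "supergroup",
    "dfs.hosts", "host1.example.com,host2.example.com");

Since addDefaultResource registers the name globally, every Configuration created afterwards picks up the new properties, which is why the file must land somewhere already on the CLASSPATH.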
Example 3: call
import java.io.IOException;
import org.apache.hadoop.conf.Configuration; // import the packages/classes the method depends on
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.streaming.Time;

// TopicProducer, TopicProducerImpl, WritableToValueFunction, and the fields
// used below (log, sparkContext, dataDirString, etc.) come from the enclosing project

@Override
public void call(JavaPairRDD<K,M> newData, Time timestamp)
    throws IOException, InterruptedException {
  if (newData.isEmpty()) {
    log.info("No data in current generation's RDD; nothing to do");
    return;
  }

  log.info("Beginning update at {}", timestamp);

  Configuration hadoopConf = sparkContext.hadoopConfiguration();
  // Sanity check: getResource returns null when core-site.xml is not on the classpath
  if (hadoopConf.getResource("core-site.xml") == null) {
    log.warn("Hadoop config like core-site.xml was not found; " +
             "is the Hadoop config directory on the classpath?");
  }

  JavaPairRDD<K,M> pastData;
  Path inputPathPattern = new Path(dataDirString + "/*/part-*");
  FileSystem fs = FileSystem.get(inputPathPattern.toUri(), hadoopConf);
  FileStatus[] inputPathStatuses = fs.globStatus(inputPathPattern);

  if (inputPathStatuses == null || inputPathStatuses.length == 0) {
    log.info("No past data at path(s) {}", inputPathPattern);
    pastData = null;
  } else {
    log.info("Found past data at path(s) like {}", inputPathStatuses[0].getPath());
    Configuration updatedConf = new Configuration(hadoopConf);
    updatedConf.set(FileInputFormat.INPUT_DIR, joinFSPaths(fs, inputPathStatuses));
    @SuppressWarnings("unchecked")
    JavaPairRDD<Writable,Writable> pastWritableData = (JavaPairRDD<Writable,Writable>)
        sparkContext.newAPIHadoopRDD(updatedConf,
                                     SequenceFileInputFormat.class,
                                     keyWritableClass,
                                     messageWritableClass);
    pastData = pastWritableData.mapToPair(
        new WritableToValueFunction<>(keyClass,
                                      messageClass,
                                      keyWritableClass,
                                      messageWritableClass));
  }

  if (updateTopic == null || updateBroker == null) {
    log.info("Not producing updates to update topic since none was configured");
    updateInstance.runUpdate(sparkContext,
                             timestamp.milliseconds(),
                             newData,
                             pastData,
                             modelDirString,
                             null);
  } else {
    // This TopicProducer should not be async: it generally sends one large model
    // that must be delivered reliably before further updates, not buffered
    try (TopicProducer<String,U> producer =
             new TopicProducerImpl<>(updateBroker, updateTopic, false)) {
      updateInstance.runUpdate(sparkContext,
                               timestamp.milliseconds(),
                               newData,
                               pastData,
                               modelDirString,
                               producer);
    }
  }
}
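The joinFSPaths helper used above is not shown in the snippet. Here is a plausible sketch, assuming it simply comma-joins the fully qualified matched paths (FileInputFormat.INPUT_DIR accepts a comma-separated path list; the real helper may additionally escape commas inside path names):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;

// Hypothetical reconstruction, not the original implementation
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
  StringBuilder joined = new StringBuilder();
  for (FileStatus status : statuses) {
    if (joined.length() > 0) {
      joined.append(',');
    }
    // Qualify each path against the FileSystem's URI and working directory
    joined.append(fs.makeQualified(status.getPath()).toString());
  }
  return joined.toString();
}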