This page collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.writeXml. If you are wondering what Configuration.writeXml does, how to call it, or where to find real-world examples, the curated samples below should help. You can also explore the enclosing class, org.apache.hadoop.conf.Configuration, for more context.
Fifteen code examples of Configuration.writeXml are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
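Before the examples, here is a minimal, self-contained sketch of what writeXml does: it serializes the current configuration as Hadoop's standard <configuration> XML to a Writer or OutputStream (note that the method does not close the stream for you). The property name below is made up for illustration.

import java.io.StringWriter;
import org.apache.hadoop.conf.Configuration;

public class WriteXmlDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false); // no default resources
    conf.set("demo.key", "demo.value");            // hypothetical property
    StringWriter out = new StringWriter();
    conf.writeXml(out);                            // serialize as XML
    System.out.println(out.toString());
  }
}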
Example 1: testReadWriteWithDeprecatedKeys
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void testReadWriteWithDeprecatedKeys() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("old.config.yet.to.be.deprecated", true);
  Configuration.addDeprecation("old.config.yet.to.be.deprecated",
      new String[]{"new.conf.to.replace.deprecated.conf"});
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  String fileContents;
  try {
    conf.writeXml(out);
    fileContents = out.toString();
  } finally {
    out.close();
  }
  assertTrue(fileContents.contains("old.config.yet.to.be.deprecated"));
  assertTrue(fileContents.contains("new.conf.to.replace.deprecated.conf"));
}
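As a follow-up, a sketch that is not part of the original test: the serialized bytes can be read back into a fresh Configuration (continuing from the out stream above), where the replacement key resolves to the value originally set under the deprecated name. Only java.io.ByteArrayInputStream is assumed beyond the test's imports.

// Sketch: round-trip the XML and resolve the replacement key.
Configuration readBack = new Configuration(false);
readBack.addResource(new ByteArrayInputStream(out.toByteArray()));
assertTrue(readBack.getBoolean("new.conf.to.replace.deprecated.conf", false));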
Example 2: testGetHistoryIntermediateDoneDirForUser
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
  // Test relative path
  Configuration conf = new Configuration();
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
      "/mapred/history/done_intermediate");
  conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
  String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals("/mapred/history/done_intermediate/" +
      System.getProperty("user.name"), pathStr);

  // Test fully qualified path
  // Create default configuration pointing to the minicluster
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();

  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      "file:///");
  pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals(dfsCluster.getURI().toString() +
      "/mapred/history/done_intermediate/" + System.getProperty("user.name"),
      pathStr);
}
Example 3: writeConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void writeConf(Configuration conf, Path jobFile) throws IOException {
  // Write job file to JobTracker's fs
  FSDataOutputStream out =
      FileSystem.create(jtFs, jobFile,
          new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
  try {
    conf.writeXml(out);
  } finally {
    out.close();
  }
}
Example 4: writeResponse
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Guts of the servlet - extracted for easy testing.
 */
static void writeResponse(Configuration conf, Writer out, String format)
    throws IOException, BadFormatException {
  if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, out);
  } else if (FORMAT_XML.equals(format)) {
    conf.writeXml(out);
  } else {
    throw new BadFormatException("Bad format: " + format);
  }
}
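A hypothetical call site for this helper, as a sketch: it assumes FORMAT_XML is the string "xml" (as in Hadoop's ConfServlet, where this method lives) and that java.io.StringWriter is imported.

// Sketch: dump a Configuration as XML into a string.
StringWriter sw = new StringWriter();
writeResponse(new Configuration(), sw, "xml");
String xmlDump = sw.toString();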
Example 5: writeConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static void writeConf(File file, String key, String value) throws IOException {
  Configuration conf = new Configuration(false);
  conf.set(key, value);
  try (FileWriter out = new FileWriter(file)) {
    conf.writeXml(out);
  }
}
Example 6: writeConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void writeConf(Configuration configuration, String hdfsConfOutPath) {
  // Use try-with-resources so the writer is closed even on failure
  // (the original never closed it), and log the exception itself.
  try (FileWriter writer = new FileWriter(hdfsConfOutPath)) {
    configuration.writeXml(writer);
  } catch (IOException e) {
    LOG.error("Error in writing configuration at " + hdfsConfOutPath, e);
  }
}
Example 7: submit
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public Job submit(Configuration conf, boolean mapSpeculative,
    boolean reduceSpeculative) throws Exception {
  String user = conf.get(MRJobConfig.USER_NAME, UserGroupInformation
      .getCurrentUser().getShortUserName());
  conf.set(MRJobConfig.USER_NAME, user);
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
  conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
  // TODO: fix the bug where the speculator gets events with
  // not-fully-constructed objects. For now, disable speculative exec
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);
  init(conf);
  start();
  DefaultMetricsSystem.shutdown();
  Job job = getContext().getAllJobs().values().iterator().next();
  if (assignedQueue != null) {
    job.setQueueName(assignedQueue);
  }
  // Write job.xml. Use try-with-resources so the stream is always closed;
  // writeXml does not close the stream it is given (the original leaked it).
  String jobFile = MRApps.getJobFile(conf, user,
      TypeConverter.fromYarn(job.getID()));
  LOG.info("Writing job conf to " + jobFile);
  new File(jobFile).getParentFile().mkdirs();
  try (FileOutputStream out = new FileOutputStream(jobFile)) {
    conf.writeXml(out);
  }
  return job;
}
Example 8: setUpConfigFile
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
static void setUpConfigFile(Properties confProps, File configFile)
    throws IOException {
  Configuration config = new Configuration(false);
  FileOutputStream fos = new FileOutputStream(configFile);
  for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
    String key = (String) e.nextElement();
    config.set(key, confProps.getProperty(key));
  }
  config.writeXml(fos);
  fos.close();
}
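A hypothetical call site, as a sketch; the property values and file path below are made up.

// Sketch: persist a couple of properties as a Hadoop XML config file.
Properties props = new Properties();
props.setProperty("fs.defaultFS", "hdfs://localhost:9000");
props.setProperty("dfs.replication", "1");
setUpConfigFile(props, new File("/tmp/core-site.xml"));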
Example 9: writeConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void writeConf(Configuration conf, Path jobFile)
    throws IOException {
  // Write job file to JobTracker's fs
  FSDataOutputStream out =
      FileSystem.create(jtFs, jobFile,
          new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
  try {
    conf.writeXml(out);
  } finally {
    out.close();
  }
}
Example 10: testCreateDirsWithAdditionalFileSystem
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testCreateDirsWithAdditionalFileSystem() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  dfsCluster2.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
  Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());

  // Set default configuration to the first cluster
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();

  testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0), true);

  // Directories should be created only in the default file system (dfsCluster)
  Assert.assertTrue(dfsCluster.getFileSystem()
      .exists(new Path(getDoneDirNameForTest())));
  Assert.assertTrue(dfsCluster.getFileSystem()
      .exists(new Path(getIntermediateDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
      .exists(new Path(getDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
      .exists(new Path(getIntermediateDoneDirNameForTest())));
}
Example 11: saveConfig
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Saves configuration to a file.
 *
 * @param file File to save
 * @param conf Configuration contents to write to file
 * @throws IOException if there is an I/O error saving the file
 */
public static void saveConfig(File file, Configuration conf)
    throws IOException {
  Writer writer = new FileWriter(file);
  try {
    conf.writeXml(writer);
  } finally {
    writer.close();
  }
}
Example 12: serviceHadoopConfCustomDir
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
@TestDir
public void serviceHadoopConfCustomDir() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
  new File(hadoopConfDir).mkdirs();
  String services = StringUtils.join(",",
      Arrays.asList(InstrumentationService.class.getName(),
          SchedulerService.class.getName(),
          FileSystemAccessService.class.getName()));
  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  conf.set("server.hadoop.config.dir", hadoopConfDir);

  File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
  OutputStream os = new FileOutputStream(hdfsSite);
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set("foo", "BAR");
  hadoopConf.writeXml(os);
  os.close();

  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
  Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
  server.destroy();
}
Example 13: testWriteConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(timeout=60000)
public void testWriteConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  System.out.println("Setting conf in: " + System.identityHashCode(conf));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  FileSystem fs = null;
  OutputStream os = null;
  try {
    fs = cluster.getFileSystem();
    Path filePath = new Path("/testWriteConf.xml");
    os = fs.create(filePath);
    StringBuilder longString = new StringBuilder();
    // Build a ~500KB value (100,000 repetitions of "hello")
    for (int i = 0; i < 100000; i++) {
      longString.append("hello");
    }
    conf.set("foobar", longString.toString());
    conf.writeXml(os);
    os.close();
    os = null;
    fs.close();
    fs = null;
  } finally {
    IOUtils.cleanup(null, os, fs);
    cluster.shutdown();
  }
}
Example 14: initializeSSLConf
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Initialize the SSL configuration if one is set in the job conf.
 *
 * @throws IOException if an I/O error occurs
 */
private void initializeSSLConf(Context context) throws IOException {
  LOG.info("Initializing SSL configuration");
  String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
  Path[] cacheFiles = context.getLocalCacheFiles();
  Configuration sslConfig = new Configuration(false);
  String sslConfFileName = conf.get(DistCpConstants.CONF_LABEL_SSL_CONF);
  Path sslClient = findCacheFile(cacheFiles, sslConfFileName);
  if (sslClient == null) {
    LOG.warn("SSL Client config file not found. Was looking for " + sslConfFileName +
        " in " + Arrays.toString(cacheFiles));
    return;
  }
  sslConfig.addResource(sslClient);

  String trustStoreFile = conf.get("ssl.client.truststore.location");
  Path trustStorePath = findCacheFile(cacheFiles, trustStoreFile);
  sslConfig.set("ssl.client.truststore.location", trustStorePath.toString());

  String keyStoreFile = conf.get("ssl.client.keystore.location");
  Path keyStorePath = findCacheFile(cacheFiles, keyStoreFile);
  sslConfig.set("ssl.client.keystore.location", keyStorePath.toString());

  try {
    OutputStream out = new FileOutputStream(workDir + "/" + sslConfFileName);
    try {
      sslConfig.writeXml(out);
    } finally {
      out.close();
    }
    conf.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfFileName);
  } catch (IOException e) {
    LOG.warn("Unable to write out the ssl configuration. " +
        "Will fall back to default ssl-client.xml in class path, if there is one", e);
  }
}
Example 15: serializeConfiguration
import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Write the configuration to a String.
 *
 * @param conf the configuration to write
 * @return String representation of that configuration
 * @throws IOException
 */
private static String serializeConfiguration(Configuration conf)
    throws IOException {
  // write the configuration out to the data stream
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(bos);
  conf.writeXml(dos);
  dos.flush();
  byte[] data = bos.toByteArray();
  return Bytes.toString(data);
}