This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir. If you are wondering what JobSubmissionFiles.getStagingDir does, how to use it, or where to find examples, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.mapreduce.JobSubmissionFiles.
The following presents 12 code examples of JobSubmissionFiles.getStagingDir, sorted by popularity by default.
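Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern distilled from the samples below; the class name StagingDirDemo and the println are illustrative additions, not taken from any of the examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;

public class StagingDirDemo {
  public static void main(String[] args) throws Exception {
    // Assumes a Hadoop client configuration is available on the classpath.
    Configuration conf = new Configuration();
    // Initializes the job staging directory for the current user
    // (creating it if necessary) and returns its path.
    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    System.out.println("Staging directory: " + stagingDir);
  }
}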
Example 1: testCleanup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Tests the run and execute methods of the DistCp class: a simple file copy.
* @throws Exception
*/
@Test
public void testCleanup() throws Exception {
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path source = createFile("tmp.txt");
Path target = createFile("target.txt");
DistCp distcp = new DistCp(conf, null);
String[] arg = { source.toString(), target.toString() };
distcp.run(arg);
Assert.assertTrue(fs.exists(target));
}
Example 2: testCleanupTestViaToolRunner
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Tests the main method of DistCp. The method is expected to call System.exit().
*
*/
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path source = createFile("tmp.txt");
Path target = createFile("target.txt");
try {
String[] arg = { target.toString(), source.toString() };
DistCp.main(arg);
Assert.fail();
} catch (ExitException t) {
Assert.assertTrue(fs.exists(target));
Assert.assertEquals(0, t.status);
Assert.assertEquals(0,
stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
}
}
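Note: a direct call to System.exit() cannot be caught as an exception. The ExitException asserted on above is presumably produced by a test-scoped SecurityManager, installed elsewhere in the test class (not shown here), that intercepts exit calls and exposes the exit status.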
Example 3: testCleanup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
@Test(timeout=100000)
public void testCleanup() {
try {
Path sourcePath = new Path("noscheme:///file");
List<Path> sources = new ArrayList<Path>();
sources.add(sourcePath);
DistCpOptions options = new DistCpOptions(sources, target);
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
try {
new DistCp(conf, options).execute();
} catch (Throwable t) {
Assert.assertEquals(0, stagingDir.getFileSystem(conf).
listStatus(stagingDir).length);
}
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("testCleanup failed " + e.getMessage());
}
}
Example 4: testCleanup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
@Test
public void testCleanup() {
try {
Path sourcePath = new Path("noscheme:///file");
List<Path> sources = new ArrayList<Path>();
sources.add(sourcePath);
DistCpOptions options = new DistCpOptions(sources, target);
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
try {
new DistCp(conf, options).execute();
} catch (Throwable t) {
Assert.assertEquals(0, stagingDir.getFileSystem(conf).
listStatus(stagingDir).length);
}
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("testCleanup failed " + e.getMessage());
}
}
Example 5: createMetaFolderPath
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Create a default working folder for the job, under the job staging directory
*
* @return Returns the working folder information
* @throws Exception - Exception if any
*/
private Path createMetaFolderPath() throws Exception {
Configuration configuration = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(configuration), configuration);
Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
LOG.debug("Meta folder location: {}", metaFolderPath);
configuration.set(S3MapReduceCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
return metaFolderPath;
}
Example 6: createMetaFolderPath
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Create a default working folder for the job, under the
* job staging directory
*
* @return Returns the working folder information
* @throws Exception - Exception if any
*/
private Path createMetaFolderPath() throws Exception {
Configuration configuration = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(configuration), configuration);
Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
if (LOG.isDebugEnabled())
LOG.debug("Meta folder location: " + metaFolderPath);
configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
return metaFolderPath;
}
Example 7: createMetaFolderPath
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Create a default working folder for the job, under the
* job staging directory
*
* @return Returns the working folder information
* @throws Exception - Exception if any
*/
private Path createMetaFolderPath() throws Exception {
Configuration configuration = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(configuration), configuration);
Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
if (LOG.isDebugEnabled())
LOG.debug("Meta folder location: " + metaFolderPath);
configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
return metaFolderPath;
}
Example 8: setup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
private boolean setup(List<FileOperation> ops, Path log)
throws IOException {
final String randomId = getRandomId();
JobClient jClient = new JobClient(jobconf);
Path stagingArea;
try {
stagingArea = JobSubmissionFiles.getStagingDir(
jClient.getClusterHandle(), jobconf);
} catch (InterruptedException ie){
throw new IOException(ie);
}
Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
LOG.info(JOB_DIR_LABEL + "=" + jobdir);
if (log == null) {
log = new Path(jobdir, "_logs");
}
FileOutputFormat.setOutputPath(jobconf, log);
LOG.info("log=" + log);
//create operation list
FileSystem fs = jobdir.getFileSystem(jobconf);
Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
jobconf.set(OP_LIST_LABEL, opList.toString());
int opCount = 0, synCount = 0;
try (SequenceFile.Writer opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
FileOperation.class, SequenceFile.CompressionType.NONE)) {
for(FileOperation op : ops) {
FileStatus srcstat = fs.getFileStatus(op.src);
if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
++opCount;
opWriter.append(new Text(op.src.toString()), op);
}
Stack<Path> pathstack = new Stack<Path>();
for(pathstack.push(op.src); !pathstack.empty(); ) {
for(FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDirectory()) {
pathstack.push(stat.getPath());
}
if (op.isDifferent(stat)) {
++opCount;
if (++synCount > SYNC_FILE_MAX) {
opWriter.sync();
synCount = 0;
}
Path f = stat.getPath();
opWriter.append(new Text(f.toString()), new FileOperation(f, op));
}
}
}
}
}
checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
jobconf.setInt(OP_COUNT_LABEL, opCount);
LOG.info(OP_COUNT_LABEL + "=" + opCount);
jobconf.setNumMapTasks(getMapCount(opCount,
new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
return opCount != 0;
}
Example 9: setup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
private boolean setup(List<FileOperation> ops, Path log)
throws IOException {
final String randomId = getRandomId();
JobClient jClient = new JobClient(jobconf);
Path stagingArea;
try {
stagingArea = JobSubmissionFiles.getStagingDir(
jClient.getClusterHandle(), jobconf);
} catch (InterruptedException ie){
throw new IOException(ie);
}
Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
LOG.info(JOB_DIR_LABEL + "=" + jobdir);
if (log == null) {
log = new Path(jobdir, "_logs");
}
FileOutputFormat.setOutputPath(jobconf, log);
LOG.info("log=" + log);
//create operation list
FileSystem fs = jobdir.getFileSystem(jobconf);
Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
jobconf.set(OP_LIST_LABEL, opList.toString());
int opCount = 0, synCount = 0;
SequenceFile.Writer opWriter = null;
try {
opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
FileOperation.class, SequenceFile.CompressionType.NONE);
for(FileOperation op : ops) {
FileStatus srcstat = fs.getFileStatus(op.src);
if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
++opCount;
opWriter.append(new Text(op.src.toString()), op);
}
Stack<Path> pathstack = new Stack<Path>();
for(pathstack.push(op.src); !pathstack.empty(); ) {
for(FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDirectory()) {
pathstack.push(stat.getPath());
}
if (op.isDifferent(stat)) {
++opCount;
if (++synCount > SYNC_FILE_MAX) {
opWriter.sync();
synCount = 0;
}
Path f = stat.getPath();
opWriter.append(new Text(f.toString()), new FileOperation(f, op));
}
}
}
}
} finally {
// Guard against the case where createWriter threw before opWriter was assigned.
if (opWriter != null) {
opWriter.close();
}
}
checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
jobconf.setInt(OP_COUNT_LABEL, opCount);
LOG.info(OP_COUNT_LABEL + "=" + opCount);
jobconf.setNumMapTasks(getMapCount(opCount,
new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
return opCount != 0;
}
Example 10: setup
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
private boolean setup(List<FileOperation> ops, Path log) throws IOException {
final String randomId = getRandomId();
JobClient jClient = new JobClient(jobconf);
Path stagingArea;
try {
stagingArea = JobSubmissionFiles.getStagingDir(
jClient, jobconf);
} catch (InterruptedException e) {
throw new IOException(e);
}
Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
LOG.info(JOB_DIR_LABEL + "=" + jobdir);
if (log == null) {
log = new Path(jobdir, "_logs");
}
FileOutputFormat.setOutputPath(jobconf, log);
LOG.info("log=" + log);
//create operation list
FileSystem fs = jobdir.getFileSystem(jobconf);
Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
jobconf.set(OP_LIST_LABEL, opList.toString());
int opCount = 0, synCount = 0;
SequenceFile.Writer opWriter = null;
try {
opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
FileOperation.class, SequenceFile.CompressionType.NONE);
for(FileOperation op : ops) {
FileStatus srcstat = fs.getFileStatus(op.src);
if (srcstat.isDir() && op.isDifferent(srcstat)) {
++opCount;
opWriter.append(new Text(op.src.toString()), op);
}
Stack<Path> pathstack = new Stack<Path>();
for(pathstack.push(op.src); !pathstack.empty(); ) {
for(FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDir()) {
pathstack.push(stat.getPath());
}
if (op.isDifferent(stat)) {
++opCount;
if (++synCount > SYNC_FILE_MAX) {
opWriter.sync();
synCount = 0;
}
Path f = stat.getPath();
opWriter.append(new Text(f.toString()), new FileOperation(f, op));
}
}
}
}
} finally {
// Guard against the case where createWriter threw before opWriter was assigned.
if (opWriter != null) {
opWriter.close();
}
}
checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
jobconf.setInt(OP_COUNT_LABEL, opCount);
LOG.info(OP_COUNT_LABEL + "=" + opCount);
jobconf.setNumMapTasks(getMapCount(opCount,
new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
return opCount != 0;
}
Example 11: getStagingDir
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Initializes the staging directory and returns the path.
*
* @param conf system configuration
* @return staging directory path
* @throws IOException
* @throws InterruptedException
*/
public static Path getStagingDir(Configuration conf)
throws IOException, InterruptedException {
return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
}
Example 12: getStagingDir
import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class this method depends on
/**
* Initializes the staging directory and returns the path.
*
* @param conf system configuration
* @return staging directory path
* @throws IOException
* @throws InterruptedException
*/
public static Path getStagingDir(Configuration conf)
throws IOException, InterruptedException {
JobClient jobClient = new JobClient(new JobConf(conf));
return JobSubmissionFiles.getStagingDir(jobClient, conf);
}
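Examples 11 and 12 implement the same helper against two different client APIs: Example 11 relies on the Cluster-based overload of getStagingDir from the newer org.apache.hadoop.mapreduce API, while Example 12 (like Example 10) relies on the JobClient-based overload from the older mapred API. Which overload is available depends on the Hadoop version on the classpath, so the two variants are generally not interchangeable within a single build.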