This article collects typical usage examples of the Java method org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite. If you are wondering what LocalDirAllocator.getLocalPathForWrite does, how to call it, or what it looks like in practice, the curated code samples below should help. You can also explore further usage examples for the containing class, org.apache.hadoop.fs.LocalDirAllocator.
The following shows 15 code examples of LocalDirAllocator.getLocalPathForWrite, sorted by popularity.
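Before the numbered examples, here is a minimal sketch of the basic call pattern. It is not taken from any of the examples below, and the property name demo.local.dirs and the paths in it are invented for illustration. LocalDirAllocator is constructed with the name of a configuration property whose value is a comma-separated list of local directories; getLocalPathForWrite then returns a writable path on one of those directories, and the overload that takes a size hint lets the allocator skip disks that do not have enough free space.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class LocalDirAllocatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The allocator reads a comma-separated list of local directories
    // from the named configuration property (the name here is made up).
    conf.set("demo.local.dirs", "/tmp/demo-a,/tmp/demo-b");
    LocalDirAllocator alloc = new LocalDirAllocator("demo.local.dirs");

    // Ask for a writable path without a size hint ...
    Path small = alloc.getLocalPathForWrite("scratch/part-0", conf);
    // ... or pass the expected size so directories that are too full
    // can be skipped.
    Path large = alloc.getLocalPathForWrite("scratch/part-1", 1024L * 1024, conf);

    System.out.println(small);
    System.out.println(large);
  }
}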
Example 1: setupChildTaskConfiguration
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
/**
* Write the child's configuration to the disk and set it in configuration so
* that the child can pick it up from there.
*
* @param lDirAlloc
* @throws IOException
*/
void setupChildTaskConfiguration(LocalDirAllocator lDirAlloc)
    throws IOException {
  Path localTaskFile =
      lDirAlloc.getLocalPathForWrite(TaskTracker.getTaskConfFile(
          t.getUser(), t.getJobID().toString(), t.getTaskID().toString(),
          t.isTaskCleanupTask()), conf);
  // write the child's task configuration file to the local disk
  JobLocalizer.writeLocalJobFile(localTaskFile, conf);
  // Set the final job file in the task. The child needs to know the correct
  // path to job.xml. So set this path accordingly.
  t.setJobFile(localTaskFile.toString());
}
Example 2: localizeTask
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
static void localizeTask(Task task, JobConf jobConf, String logLocation)
    throws IOException {
  // Do the task-type specific localization
  task.localizeConfiguration(jobConf);
  // Write files required to re-run the task with IsolationRunner
  if (task.supportIsolationRunner(jobConf)) {
    task.writeFilesRequiredForRerun(jobConf);
  }
  // write the localized task jobconf
  LocalDirAllocator lDirAlloc =
      new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
  Path localTaskFile =
      lDirAlloc.getLocalPathForWrite(TaskTracker.JOBFILE, jobConf);
  JobLocalizer.writeLocalJobFile(localTaskFile, jobConf);
  task.setJobFile(localTaskFile.toString());
  task.setConf(jobConf);
}
Example 3: setupChildTaskConfiguration
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
/**
* Write the child's configuration to the disk and set it in configuration so
* that the child can pick it up from there.
*
* @param lDirAlloc
* @throws IOException
*/
void setupChildTaskConfiguration(LocalDirAllocator lDirAlloc)
    throws IOException {
  Path localTaskFile =
      lDirAlloc.getLocalPathForWrite(TaskTracker.getTaskConfFile(
          t.getUser(), t.getJobID().toString(), t.getTaskID().toString(),
          t.isTaskCleanupTask()), conf);
  // write the child's task configuration file to the local disk
  writeLocalTaskFile(localTaskFile.toString(), conf);
  // Set the final job file in the task. The child needs to know the correct
  // path to job.xml. So set this path accordingly.
  t.setJobFile(localTaskFile.toString());
}
Example 4: testUniqueDestinationPath
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Test (timeout=10000)
public void testUniqueDestinationPath() throws Exception {
  Configuration conf = new Configuration();
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  ExecutorService singleThreadedExec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs =
      new LocalDirAllocator(TestFSDownload.class.getName());
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
  destPath =
      new Path(destPath, Long.toString(uniqueNumberGenerator
          .incrementAndGet()));
  Path p = new Path(basedir, "dir" + 0 + ".jar");
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  LocalResource rsrc = createJar(files, p, vis);
  FSDownload fsd =
      new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
          destPath, rsrc);
  Future<Path> rPath = singleThreadedExec.submit(fsd);
  singleThreadedExec.shutdown();
  while (!singleThreadedExec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(rPath.isDone());
  // Now FSDownload will not create a random directory to localize the
  // resource. Therefore the final localizedPath for the resource should be
  // destination directory (passed as an argument) + file name.
  Assert.assertEquals(destPath, rPath.get().getParent());
}
Example 5: initializeJob
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
/**
* This routine initializes the local file system for running a job.
* Details:
* <ul>
* <li>Copies the credentials file from the TaskTracker's private space to
* the job's private space </li>
* <li>Creates the job work directory and set
* {@link TaskTracker#JOB_LOCAL_DIR} in the configuration</li>
* <li>Downloads the job.jar, unjars it, and updates the configuration to
* reflect the localized path of the job.jar</li>
* <li>Creates a base JobConf in the job's private space</li>
* <li>Sets up the distributed cache</li>
* <li>Sets up the user logs directory for the job</li>
* </ul>
* This method must be invoked in the access control context of the job owner
* user. This is because the distributed cache is also setup here and the
* access to the hdfs files requires authentication tokens in case where
* security is enabled.
* @param user the user in question (the job owner)
* @param jobid the ID of the job in question
* @param credentials the path to the credentials file that the TaskTracker
* downloaded
* @param jobConf the path to the job configuration file that the TaskTracker
* downloaded
* @param taskTracker the connection to the task tracker
* @throws IOException
* @throws InterruptedException
*/
@Override
public void initializeJob(String user, String jobid,
    Path credentials, Path jobConf,
    TaskUmbilicalProtocol taskTracker,
    InetSocketAddress ttAddr)
    throws IOException, InterruptedException {
  final LocalDirAllocator lDirAlloc = allocator;
  FileSystem localFs = FileSystem.getLocal(getConf());
  JobLocalizer localizer = new JobLocalizer((JobConf)getConf(), user, jobid);
  localizer.createLocalDirs();
  localizer.createUserDirs();
  localizer.createJobDirs();
  JobConf jConf = new JobConf(jobConf);
  localizer.createWorkDir(jConf);
  // copy the credential file
  Path localJobTokenFile = lDirAlloc.getLocalPathForWrite(
      TaskTracker.getLocalJobTokenFile(user, jobid), getConf());
  FileUtil.copy(
      localFs, credentials, localFs, localJobTokenFile, false, getConf());
  // setup the user logs dir
  localizer.initializeJobLogDir();
  // Download the job.jar for this job from the system FS
  // setup the distributed cache
  // write job acls
  // write localized config
  localizer.localizeJobFiles(JobID.forName(jobid), jConf, localJobTokenFile,
      taskTracker);
}
Example 6: localizeTask
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
static void localizeTask(Task task, JobConf jobConf, String logLocation)
    throws IOException {
  // Do the task-type specific localization
  task.localizeConfiguration(jobConf);
  // write the localized task jobconf
  LocalDirAllocator lDirAlloc =
      new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
  Path localTaskFile =
      lDirAlloc.getLocalPathForWrite(TaskTracker.JOBFILE, jobConf);
  JobLocalizer.writeLocalJobFile(localTaskFile, jobConf);
  task.setJobFile(localTaskFile.toString());
  task.setConf(jobConf);
}
Example 7: MapOutput
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, long size,
    JobConf conf, LocalDirAllocator localDirAllocator,
    int fetcher, boolean primaryMapOutput) throws IOException {
  this.id = ID.incrementAndGet();
  this.mapId = mapId;
  this.merger = merger;
  type = Type.DISK;
  memory = null;
  byteStream = null;
  this.size = size;
  this.localFS = FileSystem.getLocal(conf);
  String filename = "map_" + mapId.getTaskID().getId() + ".out";
  String tmpOutput = Path.SEPARATOR +
      TaskTracker.getJobCacheSubdir(conf.getUser()) +
      Path.SEPARATOR + mapId.getJobID() +
      Path.SEPARATOR + merger.getReduceId() +
      Path.SEPARATOR + "output" +
      Path.SEPARATOR + filename +
      "." + fetcher;
  tmpOutputPath =
      localDirAllocator.getLocalPathForWrite(tmpOutput, size, conf);
  outputPath = new Path(tmpOutputPath.getParent(), filename);
  disk = localFS.create(tmpOutputPath);
  this.primaryMapOutput = primaryMapOutput;
}
Example 8: testDownloadBadPublic
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Test (timeout=10000)
public void testDownloadBadPublic() throws IOException, URISyntaxException,
    InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Map<LocalResource, LocalResourceVisibility> rsrcVis =
      new HashMap<LocalResource, LocalResourceVisibility>();
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs =
      new LocalDirAllocator(TestFSDownload.class.getName());
  int size = 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
  Path path = new Path(basedir, "test-file");
  LocalResource rsrc = createFile(files, path, size, rand, vis);
  rsrcVis.put(rsrc, vis);
  Path destPath = dirs.getLocalPathForWrite(
      basedir.toString(), size, conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsd =
      new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
          destPath, rsrc);
  pending.put(rsrc, exec.submit(fsd));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(pending.get(rsrc).isDone());
  try {
    for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
      p.getValue().get();
      Assert.fail("We localized a file that is not public.");
    }
  } catch (ExecutionException e) {
    Assert.assertTrue(e.getCause() instanceof IOException);
  }
}
Example 9: downloadWithFileType
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
private void downloadWithFileType(TEST_FILE_TYPE fileType)
    throws IOException, URISyntaxException, InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(
      TestFSDownload.class.getName());
  int size = rand.nextInt(512) + 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  Path p = new Path(basedir, "" + 1);
  String strFileName = "";
  LocalResource rsrc = null;
  switch (fileType) {
  case TAR:
    rsrc = createTarFile(files, p, size, rand, vis);
    break;
  case JAR:
    rsrc = createJarFile(files, p, size, rand, vis);
    rsrc.setType(LocalResourceType.PATTERN);
    break;
  case ZIP:
    rsrc = createZipFile(files, p, size, rand, vis);
    strFileName = p.getName() + ".ZIP";
    break;
  case TGZ:
    rsrc = createTgzFile(files, p, size, rand, vis);
    break;
  }
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsd = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
  pending.put(rsrc, exec.submit(fsd));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  try {
    pending.get(rsrc).get(); // see if there was an Exception during download
    FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
        basedir);
    for (FileStatus filestatus : filesstatus) {
      if (filestatus.isDirectory()) {
        FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
            filestatus.getPath());
        for (FileStatus childfile : childFiles) {
          if (strFileName.endsWith(".ZIP") &&
              childfile.getPath().getName().equals(strFileName) &&
              !childfile.isDirectory()) {
            Assert.fail("Failure...After unzip, there should have been a" +
                " directory formed with zip file name but found a file. "
                + childfile.getPath());
          }
          if (childfile.getPath().getName().startsWith("tmp")) {
            Assert.fail("Tmp File should not have been there "
                + childfile.getPath());
          }
        }
      }
    }
  } catch (Exception e) {
    throw new IOException("Failed exec", e);
  }
}
Example 10: downloadWithFileType
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
private void downloadWithFileType(TEST_FILE_TYPE fileType)
    throws IOException, URISyntaxException, InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(
      TestFSDownload.class.getName());
  int size = rand.nextInt(512) + 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  Path p = new Path(basedir, "" + 1);
  LocalResource rsrc = null;
  switch (fileType) {
  case TAR:
    rsrc = createTarFile(files, p, size, rand, vis);
    break;
  case JAR:
    rsrc = createJarFile(files, p, size, rand, vis);
    rsrc.setType(LocalResourceType.PATTERN);
    break;
  case ZIP:
    rsrc = createZipFile(files, p, size, rand, vis);
    break;
  case TGZ:
    rsrc = createTgzFile(files, p, size, rand, vis);
    break;
  }
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsd = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
  pending.put(rsrc, exec.submit(fsd));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  try {
    pending.get(rsrc).get(); // see if there was an Exception during download
    FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
        basedir);
    for (FileStatus filestatus : filesstatus) {
      if (filestatus.isDirectory()) {
        FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
            filestatus.getPath());
        for (FileStatus childfile : childFiles) {
          if (childfile.getPath().getName().startsWith("tmp")) {
            Assert.fail("Tmp File should not have been there "
                + childfile.getPath());
          }
        }
      }
    }
  } catch (Exception e) {
    throw new IOException("Failed exec", e);
  }
}
Example 11: run
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
/**
* Main method.
*/
boolean run(String[] args)
    throws ClassNotFoundException, IOException, InterruptedException {
  if (args.length < 1) {
    System.out.println("Usage: IsolationRunner <path>/job.xml " +
        "<optional-user-name>");
    return false;
  }
  File jobFilename = new File(args[0]);
  if (!jobFilename.exists() || !jobFilename.isFile()) {
    System.out.println(jobFilename + " is not a valid job file.");
    return false;
  }
  String user;
  if (args.length > 1) {
    user = args[1];
  } else {
    user = UserGroupInformation.getCurrentUser().getShortUserName();
  }
  JobConf conf = new JobConf(new Path(jobFilename.toString()));
  conf.setUser(user);
  TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
  if (taskId == null) {
    System.out.println("mapred.task.id not found in configuration;" +
        " job.xml is not a task config");
    return false; // bail out: taskId is dereferenced below
  }
  boolean isMap = conf.getBoolean("mapred.task.is.map", true);
  if (!isMap) {
    System.out.println("Only map tasks are supported.");
    return false;
  }
  int partition = conf.getInt("mapred.task.partition", 0);
  // setup the local and user working directories
  FileSystem local = FileSystem.getLocal(conf);
  LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
  Path workDirName;
  boolean workDirExists = lDirAlloc.ifExists(MRConstants.WORKDIR, conf);
  if (workDirExists) {
    workDirName = TaskRunner.formWorkDir(lDirAlloc, conf);
  } else {
    workDirName = lDirAlloc.getLocalPathForWrite(MRConstants.WORKDIR,
        conf);
  }
  local.setWorkingDirectory(new Path(workDirName.toString()));
  FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
  // set up a classloader with the right classpath
  ClassLoader classLoader =
      makeClassLoader(conf, new File(workDirName.toString()));
  Thread.currentThread().setContextClassLoader(classLoader);
  conf.setClassLoader(classLoader);
  // split.dta file is used only by IsolationRunner. The file can now be in
  // any of the configured local disks, so use LocalDirAllocator to find out
  // where it is.
  Path localMetaSplit =
      new LocalDirAllocator("mapred.local.dir").getLocalPathToRead(
          TaskTracker.getLocalSplitFile(conf.getUser(), taskId.getJobID()
              .toString(), taskId.toString()), conf);
  DataInputStream splitFile = FileSystem.getLocal(conf).open(localMetaSplit);
  TaskSplitIndex splitIndex = new TaskSplitIndex();
  splitIndex.readFields(splitFile);
  splitFile.close();
  Task task =
      new MapTask(jobFilename.toString(), taskId, partition, splitIndex, 1);
  task.setConf(conf);
  task.run(conf, new FakeUmbilical());
  return true;
}
Example 12: truncateLogsAsUser
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts)
    throws IOException {
  Task firstTask = allAttempts.get(0);
  String taskid = firstTask.getTaskID().toString();
  LocalDirAllocator ldirAlloc =
      new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
  String taskRanFile = TaskTracker.TT_LOG_TMP_DIR + Path.SEPARATOR + taskid;
  Configuration conf = getConf();
  // write the serialized task information to a file to pass to the truncater
  Path taskRanFilePath =
      ldirAlloc.getLocalPathForWrite(taskRanFile, conf);
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream out = lfs.create(taskRanFilePath);
  out.writeInt(allAttempts.size());
  for (Task t : allAttempts) {
    out.writeBoolean(t.isMapTask());
    t.write(out);
  }
  out.close();
  lfs.setPermission(taskRanFilePath,
      FsPermission.createImmutable((short)0755));
  List<String> command = new ArrayList<String>();
  File jvm = // use same jvm as parent
      new File(new File(System.getProperty("java.home"), "bin"), "java");
  command.add(jvm.toString());
  command.add("-Djava.library.path=" +
      System.getProperty("java.library.path"));
  command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
  command.add("-Dhadoop.root.logger=INFO,console");
  command.add("-classpath");
  command.add(System.getProperty("java.class.path"));
  // main of TaskLogsTruncater
  command.add(TaskLogsTruncater.class.getName());
  command.add(taskRanFilePath.toString());
  String[] taskControllerCmd = new String[4 + command.size()];
  taskControllerCmd[0] = taskControllerExe;
  taskControllerCmd[1] = user;
  taskControllerCmd[2] = localStorage.getDirsString();
  taskControllerCmd[3] = Integer.toString(
      Commands.RUN_COMMAND_AS_USER.getValue());
  int i = 4;
  for (String cmdArg : command) {
    taskControllerCmd[i++] = cmdArg;
  }
  if (LOG.isDebugEnabled()) {
    for (String cmd : taskControllerCmd) {
      LOG.debug("taskctrl command = " + cmd);
    }
  }
  ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd);
  try {
    shExec.execute();
  } catch (Exception e) {
    LOG.warn("Exit code from " + taskControllerExe.toString() + " is : "
        + shExec.getExitCode() + " for truncateLogs");
    LOG.warn("Exception thrown by " + taskControllerExe.toString() + " : "
        + StringUtils.stringifyException(e));
    LOG.info("Output from LinuxTaskController's "
        + taskControllerExe.toString() + " follows:");
    logOutput(shExec.getOutput());
    lfs.delete(taskRanFilePath, false);
    throw new IOException(e);
  }
  lfs.delete(taskRanFilePath, false);
  if (LOG.isDebugEnabled()) {
    LOG.info("Output from LinuxTaskController's "
        + taskControllerExe.toString() + " follows:");
    logOutput(shExec.getOutput());
  }
}
Example 13: testDownloadArchive
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Test (timeout=10000)
public void testDownloadArchive() throws IOException, URISyntaxException,
    InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(
      TestFSDownload.class.getName());
  int size = rand.nextInt(512) + 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  Path p = new Path(basedir, "" + 1);
  LocalResource rsrc = createTarFile(files, p, size, rand, vis);
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsd = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
  pending.put(rsrc, exec.submit(fsd));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(pending.get(rsrc).isDone());
  try {
    FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
        basedir);
    for (FileStatus filestatus : filesstatus) {
      if (filestatus.isDirectory()) {
        FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
            filestatus.getPath());
        for (FileStatus childfile : childFiles) {
          if (childfile.getPath().getName().equalsIgnoreCase("1.tar.tmp")) {
            Assert.fail("Tmp File should not have been there "
                + childfile.getPath());
          }
        }
      }
    }
  } catch (Exception e) {
    throw new IOException("Failed exec", e);
  }
}
Example 14: testDownloadPatternJar
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Test (timeout=10000)
public void testDownloadPatternJar() throws IOException, URISyntaxException,
    InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(
      TestFSDownload.class.getName());
  int size = rand.nextInt(512) + 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  Path p = new Path(basedir, "" + 1);
  LocalResource rsrcjar = createJarFile(files, p, size, rand, vis);
  rsrcjar.setType(LocalResourceType.PATTERN);
  Path destPathjar = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPathjar = new Path(destPathjar,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsdjar = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPathjar, rsrcjar);
  pending.put(rsrcjar, exec.submit(fsdjar));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(pending.get(rsrcjar).isDone());
  try {
    FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
        basedir);
    for (FileStatus filestatus : filesstatus) {
      if (filestatus.isDirectory()) {
        FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
            filestatus.getPath());
        for (FileStatus childfile : childFiles) {
          if (childfile.getPath().getName().equalsIgnoreCase("1.jar.tmp")) {
            Assert.fail("Tmp File should not have been there "
                + childfile.getPath());
          }
        }
      }
    }
  } catch (Exception e) {
    throw new IOException("Failed exec", e);
  }
}
Example 15: testDownloadArchiveZip
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class this method depends on
@Test (timeout=10000)
public void testDownloadArchiveZip() throws IOException, URISyntaxException,
    InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(
      TestFSDownload.class.getName());
  int size = rand.nextInt(512) + 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  Path p = new Path(basedir, "" + 1);
  LocalResource rsrczip = createZipFile(files, p, size, rand, vis);
  Path destPathjar = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPathjar = new Path(destPathjar,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsdzip = new FSDownload(files,
      UserGroupInformation.getCurrentUser(), conf, destPathjar, rsrczip);
  pending.put(rsrczip, exec.submit(fsdzip));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(pending.get(rsrczip).isDone());
  try {
    FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
        basedir);
    for (FileStatus filestatus : filesstatus) {
      if (filestatus.isDirectory()) {
        FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
            filestatus.getPath());
        for (FileStatus childfile : childFiles) {
          if (childfile.getPath().getName().equalsIgnoreCase("1.gz.tmp")) {
            Assert.fail("Tmp File should not have been there "
                + childfile.getPath());
          }
        }
      }
    }
  } catch (Exception e) {
    throw new IOException("Failed exec", e);
  }
}