This article collects typical usage examples of the Java class org.apache.hadoop.util.Shell. If you are wondering what the Shell class is for and how to use it, the curated examples below may help.
The Shell class belongs to the org.apache.hadoop.util package. 15 code examples are shown, ordered roughly by popularity.
Example 1: stashOriginalFilePermissions

import org.apache.hadoop.util.Shell; // import the required package/class

@Override
protected void stashOriginalFilePermissions() throws IOException {
  // save off permissions in case we need to
  // rewrite the keystore in flush()
  if (!Shell.WINDOWS) {
    Path path = Paths.get(file.getCanonicalPath());
    permissions = Files.getPosixFilePermissions(path);
  } else {
    // On Windows, the JDK does not support the POSIX file permission APIs.
    // Instead, we can do a winutils call and translate.
    String[] cmd = Shell.getGetPermissionCommand();
    String[] args = new String[cmd.length + 1];
    System.arraycopy(cmd, 0, args, 0, cmd.length);
    args[cmd.length] = file.getCanonicalPath();
    String out = Shell.execCommand(args);
    StringTokenizer t = new StringTokenizer(out, Shell.TOKEN_SEPARATOR_REGEX);
    // The winutils output consists of 10 characters because of the leading
    // directory indicator, i.e. "drwx------". The JDK parsing method expects
    // a 9-character string, so remove the leading character.
    String permString = t.nextToken().substring(1);
    permissions = PosixFilePermissions.fromString(permString);
  }
}
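The 10-to-9 character translation in the Windows branch is easy to verify in isolation. Below is a minimal, self-contained sketch; the winutilsToken value is a made-up sample, not captured winutils output:

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class WinutilsPermStringDemo {
  public static void main(String[] args) {
    // winutils-style listing: a file-type indicator plus 9 permission chars.
    String winutilsToken = "drwx------";
    // Strip the leading type indicator before handing it to the JDK parser.
    Set<PosixFilePermission> perms =
        PosixFilePermissions.fromString(winutilsToken.substring(1));
    System.out.println(perms); // e.g. [OWNER_READ, OWNER_WRITE, OWNER_EXECUTE]
  }
}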
Example 2: flush

import org.apache.hadoop.util.Shell; // import the required package/class

@Override
public void flush() throws IOException {
  super.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Resetting permissions to '" + permissions + "'");
  }
  if (!Shell.WINDOWS) {
    Files.setPosixFilePermissions(Paths.get(file.getCanonicalPath()),
        permissions);
  } else {
    // FsPermission expects a 10-character string because of the leading
    // directory indicator, i.e. "drwx------". The JDK toString method returns
    // a 9-character string, so prepend a leading character.
    FsPermission fsPermission = FsPermission.valueOf(
        "-" + PosixFilePermissions.toString(permissions));
    FileUtil.setPermission(file, fsPermission);
  }
}
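Example 2 performs the reverse translation of Example 1: the JDK's 9-character permission string must grow a leading file-type character before FsPermission can parse it. A small sketch of that round trip, assuming a regular file (hence the leading '-'):

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionRoundTrip {
  public static void main(String[] args) {
    Set<PosixFilePermission> perms =
        PosixFilePermissions.fromString("rw-r-----");
    // PosixFilePermissions.toString yields 9 chars; FsPermission.valueOf
    // wants the 10-char ls-style form, so prepend '-' for a regular file.
    FsPermission fsPermission =
        FsPermission.valueOf("-" + PosixFilePermissions.toString(perms));
    System.out.println(fsPermission); // rw-r-----
  }
}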
Example 3: testWindowsShellScriptBuilderMkdir

import org.apache.hadoop.util.Shell; // import the required package/class

@Test (timeout = 10000)
public void testWindowsShellScriptBuilderMkdir() throws IOException {
  String mkDirCmd = "@if not exist \"\" mkdir \"\"";
  // Test is only relevant on Windows
  Assume.assumeTrue(Shell.WINDOWS);
  // The tests are built on assuming 8191 max command line length
  // (note: the constant name is misspelled, "LENGHT", in Hadoop itself)
  assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);
  ShellScriptBuilder builder = ShellScriptBuilder.create();
  // test mkdir
  builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)));
  builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
      "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2)));
  try {
    builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
        "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2 + 1)));
    fail("long mkdir was expected to throw");
  } catch (IOException e) {
    // expectedMessage is a field of the enclosing test class
    assertThat(e.getMessage(), CoreMatchers.containsString(expectedMessage));
  }
}
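The division by 2 in the generated path lengths is not arbitrary: the mkDirCmd template above quotes the directory path twice ("@if not exist \"path\" mkdir \"path\""), so a path of (8191 - mkDirCmd.length()) / 2 characters fills the command line exactly, and one more character pushes it past Shell.WINDOWS_MAX_SHELL_LENGHT, which is why the third mkdir is expected to throw.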
Example 4: serviceStop

import org.apache.hadoop.util.Shell; // import the required package/class

@Override
protected synchronized void serviceStop() throws Exception {
  if (resourceManagers[index] != null) {
    waitForAppMastersToFinish(5000);
    resourceManagers[index].stop();
  }
  if (Shell.WINDOWS) {
    // On Windows, clean up the short temporary symlink that was created to
    // work around the path length limitation.
    String testWorkDirPath = testWorkDir.getAbsolutePath();
    try {
      FileContext.getLocalFSFileContext().delete(new Path(testWorkDirPath),
          true);
    } catch (IOException e) {
      LOG.warn("could not cleanup symlink: " +
          testWorkDir.getAbsolutePath());
    }
  }
  super.serviceStop();
}
Example 5: testRunCommandWithCpuAndMemoryResources

import org.apache.hadoop.util.Shell; // import the required package/class

@Test (timeout = 5000)
public void testRunCommandWithCpuAndMemoryResources() {
  // Windows only test
  assumeTrue(Shell.WINDOWS);
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
  String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
      conf, Resource.newInstance(1024, 1));
  float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
      conf);
  int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors));
  // Assert the cpu and memory limits are set correctly in the command
  String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
      String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
  Assert.assertTrue(Arrays.equals(expected, command));
}
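The -c argument passed to winutils here is a CPU rate out of 10000. For example, on a node where yarnProcessors evaluates to 8, a container requesting 1 vcore (the second argument to Resource.newInstance) gets cpuRate = min(10000, 10000 / 8) = 1250, i.e. 12.5% of the machine's total CPU.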
Example 6: getExecString

import org.apache.hadoop.util.Shell; // import the required package/class

@Override
protected String[] getExecString() {
  String derefFlag = "-";
  if (dereference) {
    derefFlag = "-L";
  }
  if (Shell.LINUX) {
    // derefFlag + "c" yields "-c" or "-Lc" (GNU stat format flag)
    return new String[] {
        "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() };
  } else if (Shell.FREEBSD || Shell.MAC) {
    // BSD stat uses "-f" and a different format vocabulary
    return new String[] {
        "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
        path.toString() };
  } else {
    throw new UnsupportedOperationException(
        "stat is not supported on this platform");
  }
}
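A minimal sketch of running a platform-specific stat command like the one built above, via Shell.execCommand; the /tmp path is just an illustrative choice, and the BSD format string is shortened for readability:

import org.apache.hadoop.util.Shell;

public class StatCommandDemo {
  public static void main(String[] args) throws Exception {
    String path = "/tmp"; // hypothetical target; pick any existing path
    String[] cmd;
    if (Shell.LINUX) {
      cmd = new String[] { "stat", "-c", "%s,%F,%Y,%X,%a,%U,%G,%N", path };
    } else if (Shell.FREEBSD || Shell.MAC) {
      cmd = new String[] { "stat", "-f", "%z,%HT,%m,%a,%Op,%Su,%Sg", path };
    } else {
      throw new UnsupportedOperationException("stat not supported here");
    }
    // execCommand runs the process and returns its stdout as a String
    System.out.println(Shell.execCommand(cmd));
  }
}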
Example 7: createUnhaltingScriptFile

import org.apache.hadoop.util.Shell; // import the required package/class

/**
 * Creates a script to run a container that will run forever unless
 * stopped by external means.
 */
private static File createUnhaltingScriptFile(ContainerId cId,
    File scriptFileDir, File processStartFile) throws IOException {
  File scriptFile = Shell.appendScriptExtension(scriptFileDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  if (Shell.WINDOWS) {
    fileWriter.println("@echo \"Running testscript for delayed kill\"");
    fileWriter.println("@echo \"Writing pid to start file\"");
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@pause");
  } else {
    fileWriter.write("#!/bin/bash\n\n");
    fileWriter.write("echo \"Running testscript for delayed kill\"\n");
    fileWriter.write("hello=\"Got SIGTERM\"\n");
    fileWriter.write("umask 0\n");
    fileWriter.write("trap \"echo $hello >> " + processStartFile +
        "\" SIGTERM\n");
    fileWriter.write("echo \"Writing pid to start file\"\n");
    fileWriter.write("echo $$ >> " + processStartFile + "\n");
    fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
  }
  fileWriter.close();
  return scriptFile;
}
Example 8: rename

import org.apache.hadoop.util.Shell; // import the required package/class

@Override
public boolean rename(Path src, Path dst) throws IOException {
  // Attempt rename using Java API.
  File srcFile = pathToFile(src);
  File dstFile = pathToFile(dst);
  if (srcFile.renameTo(dstFile)) {
    return true;
  }
  // Else try POSIX-style rename on Windows only
  if (Shell.WINDOWS &&
      handleEmptyDstDirectoryOnWindows(src, srcFile, dst, dstFile)) {
    return true;
  }
  // The fallback behavior accomplishes the rename by a full copy.
  if (LOG.isDebugEnabled()) {
    LOG.debug("Falling through to a copy of " + src + " to " + dst);
  }
  return FileUtil.copy(this, src, this, dst, true, getConf());
}
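One reason for the layered fallback is that File.renameTo only reports failure as a boolean, with no cause. A sketch of the java.nio.file alternative, which throws a descriptive exception instead; the paths are hypothetical:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class NioMoveDemo {
  public static void main(String[] args) throws IOException {
    // Unlike File.renameTo, Files.move reports failures via exceptions
    // (e.g. NoSuchFileException) instead of silently returning false.
    Files.move(Paths.get("/tmp/src-file"), Paths.get("/tmp/dst-file"),
        StandardCopyOption.REPLACE_EXISTING);
  }
}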
Example 9: createDefaultChannelConnector

import org.apache.hadoop.util.Shell; // import the required package/class

@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector ret = new SelectChannelConnector();
  ret.setLowResourceMaxIdleTime(10000);
  ret.setAcceptQueueSize(128);
  ret.setResolveNames(false);
  ret.setUseDirectBuffers(false);
  if (Shell.WINDOWS) {
    // The result of setting the SO_REUSEADDR flag is different on Windows
    // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
    // Without this, two NNs can start on the same machine and listen on
    // the same port with indeterminate routing of incoming requests to them.
    ret.setReuseAddress(false);
  }
  ret.setHeaderBufferSize(1024 * 64);
  return ret;
}
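The same flag exists on plain JDK sockets, so the Windows behavior can be reasoned about outside Jetty. A minimal sketch (port 0 means "any free port", chosen here just so the snippet runs anywhere):

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class ReuseAddressDemo {
  public static void main(String[] args) throws Exception {
    ServerSocket ss = new ServerSocket();
    // Must be set before bind. On Windows, SO_REUSEADDR lets a second socket
    // bind an already-bound port; on POSIX it merely allows fast rebinding
    // of a port still in TIME_WAIT.
    ss.setReuseAddress(false);
    ss.bind(new InetSocketAddress(0));
    System.out.println("bound: " + ss.getLocalSocketAddress());
    ss.close();
  }
}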
Example 10: chmod

import org.apache.hadoop.util.Shell; // import the required package/class

public static void chmod(String path, int mode) throws IOException {
  if (!Shell.WINDOWS) {
    chmodImpl(path, mode);
  } else {
    try {
      chmodImpl(path, mode);
    } catch (NativeIOException nioe) {
      // Windows error code 3 is ERROR_PATH_NOT_FOUND; translate it to the
      // POSIX equivalent so callers see a consistent errno.
      if (nioe.getErrorCode() == 3) {
        throw new NativeIOException("No such file or directory",
            Errno.ENOENT);
      } else {
        LOG.warn(String.format("NativeIO.chmod error (%d): %s",
            nioe.getErrorCode(), nioe.getMessage()));
        throw new NativeIOException("Unknown error", Errno.UNKNOWN);
      }
    }
  }
}
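When the native library is not available, Hadoop also offers a shell-based fallback. A minimal sketch using FileUtil.chmod, which execs chmod (or winutils on Windows) under the hood; the path is hypothetical:

import org.apache.hadoop.fs.FileUtil;

public class ChmodDemo {
  public static void main(String[] args) throws Exception {
    // Returns the exit code of the underlying chmod/winutils process.
    int exitCode = FileUtil.chmod("/tmp/example-file", "644");
    System.out.println("chmod exit code: " + exitCode);
  }
}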
Example 11: getOwner

import org.apache.hadoop.util.Shell; // import the required package/class

public static String getOwner(FileDescriptor fd) throws IOException {
  ensureInitialized();
  if (Shell.WINDOWS) {
    String owner = Windows.getOwner(fd);
    owner = stripDomain(owner);
    return owner;
  } else {
    long uid = POSIX.getUIDforFDOwnerforOwner(fd);
    CachedUid cUid = uidCache.get(uid);
    long now = System.currentTimeMillis();
    // Serve from the cache while the entry is younger than cacheTimeout
    if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
      return cUid.username;
    }
    String user = POSIX.getUserName(uid);
    LOG.info("Got UserName " + user + " for UID " + uid
        + " from the native implementation");
    cUid = new CachedUid(user, now);
    uidCache.put(uid, cUid);
    return user;
  }
}
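The snippet relies on a small timestamped cache entry whose definition is not shown here. From the usage above, its shape is presumably roughly this sketch (the real CachedUid is a private nested class inside NativeIO, so this is an inference, not the actual source):

// Assumed shape, inferred from the fields read and the constructor call above.
static class CachedUid {
  final String username;
  final long timestamp;

  CachedUid(String username, long timestamp) {
    this.username = username;
    this.timestamp = timestamp;
  }
}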
Example 12: getShareDeleteFileInputStream

import org.apache.hadoop.util.Shell; // import the required package/class

/**
 * Create a FileInputStream that shares delete permission on the
 * file opened at a given offset, i.e. another process can delete
 * the file while the FileInputStream is reading it. Only the Windows
 * implementation uses the native interface.
 */
public static FileInputStream getShareDeleteFileInputStream(File f, long seekOffset)
    throws IOException {
  if (!Shell.WINDOWS) {
    RandomAccessFile rf = new RandomAccessFile(f, "r");
    if (seekOffset > 0) {
      rf.seek(seekOffset);
    }
    return new FileInputStream(rf.getFD());
  } else {
    // Use the Windows native interface to create a FileInputStream that
    // shares delete permission on the file opened, and set it to the
    // given offset.
    FileDescriptor fd = NativeIO.Windows.createFile(
        f.getAbsolutePath(),
        NativeIO.Windows.GENERIC_READ,
        NativeIO.Windows.FILE_SHARE_READ |
        NativeIO.Windows.FILE_SHARE_WRITE |
        NativeIO.Windows.FILE_SHARE_DELETE,
        NativeIO.Windows.OPEN_EXISTING);
    if (seekOffset > 0) {
      NativeIO.Windows.setFilePointer(fd, seekOffset, NativeIO.Windows.FILE_BEGIN);
    }
    return new FileInputStream(fd);
  }
}
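The POSIX branch above is plain JDK code and works standalone. A minimal sketch of opening a stream at an offset the same way; the file path and offset are made up:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

public class OffsetStreamDemo {
  public static void main(String[] args) throws IOException {
    RandomAccessFile rf = new RandomAccessFile(new File("/tmp/data.bin"), "r");
    rf.seek(128); // start reading at byte 128
    // The FileInputStream shares rf's descriptor, so it starts at the offset;
    // closing either object releases the same underlying descriptor.
    FileInputStream in = new FileInputStream(rf.getFD());
    System.out.println("first byte at offset: " + in.read());
    in.close();
  }
}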
Example 13: test0

import org.apache.hadoop.util.Shell; // import the required package/class

/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * the second dir exists & is RW.
 * @throws Exception
 */
@Test (timeout = 30000)
public void test0() throws Exception {
  if (isWindows) return;
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    validateTempDirCreation(dir1);
    validateTempDirCreation(dir1);
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
        BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
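The cleanup in the finally block (repeated in the two tests that follow) uses Shell.getSetPermissionCommand to build a platform-appropriate command array instead of hardcoding chmod. A minimal sketch; the /tmp/bufdir path is an arbitrary example:

import java.util.Arrays;
import org.apache.hadoop.util.Shell;

public class SetPermissionCommandDemo {
  public static void main(String[] args) throws Exception {
    // On Linux this yields roughly [chmod, u+w, /tmp/bufdir]; on Windows it
    // delegates to winutils, and recursive=true adds the recursion flag.
    String[] cmd = Shell.getSetPermissionCommand("u+w", false, "/tmp/bufdir");
    System.out.println(Arrays.toString(cmd));
    Shell.execCommand(cmd); // runs it; throws on a non-zero exit code
  }
}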
Example 14: testROBufferDirAndRWBufferDir

import org.apache.hadoop.util.Shell; // import the required package/class

/**
 * Two buffer dirs. The first dir exists & is on a read-only disk;
 * the second dir exists & is RW.
 * @throws Exception
 */
@Test (timeout = 30000)
public void testROBufferDirAndRWBufferDir() throws Exception {
  if (isWindows) return;
  String dir1 = buildBufferDir(ROOT, 1);
  String dir2 = buildBufferDir(ROOT, 2);
  try {
    conf.set(CONTEXT, dir1 + "," + dir2);
    assertTrue(localFs.mkdirs(new Path(dir2)));
    BUFFER_ROOT.setReadOnly();
    validateTempDirCreation(dir2);
    validateTempDirCreation(dir2);
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
        BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Example 15: testNoSideEffects

import org.apache.hadoop.util.Shell; // import the required package/class

/**
 * Test that no side-effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that
 * no files or directories are left over, as can happen when File objects
 * are mistakenly created from fully-qualified path strings.
 * @throws IOException
 */
@Test (timeout = 30000)
public void testNoSideEffects() throws IOException {
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
    assertTrue(result.delete());
    assertTrue(result.getParentFile().delete());
    assertFalse(new File(dir).exists());
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
        BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}