This article collects typical usage examples of the Java class org.apache.hadoop.fs.Options. If you are wondering what the Options class is for and how to use it, the curated code examples below should help.
The Options class belongs to the org.apache.hadoop.fs package. The 15 code examples below show how it is used in practice, sorted by popularity by default.
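Options itself is only a holder for nested option classes: the two members the examples below exercise most are Options.Rename (flags controlling FileContext#rename) and Options.CreateOpts (varargs options for FileContext#create). As a quick orientation, here is a minimal self-contained sketch of both; the paths are placeholders rather than part of any example.

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class OptionsQuickStart {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();

    // Options.CreateOpts: variadic create() options; here, create any
    // missing parent directories.
    Path src = new Path("/tmp/options-demo/a"); // placeholder path
    FSDataOutputStream out = fc.create(src,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        Options.CreateOpts.createParent());
    out.write("hello".getBytes());
    out.close();

    // Options.Rename: OVERWRITE replaces an existing destination;
    // NONE (the default) fails if the destination exists.
    Path dst = new Path("/tmp/options-demo/b"); // placeholder path
    fc.rename(src, dst, Options.Rename.OVERWRITE);
  }
}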
Example 1: rename

import org.apache.hadoop.fs.Options; // import the Options class

/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
  try {
    namenode.rename2(src, dst, options);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        DSQuotaExceededException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
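Examples 1, 3, and 5 are the same DFSClient method as it evolved across Hadoop releases. Application code normally does not call DFSClient directly; the public route to an Options.Rename-aware rename is FileContext, which surfaces the unwrapped exceptions shown above. A minimal caller-side sketch with placeholder paths:

import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class RenameCaller {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    Path src = new Path("/data/a"); // placeholder path
    Path dst = new Path("/data/b"); // placeholder path
    try {
      // Rename.NONE (the default) fails if dst already exists.
      fc.rename(src, dst, Options.Rename.NONE);
    } catch (FileAlreadyExistsException e) {
      // Retry, replacing the stale destination.
      fc.rename(src, dst, Options.Rename.OVERWRITE);
    }
  }
}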
Example 2: rename2

import org.apache.hadoop.fs.Options; // import the Options class

@Override // ClientProtocol
public void rename2(String src, String dst, Options.Rename... options)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
  }
  if (!checkPathLength(dst)) {
    throw new IOException("rename: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  try {
    namesystem.renameTo(src, dst, cacheEntry != null, options);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
  metrics.incrFilesRenamed();
}
Example 3: rename

import org.apache.hadoop.fs.Options; // import the Options class

/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  try (TraceScope ignored = newSrcDstTraceScope("rename2", src, dst)) {
    namenode.rename2(src, dst, options);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        DSQuotaExceededException.class,
        QuotaByStorageTypeExceededException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
Example 4: rename2

import org.apache.hadoop.fs.Options; // import the Options class

@Override // ClientProtocol
public void rename2(String src, String dst, Options.Rename... options)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
  }
  if (!checkPathLength(dst)) {
    throw new IOException("rename: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  try {
    namesystem.renameTo(src, dst, cacheEntry != null, options);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
  metrics.incrFilesRenamed();
}
Example 5: rename

import org.apache.hadoop.fs.Options; // import the Options class

/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  try {
    namenode.rename2(src, dst, options);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        DSQuotaExceededException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
Example 6: renameTo

import org.apache.hadoop.fs.Options; // import the Options class

/**
 * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
 */
void renameTo(String src, String dst, long mtime,
    BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
    throws FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, QuotaExceededException,
    UnresolvedLinkException, IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  writeLock();
  try {
    if (unprotectedRenameTo(src, dst, mtime, collectedBlocks, options)) {
      namesystem.incrDeletedFileCount(1);
    }
  } finally {
    writeUnlock();
  }
}
Example 7: renameToInternal

import org.apache.hadoop.fs.Options; // import the Options class

private void renameToInternal(FSPermissionChecker pc, String src,
    String dst, boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks,
    Options.Rename... options) throws IOException {
  assert hasWriteLock();
  if (isPermissionEnabled) {
    // Rename does not operate on link targets
    // Do not resolveLink when checking permissions of src and dst
    // Check write access to parent of src
    checkPermission(pc, src, false, null, FsAction.WRITE, null, null, false,
        false);
    // Check write access to ancestor of dst
    checkPermission(pc, dst, false, FsAction.WRITE, null, null, null, false,
        false);
  }
  waitForLoadingFSImage();
  long mtime = now();
  dir.renameTo(src, dst, mtime, collectedBlocks, options);
  getEditLog().logRename(src, dst, mtime, logRetryCache, options);
}
Example 8: rename2

import org.apache.hadoop.fs.Options; // import the Options class

@Override
public Rename2ResponseProto rename2(RpcController controller,
    Rename2RequestProto req) throws ServiceException {
  try {
    Options.Rename[] options;
    if (req.hasKeepEncodingStatus() && req.getKeepEncodingStatus()) {
      options = new Rename[2];
      options[1] = Rename.KEEP_ENCODING_STATUS;
    } else {
      options = new Rename[1];
    }
    options[0] = req.getOverwriteDest() ? Rename.OVERWRITE : Rename.NONE;
    server.rename2(req.getSrc(), req.getDst(), options);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_RENAME2_RESPONSE;
}
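The translator above sizes the Rename[] array by hand. For comparison, a sketch of the same mapping with a java.util.List (same req and server objects as in the snippet; note that KEEP_ENCODING_STATUS is not part of the stock Apache Hadoop Rename enum but comes from the fork this example was extracted from):

// Sketch only; requires java.util.List and java.util.ArrayList.
List<Options.Rename> opts = new ArrayList<>();
opts.add(req.getOverwriteDest() ? Options.Rename.OVERWRITE : Options.Rename.NONE);
if (req.hasKeepEncodingStatus() && req.getKeepEncodingStatus()) {
  opts.add(Options.Rename.KEEP_ENCODING_STATUS); // fork-specific value
}
server.rename2(req.getSrc(), req.getDst(), opts.toArray(new Options.Rename[0]));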
Example 9: getOutputStream

import org.apache.hadoop.fs.Options; // import the Options class

private DataOutputStream getOutputStream(FileSystemWALPointer pointer) throws IOException
{
  Preconditions.checkArgument(outputStream == null, "output stream is not null");
  if (pointer.offset > 0 && (fileSystemWAL.fileContext.getDefaultFileSystem() instanceof LocalFs ||
      fileSystemWAL.fileContext.getDefaultFileSystem() instanceof RawLocalFs)) {
    // On a local file system the stream is always closed and never flushed, so we open it again
    // in append mode if the offset > 0. This block is entered only when appending to the WAL
    // while writing on a local FS.
    return fileSystemWAL.fileContext.create(new Path(fileSystemWAL.tempPartFiles.get(pointer.partNum)),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND), Options.CreateOpts.CreateParent.createParent());
  }
  String partFile = fileSystemWAL.getPartFilePath(pointer.partNum);
  String tmpFilePath = createTmpFilePath(partFile);
  fileSystemWAL.tempPartFiles.put(pointer.partNum, tmpFilePath);
  Preconditions.checkArgument(pointer.offset == 0, "offset > 0");
  LOG.debug("open {} => {}", pointer.partNum, tmpFilePath);
  outputStream = fileSystemWAL.fileContext.create(new Path(tmpFilePath),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), Options.CreateOpts.CreateParent.createParent());
  return outputStream;
}
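The first branch above reopens a partially written file in append mode on local file systems. A standalone sketch of the same create-then-append pattern with Options.CreateOpts, under the assumption that the default (local) FileContext supports CREATE + APPEND as the snippet's file systems do; the path is a placeholder:

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class WalAppendSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    Path part = new Path("/tmp/wal-demo/part-0"); // placeholder path

    // First open: create the file and any missing parent directories.
    FSDataOutputStream out = fc.create(part,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        Options.CreateOpts.createParent());
    out.write(new byte[] {1, 2, 3});
    out.close();

    // Reopen past offset 0: CREATE + APPEND appends when the file exists.
    out = fc.create(part,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND),
        Options.CreateOpts.createParent());
    out.write(new byte[] {4, 5, 6});
    out.close();
  }
}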
Example 10: testCopyPartialHelper

import org.apache.hadoop.fs.Options; // import the Options class

private void testCopyPartialHelper(int dataSize, int offset, long size) throws IOException
{
  FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
  File file = new File("target/IOUtilsTest/testCopyPartial/input");
  createDataFile(file, dataSize);
  FileContext fileContext = FileContext.getFileContext();
  DataInputStream inputStream = fileContext.open(new Path(file.getAbsolutePath()));
  Path output = new Path("target/IOUtilsTest/testCopyPartial/output");
  DataOutputStream outputStream = fileContext.create(output,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), Options.CreateOpts.CreateParent.createParent());
  if (offset == 0) {
    IOUtils.copyPartial(inputStream, size, outputStream);
  } else {
    IOUtils.copyPartial(inputStream, offset, size, outputStream);
  }
  outputStream.close();
  Assert.assertTrue("output exists", fileContext.util().exists(output));
  Assert.assertEquals("output size", size, fileContext.getFileStatus(output).getLen());
  // FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
}
Example 11: renameTo

import org.apache.hadoop.fs.Options; // import the Options class

/**
 * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
 */
void renameTo(String src, String dst, boolean logRetryCache,
    Options.Rename... options)
    throws FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, QuotaExceededException,
    UnresolvedLinkException, IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  waitForReady();
  long now = now();
  writeLock();
  try {
    if (unprotectedRenameTo(src, dst, now, options)) {
      incrDeletedFileCount(1);
    }
  } finally {
    writeUnlock();
  }
  fsImage.getEditLog().logRename(src, dst, now, logRetryCache, options);
}
Example 12: rename2

import org.apache.hadoop.fs.Options; // import the Options class

@Override // ClientProtocol
public void rename2(String src, String dst, Options.Rename... options)
    throws IOException {
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
  }
  if (!checkPathLength(dst)) {
    throw new IOException(
        "rename: Pathname too long. Limit " + MAX_PATH_LENGTH +
        " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  if (namesystem.isLegacyRenameEnabled()) {
    namesystem.renameTo(src, dst, options);
  } else {
    namesystem.multiTransactionalRename(src, dst, options);
  }
  metrics.incrFilesRenamed();
}
Example 13: renameDirectory

import org.apache.hadoop.fs.Options; // import the Options class

public void renameDirectory(final Path src, final Path dst, final boolean trashIfExists)
{
  run(new Retryable<Void>()
  {
    @Override
    public Void call()
        throws Exception
    {
      if (fs.exists(dst)) {
        if (!trashIfExists) {
          throw new DataException(String.format("Directory Exists: %s", dst.toString()));
        }
        logger.info("Move To Trash: {}", dst);
        if (!trash(dst)) {
          throw new IllegalStateException(String.format("Failed to Move To Trash: %s", dst.toString()));
        }
      }
      FileContext.getFileContext(conf).rename(src, dst, Options.Rename.NONE);
      logger.debug("Rename: {} >>> {}", src, dst);
      return null;
    }
  });
}
Example 14: testCreateBase

import org.apache.hadoop.fs.Options; // import the Options class

/** @throws Exception If failed. */
public void testCreateBase() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
  Path file = new Path(dir, "someFile");

  assertPathDoesNotExist(fs, file);

  FsPermission fsPerm = new FsPermission((short)0644); // octal: rw-r--r--
  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(fsPerm));

  // Try to write something in file.
  os.write("abc".getBytes());
  os.close();

  // Check file status.
  FileStatus fileStatus = fs.getFileStatus(file);

  assertFalse(fileStatus.isDirectory());
  assertEquals(file, fileStatus.getPath());
  assertEquals(fsPerm, fileStatus.getPermission());
}
Example 15: testDeleteRecursively

import org.apache.hadoop.fs.Options; // import the Options class

/** @throws Exception If failed. */
public void testDeleteRecursively() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

  FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();

  Path someDir2 = new Path(fsHome, "/someDir1/someDir2");

  assertTrue(fs.delete(someDir2, true));

  assertPathDoesNotExist(fs, someDir2);
  assertPathDoesNotExist(fs, someDir3);
}