

Java FsPermission.getDefault Method Code Examples

This article collects typical code examples of the org.apache.hadoop.fs.permission.FsPermission.getDefault method in Java. If you are unsure what FsPermission.getDefault does or how to use it, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.fs.permission.FsPermission.


The following presents 9 code examples of the FsPermission.getDefault method, sorted by popularity by default.
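Before the examples, here is a minimal standalone sketch (assuming Hadoop 2.x or later on the classpath; the class name is made up for illustration) of what the built-in defaults evaluate to. FsPermission.getDefault() is the legacy catch-all default that several of the examples below fall back to when no explicit permission is available:

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDefaults {
  public static void main(String[] args) {
    FsPermission legacyDefault = FsPermission.getDefault();   // 0777, the historical catch-all default
    FsPermission dirDefault = FsPermission.getDirDefault();   // 0777, default for directories
    FsPermission fileDefault = FsPermission.getFileDefault(); // 0666, default for regular files

    System.out.println(legacyDefault); // rwxrwxrwx
    System.out.println(dirDefault);    // rwxrwxrwx
    System.out.println(fileDefault);   // rw-rw-rw-
  }
}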

Example 1: HdfsFileStatus

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/**
 * Constructor
 * @param length the number of bytes the file has
 * @param isdir if the path is a directory
 * @param block_replication the replication factor
 * @param blocksize the block size
 * @param modification_time modification time
 * @param access_time access time
 * @param permission permission
 * @param owner the owner of the path
 * @param group the group of the path
 * @param symlink the symlink target, or null if the path is not a symlink
 * @param path the local name in Java UTF-8 encoding, the same as that in memory
 * @param fileId the file id
 * @param childrenNum the number of children of the path
 * @param feInfo the file's encryption info
 * @param storagePolicy the ID of the file's storage policy
 */
public HdfsFileStatus(long length, boolean isdir, int block_replication,
    long blocksize, long modification_time, long access_time,
    FsPermission permission, String owner, String group, byte[] symlink,
    byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
    byte storagePolicy) {
  this.length = length;
  this.isdir = isdir;
  this.block_replication = (short)block_replication;
  this.blocksize = blocksize;
  this.modification_time = modification_time;
  this.access_time = access_time;
  this.permission = (permission == null) ? 
      ((isdir || symlink!=null) ? 
          FsPermission.getDefault() : 
          FsPermission.getFileDefault()) :
      permission;
  this.owner = (owner == null) ? "" : owner;
  this.group = (group == null) ? "" : group;
  this.symlink = symlink;
  this.path = path;
  this.fileId = fileId;
  this.childrenNum = childrenNum;
  this.feInfo = feInfo;
  this.storagePolicy = storagePolicy;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 41, Source file: HdfsFileStatus.java
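To see the fallback in the constructor above in isolation, here is a short hedged sketch (the class and helper method are invented for illustration): a null permission resolves to FsPermission.getDefault() for directories and symlinks, and to FsPermission.getFileDefault() for regular files.

import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultPermissionChoice {
  // Mirrors the null-permission branch of the HdfsFileStatus constructor above.
  static FsPermission resolve(FsPermission permission, boolean isdir, byte[] symlink) {
    if (permission != null) {
      return permission;
    }
    return (isdir || symlink != null)
        ? FsPermission.getDefault()      // directories and symlinks: rwxrwxrwx
        : FsPermission.getFileDefault(); // regular files: rw-rw-rw-
  }

  public static void main(String[] args) {
    System.out.println(resolve(null, true, null));  // rwxrwxrwx
    System.out.println(resolve(null, false, null)); // rw-rw-rw-
  }
}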

Example 2: setup

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestDefaultBlockPlacementPolicy.java

Example 3: newDirectory

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
private FileStatus newDirectory(FileMetadata meta, Path path) {
  return new FileStatus(
      0,
      true,
      1,
      blockSize,
      meta == null ? 0 : meta.getLastModified(),
      0,
      meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
      meta == null ? "" : meta.getPermissionStatus().getUserName(),
      meta == null ? "" : meta.getPermissionStatus().getGroupName(),
      path.makeQualified(getUri(), getWorkingDirectory()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: NativeAzureFileSystem.java

Example 4: deprecatedGetFileLinkStatusInternal

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/**
 * Deprecated. Remains for legacy support. Should be removed when {@link Stat}
 * gains support for Windows and other operating systems.
 */
@Deprecated
private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
    throws IOException {
  String target = FileUtil.readLink(new File(f.toString()));

  try {
    FileStatus fs = getFileStatus(f);
    // If f refers to a regular file or directory
    if (target.isEmpty()) {
      return fs;
    }
    // Otherwise f refers to a symlink
    return new FileStatus(fs.getLen(),
        false,
        fs.getReplication(),
        fs.getBlockSize(),
        fs.getModificationTime(),
        fs.getAccessTime(),
        fs.getPermission(),
        fs.getOwner(),
        fs.getGroup(),
        new Path(target),
        f);
  } catch (FileNotFoundException e) {
    /* The exists method in the File class returns false for dangling
     * links so we can get a FileNotFoundException for links that exist.
     * It's also possible that we raced with a delete of the link. Use
     * the readBasicFileAttributes method in java.nio.file.attributes
     * when available.
     */
    if (!target.isEmpty()) {
      return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
          "", "", new Path(target), f);
    }
    // f refers to a file or directory that does not exist
    throw e;
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 43, Source file: RawLocalFileSystem.java

Example 5: addSymlink

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/**
 * Add the given symbolic link to the fs. Record it in the edits log.
 */
private static INodeSymlink addSymlink(FSDirectory fsd, String path,
    INodesInPath iip, String target, PermissionStatus dirPerms,
    boolean createParent, boolean logRetryCache) throws IOException {
  final long mtime = now();
  final byte[] localName = iip.getLastLocalName();
  if (createParent) {
    Map.Entry<INodesInPath, String> e = FSDirMkdirOp
        .createAncestorDirectories(fsd, iip, dirPerms);
    if (e == null) {
      return null;
    }
    iip = INodesInPath.append(e.getKey(), null, localName);
  }
  final String userName = dirPerms.getUserName();
  long id = fsd.allocateNewInodeId();
  PermissionStatus perm = new PermissionStatus(
      userName, null, FsPermission.getDefault());
  INodeSymlink newNode = unprotectedAddSymlink(fsd, iip.getExistingINodes(),
      localName, id, target, mtime, mtime, perm);
  if (newNode == null) {
    NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
    return null;
  }
  fsd.getEditLog().logSymlink(path, target, mtime, mtime, newNode,
      logRetryCache);

  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
  }
  return newNode;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: FSDirSymlinkOp.java

Example 6: applyUMask

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
private FsPermission applyUMask(FsPermission permission) {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  return permission.applyUMask(FsPermission.getUMask(getConf()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 7, Source file: WebHdfsFileSystem.java
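The effect of applyUMask above can be checked with a small standalone sketch (the class name is made up; it assumes a default configuration, where fs.permissions.umask-mode is 022): the catch-all default 0777 becomes 0755 once the umask is applied.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    FsPermission umask = FsPermission.getUMask(conf);  // 022 unless overridden in the configuration
    FsPermission masked = FsPermission.getDefault().applyUMask(umask);
    System.out.println(masked);                        // rwxr-xr-x (0755)
  }
}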

Example 7: testFsckFileNotFound

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {

  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;

  Configuration conf = new Configuration();
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String,String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);

  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
                                anyLong(), anyLong(),
                                anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);

  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, remoteAddress);

  String pathString = "/tmp/testFile";

  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;

  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy);
  Result res = new Result(conf);

  try {
    fsck.check(pathString, file, res);
  } catch (Exception e) {
    fail("Unexpected exception "+ e.getMessage());
  }
  assertTrue(res.toString().contains("HEALTHY"));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 60, Source file: TestFsck.java

Example 8: mkdirs

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 * If permission == null, use {@link FsPermission#getDefault()}.
 * @param createParent create missing parent directory if true
 * 
 * @return true if the operation succeeds.
 * 
 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
 */
public boolean mkdirs(String src, FsPermission permission,
    boolean createParent) throws IOException {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  return primitiveMkdir(src, masked, createParent);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: DFSClient.java
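From application code this path is usually reached through the public FileSystem API rather than DFSClient directly. The sketch below is illustrative only (the paths are made up and fs.defaultFS is assumed to point at an HDFS cluster); whether or not the caller supplies an explicit FsPermission, the client-side umask shown above is applied before the RPC is sent.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // No permission argument: a default permission is used and the client umask is applied.
    fs.mkdirs(new Path("/tmp/example-a"));
    // Explicit permission: the umask from the method above is still applied before primitiveMkdir().
    fs.mkdirs(new Path("/tmp/example-b"), new FsPermission((short) 0755));
  }
}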

Example 9: defaultPermissionNoBlobMetadata

import org.apache.hadoop.fs.permission.FsPermission; // import the package/class the method depends on
/**
 * Default permission to use when no permission metadata is found.
 * 
 * @return The default permission to use.
 */
private static PermissionStatus defaultPermissionNoBlobMetadata() {
  return new PermissionStatus("", "", FsPermission.getDefault());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: AzureNativeFileSystemStore.java


Note: The org.apache.hadoop.fs.permission.FsPermission.getDefault method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.