

Java Path.makeQualified Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.makeQualified. If you are wondering what Path.makeQualified does, or how to call it in practice, the selected code examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.fs.Path.


The section below presents 15 code examples of the Path.makeQualified method, sorted by popularity by default.
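Before the examples, here is a minimal standalone sketch of how the two makeQualified overloads are typically called. The relative path and the default Configuration are hypothetical placeholders, not taken from any of the projects listed below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);        // local FS unless fs.defaultFS points elsewhere
    Path relative = new Path("data/input.txt");  // hypothetical relative path

    // Deprecated single-argument overload: qualify against a FileSystem instance.
    Path q1 = relative.makeQualified(fs);

    // Current two-argument overload: qualify against an explicit URI and working directory.
    Path q2 = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());

    // Both return an absolute path carrying a scheme, e.g. file:/home/user/data/input.txt
    System.out.println(q1);
    System.out.println(q2);
  }
}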

Example 1: getFileStatus

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Convert the file information in an FTPFile to a {@link FileStatus} object.
 *
 * @param ftpFile the FTP directory entry to convert
 * @param parentPath the parent directory of the file
 * @return FileStatus
 */
private FileStatus getFileStatus(FTPFile ftpFile, Path parentPath) {
  long length = ftpFile.getSize();
  boolean isDir = ftpFile.isDirectory();
  int blockReplication = 1;
  // Using default block size since there is no way in FTP client to know of
  // block sizes on server. The assumption could be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  long modTime = ftpFile.getTimestamp().getTimeInMillis();
  long accessTime = 0;
  FsPermission permission = getPermissions(ftpFile);
  String user = ftpFile.getUser();
  String group = ftpFile.getGroup();
  Path filePath = new Path(parentPath, ftpFile.getName());
  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(this));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 24, Source: FTPFileSystem.java
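Note: the single-argument makeQualified(FileSystem) overload used in this example (and in several later ones) is marked deprecated in newer Hadoop releases; the two-argument makeQualified(URI, Path) form shown in Example 2 is the recommended replacement.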

Example 2: getFileStatus

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Convert the file information in an LsEntry to a {@link FileStatus} object.
 *
 * @param channel the open SFTP channel, used to resolve symbolic links
 * @param sftpFile the SFTP directory entry to convert
 * @param parentPath the parent directory of the file
 * @return file status
 * @throws IOException if a symbolic link cannot be resolved
 */
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
    Path parentPath) throws IOException {

  SftpATTRS attr = sftpFile.getAttrs();
  long length = attr.getSize();
  boolean isDir = attr.isDir();
  boolean isLink = attr.isLink();
  if (isLink) {
    String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
    try {
      link = channel.realpath(link);

      Path linkParent = new Path("/", link);

      FileStatus fstat = getFileStatus(channel, linkParent);
      isDir = fstat.isDirectory();
      length = fstat.getLen();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  int blockReplication = 1;
  // Using default block size since there is no way in SFTP channel to know of
  // block sizes on server. The assumption could be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  long modTime = attr.getMTime() * 1000L; // convert seconds to milliseconds (long literal avoids int overflow)
  long accessTime = 0;
  FsPermission permission = getPermissions(sftpFile);
  // The real user and group names are not available here, so use the numeric
  // user and group IDs instead.
  String user = Integer.toString(attr.getUId());
  String group = Integer.toString(attr.getGId());
  Path filePath = new Path(parentPath, sftpFile.getFilename());

  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(
          this.getUri(), this.getWorkingDirectory()));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 47, Source: SFTPFileSystem.java

Example 3: testLoadTooMayHFiles

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test(timeout = 60000)
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + i), FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = {dir.toString(), "mytable_testLoadTooMayHFiles"};
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains("Trying to load more than "
      + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestLoadIncrementalHFiles.java

Example 4: validatePathLen

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private FileStatus validatePathLen(Path path, int len) throws IOException {
  //verify that the length is what was written in a direct status check
  final Path qualifiedPath = path.makeQualified(fs);
  FileStatus[] parentDirListing = fs.listStatus(qualifiedPath.getParent());
  StringBuilder listing = lsToString(parentDirListing);
  String parentDirLS = listing.toString();
  FileStatus status = fs.getFileStatus(qualifiedPath);
  assertEquals("Length of written file " + qualifiedPath
               + " from status check " + status
               + " in dir " + listing,
               len,
               status.getLen());
  String fileInfo = qualifiedPath + "  " + status;
  assertFalse("File claims to be a directory " + fileInfo,
              status.isDir());

  FileStatus listedFileStat = resolveChild(parentDirListing, qualifiedPath);
  assertNotNull("Did not find " + path + " in " + parentDirLS,
                listedFileStat);
  //file is in the parent dir. Now validate its stats
  assertEquals("Wrong len for " + path + " in listing " + parentDirLS,
               len,
               listedFileStat.getLen());
  listedFileStat.toString();
  return status;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestSwiftFileSystemPartitionedUploads.java

Example 5: listStatus

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  Path absolutePath = makeAbsolute(f);
  INode inode = store.retrieveINode(absolutePath);
  if (inode == null) {
    throw new FileNotFoundException("File " + f + " does not exist.");
  }
  if (inode.isFile()) {
    return new FileStatus[] {
      new S3FileStatus(f.makeQualified(this), inode)
    };
  }
  ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
  for (Path p : store.listSubPaths(absolutePath)) {
    ret.add(getFileStatus(p.makeQualified(this)));
  }
  return ret.toArray(new FileStatus[0]);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: S3FileSystem.java

Example 6: testTableWithCFNameStartWithUnderScore

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test(timeout = 120000)
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = "mytable_cfNameStartWithUnderScore";
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
    QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { dir.toString(), tableName };
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestLoadIncrementalHFiles.java

Example 7: createAppDir

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private Path createAppDir(String user, String applicationId,
    UserGroupInformation userUgi, Configuration conf,
    Path remoteNodeTmpLogFileForApp) throws IOException {
  FileSystem remoteFS = FileSystem.get(conf);

  // Only creating directories if they are missing to avoid
  // unnecessary load on the filesystem from all of the nodes
  String remoteRootLogDirSuffix = conf.get(
      YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
      YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
  Path appDir = LogAggregationUtils.getRemoteAppLogDir(
      remoteNodeTmpLogFileForApp,
      ConverterUtils.toApplicationId(applicationId), user,
      remoteRootLogDirSuffix);
  appDir = appDir.makeQualified(remoteFS.getUri(),
      remoteFS.getWorkingDirectory());

  if (!checkExists(remoteFS, appDir, APP_DIR_PERMISSIONS)) {
    Path suffixDir = LogAggregationUtils.getRemoteLogSuffixedDir(
        remoteNodeTmpLogFileForApp, user, remoteRootLogDirSuffix);
    suffixDir = suffixDir.makeQualified(remoteFS.getUri(),
        remoteFS.getWorkingDirectory());

    if (!checkExists(remoteFS, suffixDir, APP_DIR_PERMISSIONS)) {
      Path userDir = LogAggregationUtils.getRemoteLogUserDir(
          remoteNodeTmpLogFileForApp, user);
      userDir = userDir.makeQualified(remoteFS.getUri(),
          remoteFS.getWorkingDirectory());

      if (!checkExists(remoteFS, userDir, APP_DIR_PERMISSIONS)) {
        createDir(remoteFS, userDir, APP_DIR_PERMISSIONS);
      }

      createDir(remoteFS, suffixDir, APP_DIR_PERMISSIONS);
    }

    createDir(remoteFS, appDir, APP_DIR_PERMISSIONS);
  }
  return appDir;
}
 
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 41, Source: HPCLogAggregateHandler.java

Example 8: runTest

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestLoadIncrementalHFiles.java

Example 9: SequenceFileIterator

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * @throws IOException if path can't be read, or its key or value class can't be instantiated
 */
public SequenceFileIterator(Path path, boolean reuseKeyValueInstances, Configuration conf) throws IOException {
  key = null;
  value = null;
  FileSystem fs = path.getFileSystem(conf);
  path = path.makeQualified(fs);
  reader = new SequenceFile.Reader(fs, path, conf);
  this.conf = conf;
  keyClass = (Class<K>) reader.getKeyClass();
  valueClass = (Class<V>) reader.getValueClass();
  noValue = NullWritable.class.equals(valueClass);
  this.reuseKeyValueInstances = reuseKeyValueInstances;
}
 
Developer: huyang1, Project: LDA, Lines: 17, Source: SequenceFileIterator.java

Example 10: open

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
  f = f.makeQualified(getUri(), getWorkingDirectory());
  String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
  String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
  URL u = getNamenodeURL(path, query);
  return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u));
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: HftpFileSystem.java

Example 11: newDirectory

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private FileStatus newDirectory(FileMetadata meta, Path path) {
  return new FileStatus(
      0,
      true,
      1,
      blockSize,
      meta == null ? 0 : meta.getLastModified(),
      0,
      meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
      meta == null ? "" : meta.getPermissionStatus().getUserName(),
      meta == null ? "" : meta.getPermissionStatus().getGroupName(),
      path.makeQualified(getUri(), getWorkingDirectory()));
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: NativeAzureFileSystem.java

Example 12: testNonHfileFolder

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Write a random data file and a non-file in a dir with a valid family name
 * that is not part of the table families. We should be able to bulk load
 * without getting the unmatched family exception. HBASE-13037/HBASE-13227
 */
private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tableName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);

  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_0"),
      FAMILY, QUALIFIER, Bytes.toBytes("begin"), Bytes.toBytes("end"), 500);
  createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024);

  final String NON_FAMILY_FOLDER = "_logs";
  Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
  fs.mkdirs(nonFamilyDir);
  fs.mkdirs(new Path(nonFamilyDir, "non-file"));
  createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

  Table table = null;
  try {
    if (preCreateTable) {
      table = util.createTable(TableName.valueOf(tableName), FAMILY);
    } else {
      table = util.getConnection().getTable(TableName.valueOf(tableName));
    }

    final String[] args = {dir.toString(), tableName};
    new LoadIncrementalHFiles(util.getConfiguration()).run(args);
    assertEquals(500, util.countRows(table));
  } finally {
    if (table != null) {
      table.close();
    }
    fs.delete(dir, true);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestLoadIncrementalHFiles.java

Example 13: BulkLoadHelper

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
public BulkLoadHelper(Path loadPath) throws IOException {
  fs = TEST_UTIL.getTestFileSystem();
  conf = TEST_UTIL.getConfiguration();
  loadPath = loadPath.makeQualified(fs);
  this.loadPath = loadPath;
}
 
Developer: fengchen8086, Project: ditb, Lines: 7, Source: TestAccessController.java

Example 14: getInputPath

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
protected Path getInputPath() throws IOException {
  Path inputPath = new Path(context.getOptions().getExportDir());
  Configuration conf = options.getConf();
  inputPath = inputPath.makeQualified(FileSystem.get(conf));
  return inputPath;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 7, Source: HdfsOdpsImportJob.java

Example 15: testFilePartUpload

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Tests upload functionality for big files (> 5 GB).
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked; merely that the call actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of the test, which seems to play up on partitioned uploads;
    //failures here are downgraded to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 77, Source: TestSwiftFileSystemPartitionedUploads.java


Note: The org.apache.hadoop.fs.Path.makeQualified method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code. Do not reproduce without permission.