

Java DistributedFileSystem Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DistributedFileSystem. If you have been wondering what DistributedFileSystem is for, how to use it, or simply want working examples, the curated snippets below should help.


The DistributedFileSystem class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
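As a primer for the examples below: DistributedFileSystem is the FileSystem implementation that FileSystem.get returns for hdfs:// URIs, so obtaining an instance is a lookup plus a cast. A minimal sketch; the NameNode address is a placeholder assumption, not taken from any example on this page:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DistributedFileSystemPrimer {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // hdfs://localhost:9000 is a placeholder for your NameNode address
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            System.out.println("Connected to " + dfs.getUri());
        }
    }
}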

Example 1: download

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Download a file from HDFS to the local file system.
 *
 * @param taskName    name of the task whose configuration holds the HDFS URI
 * @param filePath    local destination path
 * @param existDelete whether to delete the local file first if it already exists
 */
public static void download(String taskName, String filePath, boolean existDelete) {
    File file = new File(filePath);
    if (file.exists()) {
        if (existDelete) {
            // delete the stale file immediately so the fresh download can replace it;
            // deleteOnExit() would defer deletion to JVM shutdown and wipe the new file
            file.delete();
        } else {
            return;
        }
    }
    String hadoopAddress = propertyConfig.getProperty("sqoop.task." + taskName + ".tolink.linkConfig.uri");
    String itemmodels = propertyConfig.getProperty("sqoop.task." + taskName + ".recommend.itemmodels");
    try {
        DistributedFileSystem distributedFileSystem = distributedFileSystem(hadoopAddress);
        FSDataInputStream fsDataInputStream = distributedFileSystem.open(new Path(itemmodels));
        // read the whole file into memory (Apache Commons IO) instead of trusting
        // available(), which only reports buffered bytes and may under-read
        byte[] bs = IOUtils.toByteArray(fsDataInputStream);
        IOUtils.closeQuietly(fsDataInputStream);
        log.info(new String(bs));

        FileOutputStream fileOutputStream = new FileOutputStream(file);
        IOUtils.write(bs, fileOutputStream);
        IOUtils.closeQuietly(fileOutputStream);
    } catch (IOException e) {
        log.error(e);
    }
}
 
Author: babymm, Project: mmsns, Lines: 32, Source: HadoopUtil.java
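The snippet above calls a private helper distributedFileSystem(hadoopAddress) that is not shown (example 3 uses a no-argument overload of the same helper). A plausible reconstruction, assuming the helper merely opens a FileSystem for the given HDFS URI and casts it; the real project may differ:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical reconstruction of the unshown helper.
private static DistributedFileSystem distributedFileSystem(String hadoopAddress) throws IOException {
    Configuration conf = new Configuration();
    // hadoopAddress is expected to be an hdfs:// URI, e.g. hdfs://namenode:9000
    return (DistributedFileSystem) FileSystem.get(URI.create(hadoopAddress), conf);
}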

Example 2: moveAround

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for(Path srcDir : map.keySet()) {
    int i = 0;
    for(Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
 
Author: naver, Project: hadoop, Lines: 18, Source: TestStorageMover.java
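The method iterates a field named map that the snippet omits; in TestStorageMover its keys are the directories of the hot/warm/cold storage tiers. A hypothetical stand-in for that field, with illustrative paths and values, just to make the iteration concrete:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the omitted field: one directory per storage tier,
// so moveAround renames each tier's files into every other tier's directory.
private final Map<Path, String> map = new LinkedHashMap<>();
{
    map.put(new Path("/hot"), "HOT");     // illustrative paths and policy names
    map.put(new Path("/warm"), "WARM");
    map.put(new Path("/cold"), "COLD");
}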

Example 3: print

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Print the MapReduce job output.
 *
 * @param path HDFS directory that holds the output files
 */
public void print(String path) {
    log.info("MapReduce output: ...................................................");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fs : fileStatuses) {
            log.info(fs);
            FSDataInputStream fsDataInputStream = distributedFileSystem.open(fs.getPath());
            // read the whole file into memory (Apache Commons IO) instead of
            // trusting available(), which may under-report the file size
            byte[] bs = IOUtils.toByteArray(fsDataInputStream);
            IOUtils.closeQuietly(fsDataInputStream);
            log.info("\n" + new String(bs) + "\n");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        close(distributedFileSystem);
    }
}
 
Author: mumuhadoop, Project: mumu-mapreduce, Lines: 24, Source: MapReduceConfiguration.java

Example 4: run

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    final TableListing listing = new TableListing.Builder()
      .addField("").addField("", true)
      .wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone ez = it.next();
      listing.addRow(ez.getPath(), ez.getKeyName());
    }
    System.out.println(listing.toString());
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }

  return 0;
}
 
Author: naver, Project: hadoop, Lines: 26, Source: CryptoAdmin.java
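For listEncryptionZones to return anything, a zone has to be created first. A minimal sketch using DistributedFileSystem.createEncryptionZone; it assumes a KMS is configured and that a key named "mykey" already exists there (both the path and the key name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CreateZoneExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // assumes fs.defaultFS points at an HDFS cluster
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path zone = new Path("/secure");         // illustrative zone root
        dfs.mkdirs(zone);                        // the zone root must be an empty directory
        dfs.createEncryptionZone(zone, "mykey"); // "mykey" must already exist in the KMS
    }
}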

Example 5: run

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  String name = StringUtils.popFirstNonOption(args);
  if (name == null) {
    System.err.println("You must specify a name when deleting a " +
        "cache pool.");
    return 1;
  }
  if (!args.isEmpty()) {
    System.err.print("Can't understand arguments: " +
      Joiner.on(" ").join(args) + "\n");
    System.err.println("Usage is " + getShortUsage());
    return 1;
  }
  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.removeCachePool(name);
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.out.println("Successfully removed cache pool " + name + ".");
  return 0;
}
 
Author: naver, Project: hadoop, Lines: 25, Source: CacheAdmin.java
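The pool deleted above is created with the counterpart call DistributedFileSystem.addCachePool. A minimal sketch, with the pool name "pool1" as an illustrative assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class AddCachePoolExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // assumes fs.defaultFS points at an HDFS cluster
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // create a cache pool named "pool1" with default settings
        dfs.addCachePool(new CachePoolInfo("pool1"));
    }
}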

Example 6: main

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
public static void main(String... args) throws IOException {

    if (args.length < 2) {
      System.err.println("Usage HDFSConcat target srcs..");
      // exit non-zero so callers can detect the usage error
      System.exit(1);
    }
    
    Configuration conf = new Configuration();
    String uri = conf.get("fs.default.name", def_uri);
    Path path = new Path(uri);
    DistributedFileSystem dfs = 
      (DistributedFileSystem)FileSystem.get(path.toUri(), conf);
    
    Path [] srcs = new Path[args.length-1];
    for(int i=1; i<args.length; i++) {
      srcs[i-1] = new Path(args[i]);
    }
    dfs.concat(new Path(args[0]), srcs);
  }
 
Author: naver, Project: hadoop, Lines: 20, Source: HDFSConcat.java

Example 7: writeFile

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
 
Author: naver, Project: hadoop, Lines: 8, Source: TestFsck.java

Example 8: run

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    System.out.println("Block Storage Policies:");
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        System.out.println("\t" + policy);
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
 
Author: naver, Project: hadoop, Lines: 18, Source: StoragePolicyAdmin.java
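A policy from that listing is attached to a path via DistributedFileSystem.setStoragePolicy. A minimal sketch, assuming the built-in "COLD" policy and an illustrative /archive path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetStoragePolicyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // assumes fs.defaultFS points at an HDFS cluster
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // tag /archive with the built-in COLD policy; the HDFS mover can then
        // migrate its blocks to ARCHIVE storage
        dfs.setStoragePolicy(new Path("/archive"), "COLD");
    }
}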

Example 9: run

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
  final RollingUpgradeAction action = RollingUpgradeAction.fromString(
      argv.length >= 2? argv[1]: "");
  if (action == null) {
    throw new IllegalArgumentException("Failed to convert \"" + argv[1]
        + "\" to " + RollingUpgradeAction.class.getSimpleName());
  }

  System.out.println(action + " rolling upgrade ...");

  final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
  switch(action){
  case QUERY:
    break;
  case PREPARE:
    Preconditions.checkState(info.isStarted());
    break;
  case FINALIZE:
    Preconditions.checkState(info == null || info.isFinalized());
    break;
  }
  printMessage(info, System.out);
  return 0;
}
 
Author: naver, Project: hadoop, Lines: 25, Source: DFSAdmin.java

Example 10: metaSave

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Author: naver, Project: hadoop, Lines: 33, Source: DFSAdmin.java

Example 11: testEditsLogOldRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Author: naver, Project: hadoop, Lines: 29, Source: TestHDFSFileContextMainOperations.java

Example 12: testEditsLogRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Author: naver, Project: hadoop, Lines: 29, Source: TestHDFSFileContextMainOperations.java

Example 13: getDifferentUser

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
/**
 * This method clones the passed <code>c</code> configuration setting a new
 * user into the clone. Use it when getting new instances of FileSystem. Only
 * works for DistributedFileSystem without Kerberos.
 * @param c Initial configuration
 * @param differentiatingSuffix Suffix to differentiate this user from others.
 * @return A new configuration instance with a different user set into it.
 * @throws IOException
 */
public static User getDifferentUser(final Configuration c,
  final String differentiatingSuffix)
throws IOException {
  FileSystem currentfs = FileSystem.get(c);
  if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
    return User.getCurrent();
  }
  // Else distributed filesystem.  Make a new instance per daemon.  Below
  // code is taken from the AppendTestUtil over in hdfs.
  String username = User.getCurrent().getName() +
    differentiatingSuffix;
  User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
  return user;
}
 
Author: fengchen8086, Project: ditb, Lines: 25, Source: HBaseTestingUtility.java

Example 14: setUp

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
 
Author: naver, Project: hadoop, Lines: 20, Source: TestXAttrCLI.java

Example 15: setUp

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the required package/class
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Author: naver, Project: hadoop, Lines: 21, Source: TestCacheAdminCLI.java


Note: The org.apache.hadoop.hdfs.DistributedFileSystem class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.