

Java FSUtils.setFsDefault Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.setFsDefault. If you have been wondering how FSUtils.setFsDefault is used in practice and what real-world calls to it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSUtils.


The following presents 9 code examples of the FSUtils.setFsDefault method, sorted by popularity.
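Before the examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a fully-qualified root path and pass it to FSUtils.setFsDefault so that the configuration's default filesystem matches the filesystem of hbase.rootdir. This is an illustrative assumption-laden sketch, not code from any of the projects referenced on this page; the HDFS address and class name are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class FsDefaultSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical HDFS location; in real use this comes from hbase.rootdir.
    Path rootDir = new Path("hdfs://namenode.example.com:8020/hbase");
    FSUtils.setRootDir(conf, rootDir);
    // Make the default filesystem agree with the root dir's filesystem,
    // mirroring the calls shown in the examples below.
    FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
    // On the HBase versions used in these examples this is expected to
    // update fs.defaultFS; printed here purely for illustration.
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
  }
}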

Example 1: MasterFileSystem

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public MasterFileSystem(Server master, MasterServices services)
    throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: MasterFileSystem.java

Example 2: startMiniDFSCluster

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  // Reset the test directory for the test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: HBaseTestingUtility.java

Example 3: init

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public void init() throws IOException {
  this.rootDir = FSUtils.getRootDir(conf);
  FSUtils.setFsDefault(getConf(), rootDir);
  this.fs = FileSystem.get(conf);
  Path tmpDataDir = new Path(rootDir, TMP_DATA_DIR);
  sysNsDir = new Path(tmpDataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
  defNsDir = new Path(tmpDataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
  baseDirs = new Path[]{rootDir,
      new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
      new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY)};
  backupDir = new Path(rootDir, HConstants.MIGRATION_NAME);
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: NamespaceUpgrade.java

Example 4: verifySnapshot

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private void verifySnapshot(final Configuration baseConf,
    final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException {
  // Update the conf with the current root dir, since it may be a different cluster
  Configuration conf = new Configuration(baseConf);
  FSUtils.setRootDir(conf, rootDir);
  FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, snapshotDesc);
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: ExportSnapshot.java

Example 5: main

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Pass one or more log file names and it will either dump out a text version
 * on <code>stdout</code> or split the specified log files.
 *
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 2) {
    usage();
    System.exit(-1);
  }
  // either dump using the WALPrettyPrinter or split, depending on args
  if (args[0].compareTo("--dump") == 0) {
    WALPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
  } else if (args[0].compareTo("--perf") == 0) {
    LOG.fatal("Please use the WALPerformanceEvaluation tool instead. i.e.:");
    LOG.fatal("\thbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation --iterations " +
        args[1]);
    System.exit(-1);
  } else if (args[0].compareTo("--split") == 0) {
    Configuration conf = HBaseConfiguration.create();
    for (int i = 1; i < args.length; i++) {
      try {
        Path logPath = new Path(args[i]);
        FSUtils.setFsDefault(conf, logPath);
        split(conf, logPath);
      } catch (IOException t) {
        t.printStackTrace(System.err);
        System.exit(-1);
      }
    }
  } else {
    usage();
    System.exit(-1);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: FSHLog.java

Example 6: setFs

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private void setFs() throws IOException {
  if (this.dfsCluster == null) {
    LOG.info("Skipping setting fs because dfsCluster is null");
    return;
  }
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
  if (this.conf.getBoolean(USE_LOCAL_FILESYSTEM, false)) {
    FSUtils.setFsDefault(this.conf, new Path("file:///"));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: HBaseTestingUtility.java

Example 7: shutdownMiniDFSCluster

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
 * or does nothing.
 * @throws IOException
 */
public void shutdownMiniDFSCluster() throws IOException {
  if (this.dfsCluster != null) {
    // The below throws an exception per dn, AsynchronousCloseException.
    this.dfsCluster.shutdown();
    dfsCluster = null;
    dataTestDirOnTestFS = null;
    FSUtils.setFsDefault(this.conf, new Path("file:///"));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: HBaseTestingUtility.java

Example 8: run

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
  justification="Intentional")
public int run(String[] args) throws IOException, InterruptedException {
  final Configuration conf = getConf();
  boolean listSnapshots = false;
  String snapshotName = null;
  boolean showSchema = false;
  boolean showFiles = false;
  boolean showStats = false;

  // Process command line args
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    try {
      if (cmd.equals("-snapshot")) {
        snapshotName = args[++i];
      } else if (cmd.equals("-files")) {
        showFiles = true;
        showStats = true;
      } else if (cmd.equals("-stats")) {
        showStats = true;
      } else if (cmd.equals("-schema")) {
        showSchema = true;
      } else if (cmd.equals("-remote-dir")) {
        Path sourceDir = new Path(args[++i]);
        URI defaultFs = sourceDir.getFileSystem(conf).getUri();
        FSUtils.setFsDefault(conf, new Path(defaultFs));
        FSUtils.setRootDir(conf, sourceDir);
      } else if (cmd.equals("-list-snapshots")) {
        listSnapshots = true;
      } else if (cmd.equals("-size-in-bytes")) {
        printSizeInBytes = true;
      } else if (cmd.equals("-h") || cmd.equals("--help")) {
        printUsageAndExit();
      } else {
        System.err.println("UNEXPECTED: " + cmd);
        printUsageAndExit();
      }
    } catch (Exception e) {
      printUsageAndExit(); // FindBugs: REC_CATCH_EXCEPTION
    }
  }

  // List Available Snapshots
  if (listSnapshots) {
    SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
    System.out.printf("%-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TABLE NAME");
    for (SnapshotDescription desc: getSnapshotList(conf)) {
      System.out.printf("%-20s | %20s | %s%n",
                        desc.getName(),
                        df.format(new Date(desc.getCreationTime())),
                        desc.getTable());
    }
    return 0;
  }

  if (snapshotName == null) {
    System.err.println("Missing snapshot name!");
    printUsageAndExit();
    return 1;
  }

  rootDir = FSUtils.getRootDir(conf);
  fs = FileSystem.get(rootDir.toUri(), conf);
  LOG.debug("fs=" + fs.getUri().toString() + " root=" + rootDir);

  // Load snapshot information
  if (!loadSnapshotInfo(snapshotName)) {
    System.err.println("Snapshot '" + snapshotName + "' not found!");
    return 1;
  }

  printInfo();
  if (showSchema) printSchema();
  printFiles(showFiles, showStats);

  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 80, Source: SnapshotInfo.java

Example 9: main

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Main program
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {

  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
  HBaseFsck fsck = new HBaseFsck(conf);
  boolean fixHoles = false;

  // Process command-line args.
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    if (cmd.equals("-details")) {
      fsck.setDisplayFullReport();
    } else if (cmd.equals("-base")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
        printUsageAndExit();
      }
      // update hbase root dir to user-specified base
      i++;
      FSUtils.setRootDir(conf, new Path(args[i]));
      FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
    } else if (cmd.equals("-sidelineDir")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
        printUsageAndExit();
      }
      // set the hbck sideline dir to user-specified one
      i++;
      fsck.setSidelineDir(args[i]);
    } else if (cmd.equals("-fixHoles")) {
      fixHoles = true;
    } else if (cmd.equals("-fix")) {
      // make all fix options true
      fixHoles = true;
    } else {
      String str = "Unknown command line option : " + cmd;
      LOG.info(str);
      System.out.println(str);
      printUsageAndExit();
    }
  }

  System.out.println("OfflineMetaRepair command line options: " + StringUtils.join(args, " "));

  // Fsck doesn't shutdown and doesn't provide a way to shutdown its
  // threads cleanly, so we do a System.exit.
  boolean success = false;
  try {
    success = fsck.rebuildMeta(fixHoles);
  } catch (MultipleIOException mioes) {
    for (IOException ioe : mioes.getExceptions()) {
      LOG.error("Bailed out due to:", ioe);
    }
  } catch (Exception e) {
    LOG.error("Bailed out due to: ", e);
  } finally {
    System.exit(success ? 0 : 1);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: OfflineMetaRepair.java


Note: The org.apache.hadoop.hbase.util.FSUtils.setFsDefault examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; the copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code. Do not reproduce this article without permission.