Java FSDataOutputStream.writeUTF Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.writeUTF. If you are wondering what FSDataOutputStream.writeUTF does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.fs.FSDataOutputStream itself.


Eight code examples of the FSDataOutputStream.writeUTF method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
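Before the project examples, here is a minimal, self-contained sketch of the typical round trip: writeUTF on an FSDataOutputStream, then readUTF on the matching FSDataInputStream. The local filesystem and the file path below are illustrative assumptions rather than code from any of the projects. Note that writeUTF inherits its behavior from DataOutputStream: it writes a two-byte length prefix followed by the string in modified UTF-8, so the encoded string must fit in 65,535 bytes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteUTFRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Local filesystem for illustration; any Hadoop FileSystem behaves the same way.
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/writeutf-demo.bin"); // hypothetical path

    // Write: a 2-byte length prefix followed by the string in modified UTF-8.
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeUTF("hello hadoop");
    }

    // Read it back with the symmetric readUTF call.
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println(in.readUTF()); // prints "hello hadoop"
    }
  }
}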

Example 1: testVersion

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testVersion() throws DeserializationException, IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final FileSystem fs = htu.getTestFileSystem();
  final Path rootdir = htu.getDataTestDir();
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write out old format version file.  See if we can read it in and convert.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus [] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // File will have been converted. Exercise the pb format
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  FSUtils.checkVersion(fs, rootdir, true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestFSUtils.java

Example 2: testRewritingClusterIdToPB

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  FSDataOutputStream s = null;
  try {
    s = fs.create(filePath);
    s.writeUTF(UUID.randomUUID().toString());
  } finally {
    if (s != null) {
      s.close();
    }
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(1, master.getServerManager().getOnlineServersList().size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestClusterId.java

Example 3: addToLocalResources

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void addToLocalResources(FileSystem fs, String fileSrcPath,
																 String fileDstPath, String appId, Map<String, LocalResource> localResources,
																 String resources) throws IOException {
	String suffix =
			"prkeyrotation" + "/" + appId + "/" + fileDstPath;
	Path dst =
			new Path(fs.getHomeDirectory(), suffix);
	if (fileSrcPath == null) {
		FSDataOutputStream ostream = null;
		try {
			ostream = FileSystem
					.create(fs, dst, new FsPermission((short) 0710));
			ostream.writeUTF(resources);
		} finally {
			IOUtils.closeQuietly(ostream);
		}
	} else {
		fs.copyFromLocalFile(new Path(fileSrcPath), dst);
	}
	FileStatus scFileStatus = fs.getFileStatus(dst);
	LocalResource scRsrc =
			LocalResource.newInstance(
					ConverterUtils.getYarnUrlFromPath(dst),
					LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
					scFileStatus.getLen(), scFileStatus.getModificationTime());
	localResources.put(fileDstPath, scRsrc);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 28, Source: Client.java

Example 4: writeGlobalCleanerPidFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root =
      conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
          YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);

    if (fs.exists(pidPath)) {
      return false;
    }

    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to the delete-on-exit to ensure it gets deleted when the JVM
    // exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: CleanerService.java

Example 5: addToLocalResources

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix =
      appName + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromURI(dst.toUri()),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: Client.java

Example 6: testFilesystemIsCaseSensitive

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * Assert that a filesystem is case sensitive.
 * This is done by creating a mixed-case filename and asserting that
 * its lower case version is not there.
 *
 * @throws Exception failures
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testFilesystemIsCaseSensitive() throws Exception {
  String mixedCaseFilename = "/test/UPPER.TXT";
  Path upper = path(mixedCaseFilename);
  Path lower = path(StringUtils.toLowerCase(mixedCaseFilename));
  assertFalse("File exists" + upper, fs.exists(upper));
  assertFalse("File exists" + lower, fs.exists(lower));
  FSDataOutputStream out = fs.create(upper);
  out.writeUTF("UPPER");
  out.close();
  FileStatus upperStatus = fs.getFileStatus(upper);
  assertExists("Original upper case file" + upper, upper);
  //verify the lower-case version of the filename doesn't exist
  assertPathDoesNotExist("lower case file", lower);
  //now overwrite the lower case version of the filename with a
  //new version.
  out = fs.create(lower);
  out.writeUTF("l");
  out.close();
  assertExists("lower case file", lower);
  //verify the length of the upper file hasn't changed
  assertExists("Original upper case file " + upper, upper);
  FileStatus newStatus = fs.getFileStatus(upper);
  assertEquals("Expected status:" + upperStatus
          + " actual status " + newStatus,
          upperStatus.getLen(),
          newStatus.getLen());
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestSwiftFileSystemExtendedContract.java

Example 7: run

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
public int run(String[] args, PrintStream stream) throws Exception {
  out = stream;
  List<String> paths = parseArgs(args);
  if (paths.size() != 1) {
    errorln(USAGE);
    return E_USAGE;
  }
  println("Hadoop %s", getVersion());
  println("Compiled by %s on %s", getUser(), getDate());
  println("Compiled with protoc %s", getProtocVersion());
  println("From source with checksum %s", getSrcChecksum());


  Configuration conf = getConf();
  Path path = new Path(paths.get(0));
  FileSystem fs = path.getFileSystem(conf);

  println("Filesystem for %s is %s", path, fs);

  // examine the FS
  Configuration fsConf = fs.getConf();
  for (int i = 0; i < props.length; i++) {
    showProp(fsConf, (String) props[i][0], (Boolean) props[i][1]);
  }

  Path root = fs.makeQualified(new Path("/"));
  try (DurationInfo d = new DurationInfo(LOG,
      "Listing  %s", root)) {
    println("%s has %d entries", root, fs.listStatus(root).length);
  }

  String dirName = "dir-" + UUID.randomUUID();
  Path dir = new Path(root, dirName);
  try (DurationInfo d = new DurationInfo(LOG,
      "Creating a directory %s", dir)) {
    fs.mkdirs(dir);
  }
  try {
    Path file = new Path(dir, "file");
    try (DurationInfo d = new DurationInfo(LOG,
        "Creating a file %s", file)) {
      FSDataOutputStream data = fs.create(file, true);
      data.writeUTF(HELLO);
      data.close();
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Listing  %s", dir)) {
      fs.listFiles(dir, false);
    }

    try (DurationInfo d = new DurationInfo(LOG,
        "Reading a file %s", file)) {
      FSDataInputStream in = fs.open(file);
      String utf = in.readUTF();
      in.close();
      if (!HELLO.equals(utf)) {
        throw new IOException("Expected " + file + " to contain the text "
            + HELLO + " -but it has the text \"" + utf + "\"");
      }
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting file %s", file)) {
      fs.delete(file, true);
    }
  } finally {
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting directory %s", dir)) {
      try {
        fs.delete(dir, true);
      } catch (Exception e) {
        LOG.warn("When deleting {}: ", dir, e);
      }
    }


  }


  // Validate parameters.
  return SUCCESS;
}
 
Developer: steveloughran, Project: cloudup, Lines: 82, Source: S3ADiag.java

Example 8: testCopyDfsToDfsUpdateWithSkipCRC

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
     }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines: 75, Source: TestCopyFiles.java
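A side note on why this test works: the payloads "act act act" and "cat cat cat" are both 11 ASCII characters, and writeUTF stores a two-byte length prefix followed by the modified UTF-8 bytes, so both files end up exactly 13 bytes long, the same length with different contents, which is what the -skipcrccheck scenario needs. The quick check below is an illustrative sketch using plain DataOutputStream (the class FSDataOutputStream extends); it is not part of the original test.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;

public class WriteUTFLengthCheck {
  public static void main(String[] args) throws Exception {
    for (String s : new String[] {"act act act", "cat cat cat"}) {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(buffer)) {
        out.writeUTF(s); // 2-byte length prefix + modified UTF-8 payload
      }
      System.out.println("\"" + s + "\" -> " + buffer.size() + " bytes"); // both print 13
    }
  }
}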


Note: The org.apache.hadoop.fs.FSDataOutputStream.writeUTF method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. When sharing or using the code, please follow the license of the corresponding project. Do not reproduce without permission.