

Java FSDataInputStream.readUTF Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.fs.FSDataInputStream.readUTF. If you have been wondering what FSDataInputStream.readUTF does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataInputStream.


The following presents 5 code examples of the FSDataInputStream.readUTF method, sorted by popularity by default.
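
Before the examples, one point worth knowing: readUTF reads a string that was previously written in DataOutput.writeUTF format, i.e. a two-byte length prefix followed by the string's modified UTF-8 bytes (at most 65535 bytes); calling it on data not written that way will typically fail. As a quick orientation, here is a minimal round-trip sketch; the path and class name are illustrative only, not taken from any of the projects below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadUTFRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/readutf-demo");  // illustrative path
    FileSystem fs = path.getFileSystem(conf);   // local FS unless configured otherwise

    // writeUTF stores a 2-byte length prefix followed by modified UTF-8 bytes
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeUTF("hello");
    }

    // readUTF parses exactly that format back into a String
    try (FSDataInputStream in = fs.open(path)) {
      System.out.println(in.readUTF());  // prints "hello"
    }
  }
}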

Example 1: getModelFormat

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Read model row type
 *
 * @param modelDir model save directory
 * @return row type 0:sparse double, 1:dense double, 2:sparse int, 3:dense int, 4:dense float,
 * 5:sparse float, 7:sparse long key double
 */
public static int getModelFormat(String modelDir) throws IOException {
  Configuration conf = new Configuration();
  Path metaFilePath = new Path(new Path(modelDir), "meta");

  FileSystem fs = metaFilePath.getFileSystem(conf);
  if (!fs.exists(metaFilePath)) {
    throw new IOException("matrix meta file does not exist");
  }

  // Skip the two leading header fields (an int and a UTF string),
  // then read the row-type code.
  try (FSDataInputStream input = fs.open(metaFilePath)) {
    input.readInt();
    input.readUTF();
    return input.readInt();
  }
}
 
Developer ID: Tencent; Project: angel; Lines: 27; Source: ModelLoader.java
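
Note that getModelFormat above skips an int and a UTF string before reading the row-type int, so the meta file evidently begins with that three-field header. For illustration, here is a hypothetical writer producing a header in that shape; the parameter names and field meanings (version, matrix name) are assumptions for this sketch, not taken from the angel source:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MetaHeaderWriter {
  /** Hypothetical sketch: writes a header that getModelFormat can parse. */
  public static void writeMetaHeader(String modelDir, String matrixName,
      int version, int rowType) throws IOException {
    Configuration conf = new Configuration();
    Path metaFilePath = new Path(new Path(modelDir), "meta");
    FileSystem fs = metaFilePath.getFileSystem(conf);
    try (FSDataOutputStream out = fs.create(metaFilePath, true)) {
      out.writeInt(version);     // leading int; skipped by getModelFormat (meaning assumed)
      out.writeUTF(matrixName);  // UTF string; skipped by getModelFormat (meaning assumed)
      out.writeInt(rowType);     // row-type code (0-5, 7) returned by getModelFormat
    }
  }
}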

Example 2: getJobSummary

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private String getJobSummary(FileContext fc, Path path) throws IOException {
  Path qPath = fc.makeQualified(path);
  // try-with-resources closes the stream even if readUTF throws
  try (FSDataInputStream in = fc.open(qPath)) {
    return in.readUTF();
  }
}
 
Developer ID: naver; Project: hadoop; Lines: 8; Source: HistoryFileManager.java

Example 3: getJobSummary

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private static String getJobSummary(FileContext fc, Path path)
    throws IOException {
  Path qPath = fc.makeQualified(path);
  // try-with-resources closes the stream even if readUTF throws
  try (FSDataInputStream in = fc.open(qPath)) {
    return in.readUTF();
  }
}
 
Developer ID: naver; Project: hadoop; Lines: 9; Source: TestJobHistoryParsing.java

Example 4: run

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public int run(String[] args, PrintStream stream) throws Exception {
  out = stream;
  List<String> paths = parseArgs(args);
  if (paths.size() != 1) {
    errorln(USAGE);
    return E_USAGE;
  }
  println("Hadoop %s", getVersion());
  println("Compiled by %s on %s", getUser(), getDate());
  println("Compiled with protoc %s", getProtocVersion());
  println("From source with checksum %s", getSrcChecksum());


  Configuration conf = getConf();
  Path path = new Path(paths.get(0));
  FileSystem fs = path.getFileSystem(conf);

  println("Filesystem for %s is %s", path, fs);

  // examine the FS
  Configuration fsConf = fs.getConf();
  for (int i = 0; i < props.length; i++) {
    showProp(fsConf, (String) props[i][0], (Boolean) props[i][1]);
  }

  Path root = fs.makeQualified(new Path("/"));
  try (DurationInfo d = new DurationInfo(LOG,
      "Listing  %s", root)) {
    println("%s has %d entries", root, fs.listStatus(root).length);
  }

  String dirName = "dir-" + UUID.randomUUID();
  Path dir = new Path(root, dirName);
  try (DurationInfo d = new DurationInfo(LOG,
      "Creating a directory %s", dir)) {
    fs.mkdirs(dir);
  }
  try {
    Path file = new Path(dir, "file");
    try (DurationInfo d = new DurationInfo(LOG,
        "Creating a file %s", file)) {
      FSDataOutputStream data = fs.create(file, true);
      data.writeUTF(HELLO);
      data.close();
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Listing  %s", dir)) {
      fs.listFiles(dir, false);
    }

    try (DurationInfo d = new DurationInfo(LOG,
        "Reading a file %s", file)) {
      FSDataInputStream in = fs.open(file);
      String utf = in.readUTF();
      in.close();
      if (!HELLO.equals(utf)) {
        throw new IOException("Expected " + file + " to contain the text "
            + HELLO + " -but it has the text \"" + utf + "\"");
      }
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting file %s", file)) {
      fs.delete(file, true);
    }
  } finally {
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting directory %s", dir)) {
      try {
        fs.delete(dir, true);
      } catch (Exception e) {
        LOG.warn("When deleting {}: ", dir, e);
      }
    }
  }

  return SUCCESS;
}
 
Developer ID: steveloughran; Project: cloudup; Lines: 82; Source: S3ADiag.java

Example 5: testCopyDfsToDfsUpdateWithSkipCRC

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver; Project: hadoop; Lines: 75; Source: TestCopyFiles.java


Note: The org.apache.hadoop.fs.FSDataInputStream.readUTF method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or reusing the code; do not reproduce without permission.