

Java TFile Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.file.tfile.TFile. If you are wondering how the TFile class is used in practice, the curated examples below may help.


The TFile class belongs to the org.apache.hadoop.io.file.tfile package. Fifteen code examples of the class are shown below, sorted by popularity by default.
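Before the examples, here is a minimal, self-contained sketch of the basic TFile write/read cycle. It is an illustrative assumption rather than code taken from the examples below: the local file system, the /tmp/demo.tfile path, and the key/value contents are all hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);   // assumption: local FS for the demo
    Path path = new Path("/tmp/demo.tfile");     // hypothetical path

    // Write a few key/value pairs; TFile requires keys in ascending order.
    FSDataOutputStream out = fs.create(path, true);
    TFile.Writer writer = new TFile.Writer(out, 64 * 1024,
        TFile.COMPRESSION_NONE, TFile.COMPARATOR_MEMCMP, conf);
    writer.append("key-001".getBytes(), "value-1".getBytes());
    writer.append("key-002".getBytes(), "value-2".getBytes());
    writer.close();
    out.close();

    // Read the pairs back with a scanner.
    FSDataInputStream in = fs.open(path);
    long length = fs.getFileStatus(path).getLen();
    TFile.Reader reader = new TFile.Reader(in, length, conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    BytesWritable key = new BytesWritable();
    BytesWritable value = new BytesWritable();
    while (!scanner.atEnd()) {
      scanner.entry().get(key, value);
      System.out.println(new String(key.copyBytes()) + " -> " + new String(value.copyBytes()));
      scanner.advance();
    }
    scanner.close();
    reader.close();
    in.close();
  }
}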

Example 1: next

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
/**
 * Read the next key and return the value-stream.
 * 
 * @param key
 * @return the valueStream if there are more keys or null otherwise.
 * @throws IOException
 */
public DataInputStream next(LogKey key) throws IOException {
  if (!this.atBeginning) {
    this.scanner.advance();
  } else {
    this.atBeginning = false;
  }
  if (this.scanner.atEnd()) {
    return null;
  }
  TFile.Reader.Scanner.Entry entry = this.scanner.entry();
  key.readFields(entry.getKeyStream());
  // Skip META keys
  if (RESERVED_KEYS.containsKey(key.toString())) {
    return next(key);
  }
  DataInputStream valueStream = entry.getValueStream();
  return valueStream;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: AggregatedLogFormat.java
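A usage sketch for the next(LogKey) method above, assuming an AggregatedLogFormat.LogReader instance (from org.apache.hadoop.yarn.logaggregation) that exposes it; the printing logic is illustrative only:

// Hypothetical driver loop: drain every non-reserved entry via next(LogKey).
void printAllContainerKeys(AggregatedLogFormat.LogReader reader) throws IOException {
  AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey();
  DataInputStream valueStream = reader.next(key);
  while (valueStream != null) {
    System.out.println("container: " + key.toString());
    // consume the per-container value stream here before advancing
    key = new AggregatedLogFormat.LogKey();
    valueStream = reader.next(key);
  }
}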

Example 2: getApplicationOwner

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
/**
 * Returns the owner of the application.
 * 
 * @return the application owner.
 * @throws IOException
 */
public String getApplicationOwner() throws IOException {
  TFile.Reader.Scanner ownerScanner = null;
  try {
    ownerScanner = reader.createScanner();
    LogKey key = new LogKey();
    while (!ownerScanner.atEnd()) {
      TFile.Reader.Scanner.Entry entry = ownerScanner.entry();
      key.readFields(entry.getKeyStream());
      if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) {
        DataInputStream valueStream = entry.getValueStream();
        return valueStream.readUTF();
      }
      ownerScanner.advance();
    }
    return null;
  } finally {
    IOUtils.cleanup(LOG, ownerScanner);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: AggregatedLogFormat.java

Example 3: writeTFile

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
public void writeTFile(Path file, String cname) throws Exception
{


  FSDataOutputStream fos = hdfs.create(file);

  TFile.Writer writer =
      new TFile.Writer(fos, blockSize, cname, "jclass:" +
      BytesWritable.Comparator.class.getName(), new Configuration());

  for (int i = 0; i < testSize; i++) {
    String k = getKey(i);
    String v = getValue();
    writer.append(k.getBytes(), v.getBytes());
  }

  writer.close();
  fos.close();
}
 
Developer: DataTorrent, Project: Megh, Lines: 20, Source: HadoopFilePerformanceTest.java
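Note that TFile.Writer.append requires keys to arrive in non-decreasing order under the configured comparator (here a "jclass:" comparator built from BytesWritable.Comparator). A hypothetical version of the getKey(i) helper used above, which zero-pads the index so byte-wise order matches numeric order, could look like this:

// Hypothetical getKey(i): zero-padding keeps lexicographic key order aligned with numeric order.
private String getKey(int i) {
  return String.format("key-%010d", i);
}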

Example 4: testTFileWrite

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testTFileWrite() throws Exception
{
  Path file = Testfile.TFILE.filepath();
  logger.info("Writing {} with {} key/value pairs", file, String.format("%,d", testSize));

  startTimer();
  writeTFile(file, TFile.COMPRESSION_NONE);
  logger.info("Duration: {}",  stopTimer(Testfile.TFILE, "WRITE"));

  Assert.assertTrue(hdfs.exists(file));
  ContentSummary fileInfo = hdfs.getContentSummary(file);
  logger.debug("Space consumed: {} bytes in {} files",
      String.format("%,d", fileInfo.getSpaceConsumed()),
      String.format("%,d", fileInfo.getFileCount()));
}
 
Developer: DataTorrent, Project: Megh, Lines: 17, Source: HadoopFilePerformanceTest.java

Example 5: testTFileWriteGZ

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testTFileWriteGZ() throws Exception
{
  Path file = Testfile.TFILE_GZ.filepath();
  logger.info("Writing {} with {} key/value pairs", file, String.format("%,d", testSize));

  startTimer();
  writeTFile(file, TFile.COMPRESSION_GZ);
  logger.info("Duration: {}",  stopTimer(Testfile.TFILE_GZ, "WRITE"));

  Assert.assertTrue(hdfs.exists(file));
  ContentSummary fileInfo = hdfs.getContentSummary(file);
  logger.debug("Space consumed: {} bytes in {} files",
      String.format("%,d", fileInfo.getSpaceConsumed()),
      String.format("%,d", fileInfo.getFileCount()));
}
 
Developer: DataTorrent, Project: Megh, Lines: 17, Source: HadoopFilePerformanceTest.java

Example 6: testTFileRead

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testTFileRead() throws Exception
{

  Path file = Testfile.TFILE.filepath();
  logger.info("Reading {} with {} key/value pairs", file, String.format("%,d", testSize));
  writeTFile(file, TFile.COMPRESSION_NONE);

  startTimer();
  readTFileSeq(file);
  logger.info("Duration for scanner.next() SEQUENTIAL keys: {}", stopTimer(Testfile.TFILE, "READ-SEQ"));

  startTimer();
  readTFileSeqId(file);
  logger.info("Duration for scanner.seekTo(key) SEQUENTIAL keys: {}", stopTimer(Testfile.TFILE, "READ-SEQ-ID"));

  startTimer();
  readTFileRandom(file);
  logger.info("Duration for scanner.seekTo(key) RANDOM keys: {}", stopTimer(Testfile.TFILE, "READ-RAND"));

}
 
Developer: DataTorrent, Project: Megh, Lines: 22, Source: HadoopFilePerformanceTest.java

Example 7: testTFileReadGZ

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testTFileReadGZ() throws Exception
{

  Path file = Testfile.TFILE_GZ.filepath();
  logger.info("Reading {} with {} key/value pairs", file, String.format("%,d", testSize));
  writeTFile(file, TFile.COMPRESSION_GZ);

  startTimer();
  readTFileSeq(file);
  logger.info("Duration for scanner.next() SEQUENTIAL keys: {}", stopTimer(Testfile.TFILE_GZ, "READ-SEQ"));

  startTimer();
  readTFileSeqId(file);
  logger.info("Duration for scanner.seekTo(key) SEQUENTIAL keys: {}", stopTimer(Testfile.TFILE_GZ, "READ-SEQ-ID"));

  startTimer();
  readTFileRandom(file);
  logger.info("Duration for scanner.seekTo(key) RANDOM keys: {}",  stopTimer(Testfile.TFILE_GZ, "READ-RAND"));

}
 
Developer: DataTorrent, Project: Megh, Lines: 22, Source: HadoopFilePerformanceTest.java

Example 8: readTFileRandom

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
private void readTFileRandom(Path file) throws IOException
{
  Random random = new Random();

  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  TFile.Reader reader = new TFile.Reader(in, size, new Configuration());
  Scanner scanner = reader.createScanner();
  scanner.rewind();

  for (int i = 0; i < testSize; i++) {
    // Seek to a random key; the entry itself is deliberately not read here.
    scanner.seekTo(getKey(random.nextInt(testSize)).getBytes());
  }
  reader.close();
}
 
Developer: DataTorrent, Project: Megh, Lines: 22, Source: HadoopFilePerformanceTest.java
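Scanner.seekTo(byte[]) returns a boolean: true when an entry with exactly that key is found, false when the cursor is only positioned at the first key greater than the requested one. A stricter variant of the random-read loop above could check it, for example:

// Variant of the random-read loop that fails fast if a key is missing.
for (int i = 0; i < testSize; i++) {
  byte[] key = getKey(random.nextInt(testSize)).getBytes();
  if (!scanner.seekTo(key)) {
    throw new IOException("key not found: " + new String(key));
  }
}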

Example 9: readTFileSeqId

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
private void readTFileSeqId(Path file) throws IOException
{

  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  TFile.Reader reader = new TFile.Reader(in, size, new Configuration());
  Scanner scanner = reader.createScanner();
  scanner.rewind();

  for (int i = 0; i < testSize; i++) {
    scanner.seekTo(getKey(i).getBytes());
    Entry en = scanner.entry();
    en.get(new BytesWritable(new byte[en.getKeyLength()]), new BytesWritable(new byte[en.getValueLength()]));
  }
  reader.close();

}
 
Developer: DataTorrent, Project: Megh, Lines: 18, Source: HadoopFilePerformanceTest.java

Example 10: readTFileSeq

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
private void readTFileSeq(Path file) throws IOException
{

  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  TFile.Reader reader = new TFile.Reader(in, size, new Configuration());
  Scanner scanner = reader.createScanner();
  scanner.rewind();
  do {
    Entry en = scanner.entry();
    en.get(new BytesWritable(new byte[en.getKeyLength()]), new BytesWritable(new byte[en.getValueLength()]));
  } while (scanner.advance() && !scanner.atEnd());

  reader.close();

}
 
Developer: DataTorrent, Project: Megh, Lines: 17, Source: HadoopFilePerformanceTest.java

Example 11: testDTFileRead

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testDTFileRead() throws Exception
{

  Path file = Testfile.DTFILE.filepath();
  logger.info("Reading {} with {} key/value pairs", file, String.format("%,d", testSize));
  writeTFile(file, TFile.COMPRESSION_NONE);

  startTimer();
  readDTFileSeq(file);
  logger.info("Duration for scanner.next() SEQUENTIAL keys: {}", stopTimer(Testfile.DTFILE, "READ-SEQ"));

  startTimer();
  readDTFileSeqId(file);
  logger.info("Duration for scanner.seekTo(key) SEQUENTIAL keys: {}", stopTimer(Testfile.DTFILE, "READ-SEQ-ID"));

  startTimer();
  readDTFileRandom(file);
  logger.info("Duration for scanner.seekTo(key) RANDOM keys: {}", stopTimer(Testfile.DTFILE, "READ-RAND"));

}
 
Developer: DataTorrent, Project: Megh, Lines: 22, Source: HadoopFilePerformanceTest.java

Example 12: testDTFileReadGZ

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
@Test
public void testDTFileReadGZ() throws Exception
{

  Path file = Testfile.DTFILE_GZ.filepath();
  logger.info("Reading {} with {} key/value pairs", file, String.format("%,d", testSize));
  writeTFile(file, TFile.COMPRESSION_GZ);

  startTimer();
  readDTFileSeq(file);
  logger.info("Duration for scanner.next() SEQUENTIAL keys: {}", stopTimer(Testfile.DTFILE_GZ, "READ-SEQ"));

  startTimer();
  readDTFileSeqId(file);
  logger.info("Duration for scanner.seekTo(key) SEQUENTIAL keys: {}", stopTimer(Testfile.DTFILE_GZ, "READ-SEQ-ID"));

  startTimer();
  readDTFileRandom(file);
  logger.info("Duration for scanner.seekTo(key) RANDOM keys: {}",  stopTimer(Testfile.DTFILE_GZ, "READ-RAND"));

}
 
Developer: DataTorrent, Project: Megh, Lines: 22, Source: HadoopFilePerformanceTest.java

Example 13: next

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
/**
 * Read the next key and return the value-stream.
 *
 * @param key
 * @return the valueStream if there are more keys or null otherwise.
 * @throws IOException
 */
public DataInputStream next(LogKey key) throws IOException {
  if (!this.atBeginning) {
    this.scanner.advance();
  } else {
    this.atBeginning = false;
  }
  if (this.scanner.atEnd()) {
    return null;
  }
  TFile.Reader.Scanner.Entry entry = this.scanner.entry();
  key.readFields(entry.getKeyStream());
  // Skip META keys
  if (RESERVED_KEYS.containsKey(key.toString())) {
    return next(key);
  }
  DataInputStream valueStream = entry.getValueStream();
  return valueStream;
}
 
Developer: hopshadoop, Project: hopsworks, Lines: 26, Source: LogReader.java

Example 14: HistoryFileWriter

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
public HistoryFileWriter(Path historyFile) throws IOException {
  if (fs.exists(historyFile)) {
    fsdos = fs.append(historyFile);
  } else {
    fsdos = fs.create(historyFile);
  }
  try {
    fs.setPermission(historyFile, HISTORY_FILE_UMASK);
    writer =
        new TFile.Writer(fsdos, MIN_BLOCK_SIZE, getConfig().get(
            YarnConfiguration.FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE,
            YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null,
            getConfig());
  } catch (IOException e) {
    IOUtils.cleanup(LOG, fsdos);
    throw e;
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 19, Source: FileSystemApplicationHistoryStore.java
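Once the HistoryFileWriter is constructed, records can be appended through the TFile.Writer streaming API. A simplified sketch follows; the key/value byte layout is illustrative and not the actual FileSystemApplicationHistoryStore record format:

// Simplified sketch: append one key/value record via prepareAppendKey/prepareAppendValue.
void writeEntry(TFile.Writer writer, byte[] key, byte[] value) throws IOException {
  DataOutputStream keyOut = writer.prepareAppendKey(key.length);
  try {
    keyOut.write(key);
  } finally {
    keyOut.close();
  }
  DataOutputStream valueOut = writer.prepareAppendValue(value.length);
  try {
    valueOut.write(value);
  } finally {
    valueOut.close();
  }
}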

Example 15: getScanner

import org.apache.hadoop.io.file.tfile.TFile; // import the required package/class
/**
 * @param path
 * @return
 * @throws IOException
 */
private TFile.Reader.Scanner getScanner(final Path path) throws IOException {
  LOG.log(Level.FINE, "Creating Scanner for path {0}", path);
  final TFile.Reader reader = new TFile.Reader(this.fileSystem.open(path),
      this.fileSystem.getFileStatus(path).getLen(),
      this.configuration);
  final TFile.Reader.Scanner scanner = reader.createScanner();
  for (int counter = 0;
       counter < 3 && !scanner.atEnd();
       counter += 1) {
    //skip VERSION, APPLICATION_ACL, and APPLICATION_OWNER
    scanner.advance();
  }
  LOG.log(Level.FINE, "Created Scanner for path {0}", path);
  return scanner;
}
 
Developer: apache, Project: reef, Lines: 21, Source: TFileParser.java
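A caller of getScanner(Path) could then walk the remaining entries. A minimal consumer sketch, with the processing step left as a placeholder:

// Minimal consumer sketch for the scanner returned by getScanner(Path).
private void readRemainingEntries(TFile.Reader.Scanner scanner) throws IOException {
  BytesWritable key = new BytesWritable();
  BytesWritable value = new BytesWritable();
  while (!scanner.atEnd()) {
    scanner.entry().get(key, value);
    // ... process key/value here ...
    scanner.advance();
  }
  scanner.close();
}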


Note: The org.apache.hadoop.io.file.tfile.TFile examples in this article were compiled from open-source projects hosted on platforms such as GitHub and MSDocs. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using the code, and do not reproduce this article without permission.