

Java Reader.createScanner Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner. If you are wondering what Reader.createScanner does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.file.tfile.TFile.Reader.


Fourteen code examples of the Reader.createScanner method are shown below, sorted by popularity.
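
Before turning to the individual examples, here is a minimal end-to-end sketch of the pattern they all share: write a TFile, open a TFile.Reader on it, and iterate the entries through Reader.createScanner. The class name TFileScanSketch, the temporary file location, and the 64 KB minimum block size are illustrative assumptions, not details taken from the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    // Assumed location for illustration only.
    Path path = new Path(System.getProperty("java.io.tmpdir"), "example.tfile");

    // Write a small TFile; with the "memcmp" comparator, keys must be
    // appended in ascending byte order.
    FSDataOutputStream out = fs.create(path);
    TFile.Writer writer = new TFile.Writer(out, 64 * 1024,
        TFile.COMPRESSION_NONE, TFile.COMPARATOR_MEMCMP, conf);
    for (int i = 0; i < 3; i++) {
      writer.append(("key" + i).getBytes(), ("value" + i).getBytes());
    }
    writer.close();
    out.close();

    // Read every entry back with a whole-file scanner.
    FSDataInputStream in = fs.open(path);
    TFile.Reader reader =
        new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    try {
      while (!scanner.atEnd()) {
        byte[] kbuf = new byte[scanner.entry().getKeyLength()];
        byte[] vbuf = new byte[scanner.entry().getValueLength()];
        scanner.entry().getKey(kbuf);
        scanner.entry().getValue(vbuf);
        System.out.println(new String(kbuf) + " -> " + new String(vbuf));
        scanner.advance();
      }
    } finally {
      scanner.close();
      reader.close();
      in.close();
    }
    fs.delete(path, true);
  }
}

The examples below vary this pattern mainly in how they position the scanner (seekTo, lowerBound) and in which entry accessors they exercise.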

Example 1: unsortedWithSomeCodec

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  // Reuse the opened stream rather than opening the file a second time.
  Reader reader =
      new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 19 | Source: TestTFile.java

Example 2: testFailureNegativeOffset

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testFailureNegativeOffset() throws IOException {
  if (skip)
    return;
  writeRecords(2, true, true);

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  byte[] buf = new byte[K];
  try {
    scanner.entry().getKey(buf, -1);
    fail("Failed to handle key negative offset.");
  } catch (Exception e) {
    // noop, expecting exceptions
  } finally {
    scanner.close();
    reader.close();
  }
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 22 | Source: TestTFileStreams.java

Example 3: testFailureReadValueManyTimes

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testFailureReadValueManyTimes() throws IOException {
  if (skip)
    return;
  writeRecords(5);

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();

  byte[] vbuf = new byte[BUF_SIZE];
  int vlen = scanner.entry().getValueLength();
  scanner.entry().getValue(vbuf);
  Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + 0);
  try {
    scanner.entry().getValue(vbuf);
    Assert.fail("Cannot get the value mlutiple times.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }

  scanner.close();
  reader.close();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 24 | Source: TestTFileByteArrays.java

Example 4: testFailureNegativeOffset_2

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testFailureNegativeOffset_2() throws IOException {
  if (skip)
    return;
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.lowerBound("keyX".getBytes(), -1, 4);
    Assert.fail("Error on handling negative offset.");
  } catch (Exception e) {
    // noop, expecting exceptions
  } finally {
    scanner.close();
    reader.close();
  }
  closeOutput();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 20 | Source: TestTFileByteArrays.java

Example 5: testFailureNegativeOffset

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
public void testFailureNegativeOffset() throws IOException {
  if (skip)
    return;
  writeRecords(2, true, true);

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  byte[] buf = new byte[K];
  try {
    scanner.entry().getKey(buf, -1);
    Assert.fail("Failed to handle key negative offset.");
  } catch (Exception e) {
    // noop, expecting exceptions
  } finally {
    scanner.close();
    reader.close();
  }
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: TestTFileStreams.java

Example 6: testFailureNegativeLength_2

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testFailureNegativeLength_2() throws IOException {
  if (skip)
    return;
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.lowerBound("keyX".getBytes(), 0, -1);
    Assert.fail("Error on handling negative length.");
  } catch (Exception e) {
    // noop, expecting exceptions
  } finally {
    scanner.close();
    reader.close();
  }
  closeOutput();
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestTFileByteArrays.java

Example 7: testScanRange

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
public void testScanRange() throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertFalse(reader.isSorted());
  Assert.assertEquals((int) reader.getEntryCount(), 4);

  Scanner scanner = reader.createScanner();

  try {

    // read key and value
    byte[] kbuf = new byte[BUF_SIZE];
    int klen = scanner.entry().getKeyLength();
    scanner.entry().getKey(kbuf);
    Assert.assertEquals(new String(kbuf, 0, klen), "keyZ");

    byte[] vbuf = new byte[BUF_SIZE];
    int vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), "valueZ");

    scanner.advance();

    // now try get value first
    vbuf = new byte[BUF_SIZE];
    vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), "valueM");

    kbuf = new byte[BUF_SIZE];
    klen = scanner.entry().getKeyLength();
    scanner.entry().getKey(kbuf);
    Assert.assertEquals(new String(kbuf, 0, klen), "keyM");
  }
  finally {
    scanner.close();
    reader.close();
  }
}
 
Developer: naver | Project: hadoop | Lines: 40 | Source: TestTFileUnsortedByteArrays.java

Example 8: testScan

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
public void testScan() throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertFalse(reader.isSorted());
  Assert.assertEquals((int) reader.getEntryCount(), 4);

  Scanner scanner = reader.createScanner();

  try {

    // read key and value
    byte[] kbuf = new byte[BUF_SIZE];
    int klen = scanner.entry().getKeyLength();
    scanner.entry().getKey(kbuf);
    Assert.assertEquals(new String(kbuf, 0, klen), "keyZ");

    byte[] vbuf = new byte[BUF_SIZE];
    int vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), "valueZ");

    scanner.advance();

    // now try get value first
    vbuf = new byte[BUF_SIZE];
    vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), "valueM");

    kbuf = new byte[BUF_SIZE];
    klen = scanner.entry().getKeyLength();
    scanner.entry().getKey(kbuf);
    Assert.assertEquals(new String(kbuf, 0, klen), "keyM");
  }
  finally {
    scanner.close();
    reader.close();
  }
}
 
Developer: naver | Project: hadoop | Lines: 40 | Source: TestTFileUnsortedByteArrays.java

Example 9: testLocate

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testLocate() throws IOException {
  if (skip)
    return;
  writeRecords(3 * records1stBlock);
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  locate(scanner, composeSortedKey(KEY, 2).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes());
  Location locX = locate(scanner, "keyX".getBytes());
  Assert.assertEquals(scanner.endLocation, locX);
  scanner.close();
  reader.close();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 16 | Source: TestTFileByteArrays.java

Example 10: readRecords

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
static void readRecords(FileSystem fs, Path path, int count,
    Configuration conf) throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();

  try {
    for (int nx = 0; nx < count; nx++, scanner.advance()) {
      Assert.assertFalse(scanner.atEnd());
      // Assert.assertTrue(scanner.next());

      byte[] kbuf = new byte[BUF_SIZE];
      int klen = scanner.entry().getKeyLength();
      scanner.entry().getKey(kbuf);
      Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY,
          nx));

      byte[] vbuf = new byte[BUF_SIZE];
      int vlen = scanner.entry().getValueLength();
      scanner.entry().getValue(vbuf);
      Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + nx);
    }

    Assert.assertTrue(scanner.atEnd());
    Assert.assertFalse(scanner.advance());
  } finally {
    scanner.close();
    reader.close();
  }
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 31 | Source: TestTFileByteArrays.java

Example 11: checkBlockIndex

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
private void checkBlockIndex(int recordIndex, int blockIndexExpected) throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  scanner.seekTo(composeSortedKey(KEY, recordIndex).getBytes());
  Assert.assertEquals(blockIndexExpected, scanner.currentLocation
      .getBlockIndex());
  scanner.close();
  reader.close();
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: TestTFileByteArrays.java

Example 12: seekTFile

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Developer: naver | Project: hadoop | Lines: 36 | Source: TestTFileSeek.java

Example 13: testNoDataEntry

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
@Test
public void testNoDataEntry() throws IOException {
  if (skip) 
    return;
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertTrue(reader.isSorted());
  Scanner scanner = reader.createScanner();
  Assert.assertTrue(scanner.atEnd());
  scanner.close();
  reader.close();
}
 
Developer: naver | Project: hadoop | Lines: 14 | Source: TestTFileByteArrays.java

Example 14: seekTFile

import org.apache.hadoop.io.file.tfile.TFile.Reader; // import the package/class this method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.getBytes(), 0, key.getLength());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getLength();
      totalBytes += val.getLength();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 36 | Source: TestTFileSeek.java


Note: the org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before redistributing or using the code, and do not republish without permission.