

Java MapFile.Reader Code Examples

This article collects typical usage examples of Java's org.apache.hadoop.io.MapFile.Reader (the reader class nested in MapFile). If you are wondering what exactly MapFile.Reader does, how to use it, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.MapFile.


The sections below present 15 code examples of MapFile.Reader, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
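Before the individual examples, here is a minimal self-contained sketch of the two access patterns they rely on: random lookup with get() and ordered iteration with next(). The path /tmp/example-mapfile, the class name MapFileReaderSketch, and the Text key/value types are illustrative assumptions, not taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;

public class MapFileReaderSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical path: assumes a MapFile with Text keys/values already exists here.
        Path path = new Path("/tmp/example-mapfile");
        MapFile.Reader reader = new MapFile.Reader(path, conf);
        try {
            // Random access: binary-search the index for a single key.
            Text value = new Text();
            if (reader.get(new Text("002"), value) != null) {
                System.out.println("002 => " + value);
            }
            // get() moves the reader's position, so rewind before a full scan.
            reader.reset();
            // Sequential access: iterate all entries in key order.
            Text key = new Text();
            while (reader.next(key, value)) {
                System.out.println(key + " => " + value);
            }
        } finally {
            reader.close();
        }
    }
}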

Example 1: codecTestMapFile

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: TestCodec.java

Example 2: getReaders

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(Path dir,
    Configuration conf) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  PathFilter filter = new PathFilter() {
    @Override
    public boolean accept(Path path) {
      String name = path.getName();
      if (name.startsWith("_") || name.startsWith("."))
        return false;
      return true;
    }
  };
  Path[] names = FileUtil.stat2Paths(fs.listStatus(dir, filter));

  // sort names, so that hash partitioning works
  Arrays.sort(names);
  
  MapFile.Reader[] parts = new MapFile.Reader[names.length];
  for (int i = 0; i < names.length; i++) {
    parts[i] = new MapFile.Reader(fs, names[i].toString(), conf);
  }
  return parts;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: MapFileOutputFormat.java

Example 3: getSharedMapFile

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
public static final IndexedMapFile getSharedMapFile(String symbol, JobConf job) throws IOException {
	
	int slots = job.getInt(symbol, 0);
	
	if (slots <= 0) {
		log.error("slot count must be at least 1!");
		System.exit(-1);
	}
	
	FileSystem fs = FileSystem.getLocal(job);
	MapFile.Reader[] readers = new MapFile.Reader[slots];
	for (int i=0; i<slots; i++) {
		String symbfile = fs.getWorkingDirectory().toString() + "/" + symbol + "-" + Integer.toString(i);
		readers[i] = new MapFile.Reader(fs, symbfile, job);
	}
	
	return new IndexedMapFile(slots, readers);
}
 
Developer: thrill, Project: fst-bench, Lines: 19, Source: Utils.java

Example 4: getMapRecords

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
private List<Writable> getMapRecords(Path dir, Text key) throws Exception {
  MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, dir,
      getConf());
  ArrayList<Writable> res = new ArrayList<Writable>();
  Class<?> keyClass = readers[0].getKeyClass();
  Class<?> valueClass = readers[0].getValueClass();
  if (!keyClass.getName().equals("org.apache.hadoop.io.Text"))
    throw new IOException("Incompatible key (" + keyClass.getName() + ")");
  Writable value = (Writable) valueClass.newInstance();
  // we don't know the partitioning schema
  for (int i = 0; i < readers.length; i++) {
    if (readers[i].get(key, value) != null) {
      res.add(value);
      value = (Writable) valueClass.newInstance();
      Text aKey = (Text) keyClass.newInstance();
      while (readers[i].next(aKey, value) && aKey.equals(key)) {
        res.add(value);
        value = (Writable) valueClass.newInstance();
      }
    }
    readers[i].close();
  }
  return res;
}
 
Developer: jorcox, Project: GeoCrawler, Lines: 25, Source: SegmentReader.java

Example 5: MapFileReader

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
public MapFileReader(List<String> paths, IndexToKey indexToKey, Class<? extends Writable> recordClass)
                throws IOException {

    this.indexToKey = indexToKey;
    this.recordClass = recordClass;
    this.readers = new MapFile.Reader[paths.size()];

    SequenceFile.Reader.Option[] opts = new SequenceFile.Reader.Option[0];

    Configuration config = new Configuration();
    for (int i = 0; i < paths.size(); i++) {
        readers[i] = new MapFile.Reader(new Path(paths.get(i)), config, opts);
        if (readers[i].getValueClass() != recordClass) {
            throw new UnsupportedOperationException("MapFile record class: " + readers[i].getValueClass()
                            + ", but got class " + recordClass + ", path = " + paths.get(i));
        }
    }

    recordIndexesEachReader = indexToKey.initialize(readers, recordClass);
}
 
Developer: deeplearning4j, Project: DataVec, Lines: 21, Source: MapFileReader.java

Example 6: testMapWriteTextWithKey

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
@Test
public void testMapWriteTextWithKey() throws Exception {
    if (!canTest()) {
        return;
    }
    String txtKey = "THEKEY";
    String txtValue = "CIAO MONDO !";
    template.sendBodyAndHeader("direct:write_text3", txtValue, "KEY", txtKey);

    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text3");
    FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
    MapFile.Reader reader = new MapFile.Reader(fs1, "file:///" + TEMP_DIR.toUri() + "/test-camel-text3", conf);
    Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    assertEquals(key.toString(), txtKey);
    assertEquals(value.toString(), txtValue);

    IOHelper.close(reader);
}
 
Developer: HydAu, Project: Camel, Lines: 22, Source: HdfsProducerTest.java

Example 7: testMapWriteTextWithKey

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
@Test
public void testMapWriteTextWithKey() throws Exception {
    if (!canTest()) {
        return;
    }
    String txtKey = "THEKEY";
    String txtValue = "CIAO MONDO !";
    template.sendBodyAndHeader("direct:write_text3", txtValue, "KEY", txtKey);

    Configuration conf = new Configuration();
    MapFile.Reader reader = new MapFile.Reader(new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text3"), conf);
    Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    assertEquals(key.toString(), txtKey);
    assertEquals(value.toString(), txtValue);

    IOHelper.close(reader);
}
 
Developer: HydAu, Project: Camel, Lines: 20, Source: HdfsProducerTest.java

Example 8: loadLSMapFile

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
public static LocalStructureCylinder [][] loadLSMapFile(Configuration conf) {

		String name = conf.get(Util.MAPFILENAMEPROPERTY, Util.MAPFILEDEFAULTNAME);
		MapFile.Reader lsmapfile = Util.createMapFileReader(conf, name);

		LocalStructureCylinder [][] result = null;

		WritableComparable<?> key = (WritableComparable<?>) ReflectionUtils.newInstance(lsmapfile.getKeyClass(), conf);

		LSCylinderArray value = (LSCylinderArray) ReflectionUtils.newInstance(lsmapfile.getValueClass(), conf);

		try {
			while(lsmapfile.next(key, value)) {
				result = (LocalStructureCylinder [][]) ArrayUtils.add(result,
						Arrays.copyOf(value.get(), value.get().length, LocalStructureCylinder[].class));
			}
		} catch (Exception e) {
			System.err.println("LocalStructureCylinder.loadLSMapFile: unable to read fingerprint "
					+ key + " in MapFile " + name + ": " + e.getMessage());
			e.printStackTrace();
		}

		IOUtils.closeStream(lsmapfile);

		return result;		
	}
 
Developer: dperaltac, Project: bigdata-fingerprint, Lines: 27, Source: PartialScoreLSSR.java

Example 9: codecTestMapFile

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(GenericTestUtils.getTempPath(
      clazz.getSimpleName() + "-" + type + "-" + records));

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Developer: hopshadoop, Project: hops, Lines: 18, Source: TestCodec.java

Example 10: runHadoopGetNames

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
private static void runHadoopGetNames(String in) throws IOException {
	Configuration conf = new Configuration();
	FileSystem fs = FileSystem.get(conf);

	if (!fs.exists(new Path(in))) {
		System.out.println("Error: Hdfs file " + in + " does not exist!");
		System.exit(-1);
	}

	MapFile.Reader reader = null;

	try {
		reader = new MapFile.Reader(fs, in, conf);
		Text key = (Text) reader.getKeyClass().newInstance();
		BytesWritable value = (BytesWritable) reader.getValueClass().newInstance();
		while (reader.next(key, value)) {
			System.out.println(key.toString());
		}
	} catch (Exception e) {
		e.printStackTrace();
	} finally {
		// release the reader on both the success and failure paths
		if (reader != null) reader.close();
	}
}
 
Developer: opencb, Project: hpg-pore, Lines: 26, Source: Fast5NamesCmd.java

Example 11: loadLSMapFile

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
public static LocalStructureJiang [][] loadLSMapFile(Configuration conf) {

		String name = conf.get(Util.MAPFILENAMEPROPERTY, Util.MAPFILEDEFAULTNAME);
		MapFile.Reader lsmapfile = Util.createMapFileReader(conf, name);

		LocalStructureJiang [][] result = null;

		WritableComparable<?> key = (WritableComparable<?>) ReflectionUtils.newInstance(lsmapfile.getKeyClass(), conf);

		LSJiangArray value = (LSJiangArray) ReflectionUtils.newInstance(lsmapfile.getValueClass(), conf);
		
		try {
			while(lsmapfile.next(key, value)) {
				result = (LocalStructureJiang [][]) ArrayUtils.add(result,
						Arrays.copyOf(value.get(), value.get().length, LocalStructureJiang[].class));
			}
		} catch (Exception e) {
			System.err.println("LocalStructureJiang.loadLSMapFile: unable to read fingerprint "
					+ key + " in MapFile " + name + ": " + e.getMessage());
			e.printStackTrace();
		}
		
		IOUtils.closeStream(lsmapfile);
		
		return result;		
	}
 
Developer: dperaltac, Project: bigdata-fingerprint, Lines: 27, Source: LocalStructureJiang.java

Example 12: getReaders

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(FileSystem ignored, Path dir,
                                          Configuration conf)
    throws IOException {
  return org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat.
    getReaders(dir, conf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: MapFileOutputFormat.java

Example 13: getEntry

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable, V extends Writable>
Writable getEntry(MapFile.Reader[] readers,
                                Partitioner<K, V> partitioner,
                                K key,
                                V value) throws IOException {
  int part = partitioner.getPartition(key, value, readers.length);
  return readers[part].get(key, value);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: MapFileOutputFormat.java

Example 14: getReaders

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(Path dir,
    Configuration conf) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));

  // sort names, so that hash partitioning works
  Arrays.sort(names);
  
  MapFile.Reader[] parts = new MapFile.Reader[names.length];
  for (int i = 0; i < names.length; i++) {
    parts[i] = new MapFile.Reader(fs, names[i].toString(), conf);
  }
  return parts;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: MapFileOutputFormat.java

Example 15: getEntry

import org.apache.hadoop.io.MapFile; // import the package/class this method depends on
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable<?>, V extends Writable>
    Writable getEntry(MapFile.Reader[] readers, 
    Partitioner<K, V> partitioner, K key, V value) throws IOException {
  int part = partitioner.getPartition(key, value, readers.length);
  return readers[part].get(key, value);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: MapFileOutputFormat.java
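As a closing illustration, the getReaders and getEntry helpers shown above are typically combined: getReaders() opens every part file in a job's output directory, and getEntry() routes a lookup to the single part that the job's partitioner would have written the key to. The output directory /tmp/job-output, the class name MapFileLookupSketch, and the Text key/value types below are assumptions for illustration; the sketch also presumes the job used the default HashPartitioner.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

public class MapFileLookupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical job output directory; replace with a real one.
        Path outputDir = new Path("/tmp/job-output");
        MapFile.Reader[] readers = MapFileOutputFormat.getReaders(outputDir, conf);
        try {
            Text key = new Text("002");
            Text value = new Text();
            // getEntry picks the right part-* file via the same partitioner
            // the job used, so only one reader is consulted.
            Writable result = MapFileOutputFormat.getEntry(
                    readers, new HashPartitioner<Text, Text>(), key, value);
            System.out.println(result == null ? "not found" : value.toString());
        } finally {
            for (MapFile.Reader r : readers) {
                r.close();
            }
        }
    }
}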


Note: The org.apache.hadoop.io.MapFile.Reader examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or reusing the code; do not republish without permission.