

Java Writer.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.SequenceFile.Writer.close. If you are wondering what Writer.close does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.SequenceFile.Writer.


The sections below present 15 code examples of the Writer.close method, sorted by popularity by default.
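Before turning to the collected examples, here is a minimal sketch of the lifecycle they all share: create a writer, append key/value pairs, then close it so buffered records are flushed and the underlying stream is released. Since SequenceFile.Writer implements java.io.Closeable in the current Hadoop 2.x/3.x APIs, the close call can also be handled by try-with-resources; the conf and file parameters below are assumed to be supplied by the caller.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.Text;

public class WriterCloseSketch {
  public static void write(Configuration conf, Path file) throws IOException {
    // try-with-resources invokes writer.close() even if append() throws,
    // flushing buffered data and releasing the underlying output stream
    try (Writer writer = SequenceFile.createWriter(conf,
        Writer.file(file),
        Writer.keyClass(LongWritable.class),
        Writer.valueClass(Text.class))) {
      writer.append(new LongWritable(1L), new Text("one"));
      writer.append(new LongWritable(2L), new Text("two"));
    } // writer.close() runs implicitly here
  }
}

Most of the examples below predate this style and call writer.close() explicitly, often without a finally block; the try-with-resources form is the safer default when the surrounding code allows it.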

Example 1: testJavaSerialization

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
@Test
public void testJavaSerialization() throws Exception {
  Path file = new Path(System.getProperty("test.build.data",".") +
      "/testseqser.seq");
  
  fs.delete(file, true);
  Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
      String.class);
  
  writer.append(1L, "one");
  writer.append(2L, "two");
  
  writer.close();
  
  Reader reader = new Reader(fs, file, conf);
  assertEquals(1L, reader.next((Object) null));
  assertEquals("one", reader.getCurrentValue((Object) null));
  assertEquals(2L, reader.next((Object) null));
  assertEquals("two", reader.getCurrentValue((Object) null));
  assertNull(reader.next((Object) null));
  reader.close();
  
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 24, Source: TestSequenceFileSerialization.java

Example 2: testJavaSerialization

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
public void testJavaSerialization() throws Exception {
  Path file = new Path(System.getProperty("test.build.data",".") +
      "/testseqser.seq");
  
  fs.delete(file, true);
  Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
      String.class);
  
  writer.append(1L, "one");
  writer.append(2L, "two");
  
  writer.close();
  
  Reader reader = new Reader(fs, file, conf);
  assertEquals(1L, reader.next((Object) null));
  assertEquals("one", reader.getCurrentValue((Object) null));
  assertEquals(2L, reader.next((Object) null));
  assertEquals("two", reader.getCurrentValue((Object) null));
  assertNull(reader.next((Object) null));
  reader.close();
  
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestSequenceFileSerialization.java

Example 3: testJavaSerialization

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
public void testJavaSerialization() throws Exception {
  Path file = new Path(System.getProperty("test.build.data",".") +
      "/test.seq");
  
  fs.delete(file, true);
  Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
      String.class);
  
  writer.append(1L, "one");
  writer.append(2L, "two");
  
  writer.close();
  
  Reader reader = new Reader(fs, file, conf);
  assertEquals(1L, reader.next((Object) null));
  assertEquals("one", reader.getCurrentValue((Object) null));
  assertEquals(2L, reader.next((Object) null));
  assertEquals("two", reader.getCurrentValue((Object) null));
  assertNull(reader.next((Object) null));
  reader.close();
  
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 23, Source: TestSequenceFileSerialization.java

Example 4: testJavaSerialization

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
@Test
public void testJavaSerialization() throws Exception {
  Path file = new Path(GenericTestUtils.getTempPath("testseqser.seq"));
  
  fs.delete(file, true);
  Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
      String.class);
  
  writer.append(1L, "one");
  writer.append(2L, "two");
  
  writer.close();
  
  Reader reader = new Reader(fs, file, conf);
  assertEquals(1L, reader.next((Object) null));
  assertEquals("one", reader.getCurrentValue((Object) null));
  assertEquals(2L, reader.next((Object) null));
  assertEquals("two", reader.getCurrentValue((Object) null));
  assertNull(reader.next((Object) null));
  reader.close();
  
}
 
Developer: hopshadoop, Project: hops, Lines: 23, Source: TestSequenceFileSerialization.java

Example 5: storeGenerations

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
private synchronized void storeGenerations() throws IOException {
  FileSystem fileSystem = _path.getFileSystem(_configuration);
  FileStatus[] listStatus = fileSystem.listStatus(_path);
  SortedSet<FileStatus> existing = new TreeSet<FileStatus>(Arrays.asList(listStatus));
  long currentFile;
  if (!existing.isEmpty()) {
    FileStatus last = existing.last();
    currentFile = Long.parseLong(last.getPath().getName());
  } else {
    currentFile = 0;
  }
  Path path = new Path(_path, buffer(currentFile + 1));
  LOG.info("Creating new snapshot file [{0}]", path);
  FSDataOutputStream outputStream = fileSystem.create(path, false);
  Writer writer = SequenceFile.createWriter(_configuration, outputStream, Text.class, LongWritable.class,
      CompressionType.NONE, null);
  for (Entry<String, Long> e : _namesToGenerations.entrySet()) {
    writer.append(new Text(e.getKey()), new LongWritable(e.getValue()));
  }
  writer.close();
  outputStream.close();
  cleanupOldFiles(fileSystem, existing);
}
 
Developer: apache, Project: incubator-blur, Lines: 24, Source: SnapshotIndexDeletionPolicy.java

Example 6: testAppendRecordCompression

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
@Test(timeout = 30000)
public void testAppendRecordCompression() throws Exception {
  GenericTestUtils.assumeInNativeProfile();

  Path file = new Path(ROOT_PATH, "testseqappendblockcompr.seq");
  fs.delete(file, true);

  Option compressOption = Writer.compression(CompressionType.RECORD,
      new GzipCodec());
  Writer writer = SequenceFile.createWriter(conf,
      SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class), compressOption);

  writer.append(1L, "one");
  writer.append(2L, "two");
  writer.close();

  verify2Values(file);

  writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class),
      SequenceFile.Writer.appendIfExists(true), compressOption);

  writer.append(3L, "three");
  writer.append(4L, "four");
  writer.close();

  verifyAll4Values(file);

  fs.deleteOnExit(file);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 34, Source: TestSequenceFileAppend.java

Example 7: writeOutput

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
private void writeOutput(RemoteIterator<? extends FileStatus> input) throws IOException {
    Path outPath = new Path(output);
    if (distribFs.exists(outPath)) {
        throw new IllegalArgumentException("Output file already exists, Not overwriting it:" + output);
    }

    Writer writer = SequenceFile.createWriter(distribFs.getConf(),
            Writer.file(outPath),
            Writer.keyClass(Text.class),
            Writer.valueClass(BytesWritable.class),
            Writer.compression(SequenceFile.CompressionType.RECORD));
    Text key = new Text();
    BytesWritable value = new BytesWritable();
    long skipped = 0;
    long copied = 0;
    while (input.hasNext()) {
        FileStatus next = input.next();
        if (filter(next)) {
            key.set(next.getPath().toString());
            FSDataInputStream stream = localFs.open(next.getPath());
            // CAUTION: this reads the whole file into memory and could overflow on large files
            byte[] bytes = IOUtils.toByteArray(stream);
            stream.close(); // close the source file to avoid leaking handles across iterations
            value.set(bytes, 0, bytes.length);
            writer.append(key, value);
            copied++;
        } else {
            skipped++;
        }
    }
    writer.close();
    System.out.println("Files copied ::" + copied);
    System.out.println("Files skipped ::" + skipped);
}
 
Developer: thammegowda, Project: tika-dl4j-spark-imgrec, Lines: 34, Source: Local2SeqFile.java

Example 8: testSequenceFileSync

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
/** Test hsync via SequenceFiles */
@Test
public void testSequenceFileSync() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testSequenceFileSync/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  Writer w = SequenceFile.createWriter(new Configuration(),
      Writer.stream(out),
      Writer.keyClass(RandomDatum.class),
      Writer.valueClass(RandomDatum.class),
      Writer.compression(CompressionType.NONE, new DefaultCodec()));
  w.hflush();
  checkSyncMetric(cluster, 0);
  w.hsync();
  checkSyncMetric(cluster, 1);
  int seed = new Random().nextInt();
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  generator.next();
  w.append(generator.getKey(), generator.getValue());
  w.hsync();
  checkSyncMetric(cluster, 2);
  w.close();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestHSync.java

Example 9: run

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
public int run(String[] args) throws IOException {
	if (args.length != 2) {
		System.err.println("USAGE: hadoop fr.liglab.mining.AsciiToSequenceFile INPUT OUTPUT");
		return 1; // abort: both INPUT and OUTPUT are required below
	}
	
	FileSystem fs = FileSystem.get(getConf());
	Writer writer = new Writer(fs, getConf(), new Path(args[1]), NullWritable.class, TransactionWritable.class);
	
	NullWritable keyW = NullWritable.get();
	TransactionWritable valueW = new TransactionWritable();
	
	FileReader reader = new FileReader(args[0]);
	ItemsetsFactory factory = new ItemsetsFactory();
	
	while(reader.hasNext()) {
		TransactionReader source = reader.next();
		
		while(source.hasNext()) {
			factory.add(source.next());
		}
		
		valueW.set(factory.get());
		writer.append(keyW, valueW);
	}
	
	writer.close();
	reader.close();
	
	return 0;
}
 
Developer: slide-lig, Project: TopPI, Lines: 31, Source: AsciiToSequenceFile.java

Example 10: testSequenceFileSync

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
/**
 * Test hsync via SequenceFiles
 */
@Test
public void testSequenceFileSync() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testSequenceFileSync/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), EnumSet
      .of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  Writer w = SequenceFile
      .createWriter(new Configuration(), Writer.stream(out),
          Writer.keyClass(RandomDatum.class),
          Writer.valueClass(RandomDatum.class),
          Writer.compression(CompressionType.NONE, new DefaultCodec()));
  w.hflush();
  checkSyncMetric(cluster, 0);
  w.hsync();
  checkSyncMetric(cluster, 1);
  int seed = new Random().nextInt();
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  generator.next();
  w.append(generator.getKey(), generator.getValue());
  w.hsync();
  checkSyncMetric(cluster, 2);
  w.close();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer: hopshadoop, Project: hops, Lines: 36, Source: TestHSync.java

Example 11: testAppendSort

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
@Test(timeout = 30000)
public void testAppendSort() throws Exception {
  GenericTestUtils.assumeInNativeProfile();

  Path file = new Path(ROOT_PATH, "testseqappendSort.seq");
  fs.delete(file, true);

  Path sortedFile = new Path(ROOT_PATH, "testseqappendSort.seq.sort");
  fs.delete(sortedFile, true);

  SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
      new JavaSerializationComparator<Long>(), Long.class, String.class, conf);

  Option compressOption = Writer.compression(CompressionType.BLOCK,
      new GzipCodec());
  Writer writer = SequenceFile.createWriter(conf,
      SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class), compressOption);

  writer.append(2L, "two");
  writer.append(1L, "one");

  writer.close();

  writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class),
      SequenceFile.Writer.appendIfExists(true), compressOption);

  writer.append(4L, "four");
  writer.append(3L, "three");
  writer.close();

  // Sort file after append
  sorter.sort(file, sortedFile);
  verifyAll4Values(sortedFile);

  fs.deleteOnExit(file);
  fs.deleteOnExit(sortedFile);
}
 
Developer: hopshadoop, Project: hops, Lines: 42, Source: TestSequenceFileAppend.java

Example 12: copyTo64MB

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
public static void copyTo64MB(String src, String dst) throws IOException {
    Configuration hconf = new Configuration();
    Path srcPath = new Path(src);
    Path dstPath = new Path(dst);

    FileSystem fs = FileSystem.get(hconf);
    long srcSize = fs.getFileStatus(srcPath).getLen();
    int copyTimes = (int) (67108864 / srcSize); // 64 MB
    System.out.println("Copy " + copyTimes + " times");

    Reader reader = new Reader(hconf, SequenceFile.Reader.file(srcPath));
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), hconf);
    Text value = new Text();

    Writer writer = SequenceFile.createWriter(hconf, Writer.file(dstPath),
            Writer.keyClass(key.getClass()), Writer.valueClass(Text.class),
            Writer.compression(CompressionType.BLOCK, getLZOCodec(hconf)));

    int count = 0;
    while (reader.next(key, value)) {
        for (int i = 0; i < copyTimes; i++) {
            writer.append(key, value);
            count++;
        }
    }

    System.out.println("Len: " + writer.getLength());
    System.out.println("Rows: " + count);

    reader.close();
    writer.close();
}
 
Developer: KylinOLAP, Project: Kylin, Lines: 31, Source: CopySeq.java

Example 13: generateData

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
private void generateData(String mrIncWorkingPathStr, String rowId, String recordId, String value) throws IOException {
  Path path = new Path(new Path(mrIncWorkingPathStr), "new");
  Writer writer = new SequenceFile.Writer(miniCluster.getFileSystem(), conf, new Path(path, UUID.randomUUID()
      .toString()), Text.class, BlurRecord.class);
  BlurRecord blurRecord = new BlurRecord();
  blurRecord.setRowId(rowId);
  blurRecord.setRecordId(recordId);
  blurRecord.setFamily("fam0");
  blurRecord.addColumn("col0", value);
  writer.append(new Text(rowId), blurRecord);
  writer.close();
}
 
Developer: apache, Project: incubator-blur, Lines: 13, Source: DriverTest.java

Example 14: getMrWorkingPathWriter

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
private org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getMrWorkingPathWriter(
    final Configuration configuration) throws IOException {
  PrivilegedExceptionAction<org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter> privilegedExceptionAction = new PrivilegedExceptionAction<org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter>() {
    @Override
    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter run() throws Exception {
      String workingPathStr = configuration.get(BlurConstants.BLUR_BULK_UPDATE_WORKING_PATH);
      Path workingPath = new Path(workingPathStr);
      Path tmpDir = new Path(workingPath, "tmp");
      FileSystem fileSystem = tmpDir.getFileSystem(configuration);
      String loadId = configuration.get(BlurSerDe.BLUR_MR_LOAD_ID);
      Path loadPath = new Path(tmpDir, loadId);
      final Writer writer = new SequenceFile.Writer(fileSystem, configuration, new Path(loadPath, UUID.randomUUID()
          .toString()), Text.class, BlurRecord.class);

      return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() {

        @Override
        public void write(Writable w) throws IOException {
          BlurRecord blurRecord = (BlurRecord) w;
          String rowId = blurRecord.getRowId();
          writer.append(new Text(rowId), blurRecord);
        }

        @Override
        public void close(boolean abort) throws IOException {
          writer.close();
        }
      };
    }
  };

  UserGroupInformation userGroupInformation = getUGI(configuration);
  try {
    return userGroupInformation.doAs(privilegedExceptionAction);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
Developer: apache, Project: incubator-blur, Lines: 39, Source: BlurHiveOutputFormat.java

Example 15: convertToSeq

import org.apache.hadoop.io.SequenceFile.Writer; // import the package/class this method depends on
public void convertToSeq (String inputPath, String outputPath) throws Exception {
	if (inputPath == null || outputPath == null) { // bail out if either path is missing
		System.err.println("ComplaintsCSVtoSeq::convertToSeq(): Missing input or output path");
		return;
	}

	// Read the input file path and output directory supplied by the caller
	String inputFile = inputPath;
	String outputDir = outputPath;
	Configuration configuration = new Configuration();
	FileSystem fileSystem = FileSystem.get(configuration);
	Writer writer = new SequenceFile.Writer(fileSystem, configuration, new Path(outputDir + "/chunk-0"),
			Text.class, Text.class);
	
	int count = 0;

	// FileReader throws FileNotFoundException if the input file is missing; report it cleanly
	try {
		BufferedReader reader = new BufferedReader(new FileReader(inputFile));

		// Text stores text using standard UTF8 encoding
		Text key = new Text();
		Text value = new Text();
		while(true) {
			String everyLine = reader.readLine();
			if (everyLine == null) {
				break;
			}

			// Split each line on commas into at most 3 fields (the issue text may itself contain commas)
			String[] columns = everyLine.split(",", 3);
			if (columns.length != 3) {
				System.out.println("ComplaintsCSVtoSeq::convertToSeq(): Invalid line " + everyLine);
				continue; // skip malformed lines instead of indexing missing columns
			}
			String productClassified = columns[0];
			String complaintId = columns[1];
			String customerIssue = columns[2];
			key.set("/" + productClassified + "/" + complaintId);
			value.set(customerIssue);
			writer.append(key, value);
			count++;
		}
		reader.close();
		writer.close();
		System.out.println("ComplaintsCSVtoSeq::main(): Wrote " + count + " entries to Seq File.");
	}
	catch(FileNotFoundException fe) { System.err.println("ComplaintsCSVtoSeq::convertToSeq(): File not Found"); }
}
 
Developer: Sapphirine, Project: Customer-Complaint-Analyses, Lines: 49, Source: ComplaintsCSVtoSeq.java


Note: The org.apache.hadoop.io.SequenceFile.Writer.close examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.