當前位置: 首頁>>代碼示例>>Java>>正文


Java CsvMapper類代碼示例

本文整理匯總了Java中com.fasterxml.jackson.dataformat.csv.CsvMapper的典型用法代碼示例。如果您正苦於以下問題:Java CsvMapper類的具體用法?Java CsvMapper怎麽用?Java CsvMapper使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。


CsvMapper類屬於com.fasterxml.jackson.dataformat.csv包,在下文中一共展示了CsvMapper類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: readTruckEventsFromCsv

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Builds a CSV reader for truck event records and returns a streaming iterator
 * over the rows of the given input stream.
 *
 * Expected column order (first line of the stream is a header and is skipped):
 * driverId,truckId,eventTime,eventType,longitude,latitude,eventKey,correlationId,driverName,routeId,routeName,eventDate
 *
 * @param csvStream raw CSV bytes; not closed by this method
 * @return a {@link MappingIterator} yielding one {@code TruckEvent} per data row
 * @throws IOException if the reader cannot be constructed over the stream
 */
private MappingIterator<TruckEvent> readTruckEventsFromCsv(InputStream csvStream) throws IOException {
    CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    schemaBuilder.addColumn("driverId", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("truckId", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("eventTime", CsvSchema.ColumnType.STRING);
    schemaBuilder.addColumn("eventType", CsvSchema.ColumnType.STRING);
    schemaBuilder.addColumn("longitude", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("latitude", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("eventKey", CsvSchema.ColumnType.STRING);
    schemaBuilder.addColumn("correlationId", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("driverName", CsvSchema.ColumnType.STRING);
    schemaBuilder.addColumn("routeId", CsvSchema.ColumnType.NUMBER);
    schemaBuilder.addColumn("routeName", CsvSchema.ColumnType.STRING);
    schemaBuilder.addColumn("eventDate", CsvSchema.ColumnType.STRING);
    // withHeader() makes the parser consume (and ignore) the file's own header row.
    CsvSchema schema = schemaBuilder.build().withHeader();

    return new CsvMapper().readerFor(TruckEvent.class).with(schema).readValues(csvStream);
}
 
開發者ID:hortonworks,項目名稱:registry,代碼行數:22,代碼來源:TruckEventsCsvConverter.java

示例2: testRunSorterFirstColumn

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterFirstColumn() throws Exception {
	// Configure the factory and the mapper identically: trim surrounding
	// whitespace and tolerate YAML-style '#' comments in the input.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema headerlessSchema = CsvSchema.builder().setUseHeader(false).build();

	// Sanity-check the unsorted fixture first.
	verifyCSV(testInput1, 1, 2, 4, csvMapper, headerlessSchema);

	try (Reader input = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) {
		// Sort on the first column only.
		CSVSorter.runSorter(input, testOutput, 1, headerlessSchema, CSVSorter.getComparator(0), true);
	}

	// Sorting must preserve the row/column counts of the input.
	verifyCSV(testOutput, 1, 2, 4, csvMapper, headerlessSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:23,代碼來源:CSVSorterTest.java

示例3: testRunSorterSecondColumn

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterSecondColumn() throws Exception {
	// Same parser setup as the other sorter tests: trim spaces, allow '#' comments.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema headerlessSchema = CsvSchema.builder().setUseHeader(false).build();

	// Verify the fixture before sorting.
	verifyCSV(testInput1, 1, 2, 4, csvMapper, headerlessSchema);

	try (Reader input = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) {
		// Sort on the second column (index 1).
		CSVSorter.runSorter(input, testOutput, 1, headerlessSchema, CSVSorter.getComparator(1), true);
	}

	// And verify the sorted output has the same shape.
	verifyCSV(testOutput, 1, 2, 4, csvMapper, headerlessSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:22,代碼來源:CSVSorterTest.java

示例4: testRunSorterSecondColumnThenFirst

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterSecondColumnThenFirst() throws Exception {
	// Trim whitespace and allow YAML-style comments on both factory and mapper.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema headerlessSchema = CsvSchema.builder().setUseHeader(false).build();

	// Fixture sanity check: 2 columns, 5 rows expected.
	verifyCSV(testInput3, 1, 2, 5, csvMapper, headerlessSchema);

	try (Reader input = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) {
		// Primary sort key: column 1; tiebreaker: column 0.
		CSVSorter.runSorter(input, testOutput, 1, headerlessSchema, CSVSorter.getComparator(1, 0), true);
	}

	verifyCSV(testOutput, 1, 2, 5, csvMapper, headerlessSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:23,代碼來源:CSVSorterTest.java

示例5: testRunSorterFirstColumnThenSecond

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterFirstColumnThenSecond() throws Exception {
	// Mirror image of testRunSorterSecondColumnThenFirst: key order reversed.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema headerlessSchema = CsvSchema.builder().setUseHeader(false).build();

	verifyCSV(testInput3, 1, 2, 5, csvMapper, headerlessSchema);

	try (Reader input = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) {
		// Primary sort key: column 0; tiebreaker: column 1.
		CSVSorter.runSorter(input, testOutput, 1, headerlessSchema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 1, 2, 5, csvMapper, headerlessSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:23,代碼來源:CSVSorterTest.java

示例6: testRunSorterTSV

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterTSV() throws Exception {
	// Same parser features as the CSV tests, but the schema below switches
	// the column separator to a tab so the sorter handles TSV input.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema tsvSchema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build();

	verifyCSV(testInput4, 1, 2, 5, csvMapper, tsvSchema);

	try (Reader input = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(input, testOutput, 1, tsvSchema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 1, 2, 5, csvMapper, tsvSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:22,代碼來源:CSVSorterTest.java

示例7: testRunSorterTSVMultipleHeaderLines

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testRunSorterTSVMultipleHeaderLines() throws Exception {
	// TSV variant where the first 10 lines are header lines that the sorter
	// must carry through unsorted.
	CsvFactory factory = new CsvFactory();
	factory.enable(CsvParser.Feature.TRIM_SPACES);
	factory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper csvMapper = new CsvMapper(factory);
	csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
	csvMapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

	CsvSchema tsvSchema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build();

	verifyCSV(testInput5, 10, 2, 5, csvMapper, tsvSchema);

	try (Reader input = Files.newBufferedReader(testInput5, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(input, testOutput, 10, tsvSchema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 10, 2, 5, csvMapper, tsvSchema);
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:22,代碼來源:CSVSorterTest.java

示例8: testWriteFullCode

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
@Test
public final void testWriteFullCode()
	throws Exception
{
	List<String> headerNames = Arrays.asList("TestHeader1", "TestHeader2");
	// Deliberately empty data source: the test exercises header-only output.
	List<List<String>> rows = Arrays.asList();
	StringWriter output = new StringWriter();

	// One schema column per header, then ask the writer to emit the header line.
	CsvSchema.Builder schemaBuilder = CsvSchema.builder();
	headerNames.forEach(schemaBuilder::addColumn);
	CsvSchema schema = schemaBuilder.setUseHeader(true).build();

	try (SequenceWriter csvWriter = new CsvMapper().writerWithDefaultPrettyPrinter().with(schema)
			.forType(List.class).writeValues(output))
	{
		for (List<String> row : rows) {
			csvWriter.write(row);
		}
		// Jackson only emits the header once a value is written, so an empty
		// data source still needs one (empty) row to trigger header output.
		if (rows.isEmpty()) {
			csvWriter.write(Arrays.asList());
		}
	}
	System.out.println(output.toString());
}
 
開發者ID:ansell,項目名稱:csvsum,代碼行數:29,代碼來源:CSVUtilTest.java

示例9: processForkFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Reads a CSV file of GitHub fork events (header row expected, "None" treated
 * as null) and forwards each row to the converter service. Parse failures are
 * logged and swallowed so one bad file does not abort the run.
 */
private void processForkFile(File f, Set<String> users, Set<String> projects) {
	logger.info("Processing "+f);

	try (InputStream in = new FileInputStream(f)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubForkEvent> events =
				csvMapper.readerFor(GitHubForkEvent.class).with(headerSchema).readValues(in);
		while (events.hasNextValue()) {
			GitHubForkEvent forkEvent = events.next();
			converterService.mapUserForkEvent(forkEvent, users, projects);
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+f, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:19,代碼來源:GithubConverter.java

示例10: processCreateDeleteEntity

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Reads a CSV file of GitHub create/delete events (header row expected,
 * "None" mapped to null) and hands each row to the converter service.
 * Any parse error is logged and the file is skipped.
 */
private void processCreateDeleteEntity(File f, Set<String> users, Set<String> projects) {
	logger.info("Processing "+f);

	try (InputStream in = new FileInputStream(f)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubCreateDeleteEvent> events =
				csvMapper.readerFor(GitHubCreateDeleteEvent.class).with(headerSchema).readValues(in);
		while (events.hasNextValue()) {
			GitHubCreateDeleteEvent event = events.next();
			converterService.mapUserCreateDeleteEvent(event, users, projects);
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+f, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:18,代碼來源:GithubConverter.java

示例11: processPushEvents

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Reads a CSV file of GitHub push events (header row expected, "None" mapped
 * to null) and forwards every event that carries commit SHAs to the converter
 * service. Progress is logged every 10,000 rows. Parse errors are logged and
 * the file is skipped.
 */
private void processPushEvents(File f, Set<String> users, Set<String> projects) {
	try (InputStream in = new FileInputStream(f)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		long rowCount = 0;
		MappingIterator<GitHubPushEvent> events =
				csvMapper.readerFor(GitHubPushEvent.class).with(headerSchema).readValues(in);
		while (events.hasNextValue()) {
			GitHubPushEvent pushEvent = events.next();
			String shaField = pushEvent.getShas();
			if (shaField != null && shaField.length() > 0) {
				// The SHA field is a semicolon-delimited list of commit hashes.
				converterService.mapPushEvent(pushEvent, users, projects, commit_shas, shaField.split(";"));
			}
			rowCount += 1;
			if (rowCount % 10000 == 0) {
				logger.info("....read " + rowCount + " rows of push_events.csv");
			}
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+f, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:24,代碼來源:GithubConverter.java

示例12: processCommitCommentEvents

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Reads a CSV file of GitHub commit-comment events (header row expected,
 * "None" mapped to null) and forwards each one to the converter service,
 * resolving the commit via the {@code commit_shas} lookup keyed by
 * "project#sha". Parse errors are logged and the file is skipped.
 */
private void processCommitCommentEvents(File f, Set<String> users, Set<String> projects, Map<String,Long> commit_shas) {
	logger.info("Processing "+f + ", first for Commit messages... ");

	try (InputStream in = new FileInputStream(f)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubCommitCommentEvent> events =
				csvMapper.readerFor(GitHubCommitCommentEvent.class).with(headerSchema).readValues(in);
		while (events.hasNextValue()) {
			GitHubCommitCommentEvent event = events.next();
			// NOTE(review): get() yields null when the "project#sha" key is
			// unknown — presumably acceptable to the converter; verify.
			Long commitId = commit_shas.get(event.getProject() + "#" + event.getSha());
			converterService.mapCommitCommentEvent(event, users, projects, commitId);
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+f, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:17,代碼來源:GithubConverter.java

示例13: processVersionHistoryFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Parses a pypi_versions.csv file and calls converterService to process it.  
 * 
 * Example header plus one row:
 * 
 * project_owner,project_name,pypi_name,pypi_rawname,version,upload_time,python_version,filename
 * skwashd,python-acquia-cloud,acapi,acapi,0.4.1,2015-11-21 09:30:17,source,acapi-0.4.1.tar.gz
 * 
 * @param filename to process
 */
/**
 * Parses a pypi_versions.csv file and calls converterService to process it.
 *
 * Example header plus one row:
 *
 * project_owner,project_name,pypi_name,pypi_rawname,version,upload_time,python_version,filename
 * skwashd,python-acquia-cloud,acapi,acapi,0.4.1,2015-11-21 09:30:17,source,acapi-0.4.1.tar.gz
 *
 * Parse failures are logged and the file is skipped (best-effort processing).
 *
 * @param file the version-history CSV file to process
 */
private void processVersionHistoryFile(File file) {
	logger.info("Processing " + file);
	try(InputStream in = new FileInputStream(file);) {
		CsvMapper mapper = new CsvMapper();
		// Header row names the columns; the literal string "None" becomes null.
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<RevisionEvent> it = mapper.readerFor(RevisionEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			RevisionEvent revision = it.next();
			converterService.mapVersionInfo(
					revision.getProjectFullName(),
					revision.getPypiName(),
					// Filename and python_version are combined into one identifier.
					revision.getVersion(), revision.getFilename() + "?" + revision.getPythonVersion(),
					revision.getUploadTime()
					);
		}
	} catch(Exception e){
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+file, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:32,代碼來源:GithubConverter.java

示例14: processWatchEvent

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Parses a CSV file listing who watched what project when, 
 * binds its contents to a GitHubWatcherList object,
 * and passes it on to the DiscourseDB converter
 *
 * File format example:
 * 
 * actor,project,created_at
 * F21,danielstjules/Stringy,2015-01-01T00:01:53Z
    * radlws,tomchristie/django-rest-framework,2015-01-01T00:05:29Z
    * 
 * @param file a dataset file to process
 */
/**
 * Parses a CSV file listing who watched which project and when, binds each
 * row to a {@code GitHubWatchEvent}, and passes it to the DiscourseDB
 * converter as a WATCH interaction.
 *
 * File format example:
 *
 * actor,project,created_at
 * F21,danielstjules/Stringy,2015-01-01T00:01:53Z
 * radlws,tomchristie/django-rest-framework,2015-01-01T00:05:29Z
 *
 * @param file a dataset file to process
 */
private void processWatchEvent(File file, Set<String> users, Set<String> projects){
	logger.info("Processing "+file);

	try (InputStream in = new FileInputStream(file)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubWatchEvent> events =
				csvMapper.readerFor(GitHubWatchEvent.class).with(headerSchema).readValues(in);
		while (events.hasNextValue()) {
			GitHubWatchEvent watchEvent = events.next();
			converterService.mapUserRepoEvent(
					watchEvent.getActor(), watchEvent.getProject(), watchEvent.getCreatedAt(),
					DiscoursePartInteractionTypes.WATCH,
					users, projects);
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+file, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:33,代碼來源:GithubConverter.java

示例15: processPullShasFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; //導入依賴的package包/類
/**
 * Parses a CSV file listing which pull requests contained which
 * commits (by SHA),
 * and passes it on to the DiscourseDB converter
 *
 * File format example:
 * 
 * (fix me)
    * 
 * @param file a dataset file to process
 */
/**
 * Parses a CSV file listing which pull requests contained which commits
 * (by SHA) and passes each row to the DiscourseDB converter. Progress is
 * logged every 10,000 rows. Parse errors are logged and the file is skipped.
 *
 * @param file a dataset file to process
 */
private void processPullShasFile(File file, Set<String> users, Set<String> projects, Map<String,Long> commit_shas){
	logger.info("Processing "+file);

	try (InputStream in = new FileInputStream(file)) {
		CsvMapper csvMapper = new CsvMapper();
		CsvSchema headerSchema = csvMapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubPullReqCommits> events =
				csvMapper.readerFor(GitHubPullReqCommits.class).with(headerSchema).readValues(in);
		long rowCount = 0;
		while (events.hasNextValue()) {
			GitHubPullReqCommits pullReqCommits = events.next();
			converterService.mapPullRequestCommits(pullReqCommits, users, projects, commit_shas);
			rowCount += 1;
			if (rowCount % 10000 == 0) {
				logger.info("pullShasFile row " + rowCount + " out of about 46,000,000");
			}
		}
	} catch (Exception e) {
		// Best-effort: log and continue with the next file.
		logger.error("Could not parse data file "+file, e);
	}
}
 
開發者ID:DiscourseDB,項目名稱:discoursedb-core,代碼行數:33,代碼來源:GithubConverter.java


注:本文中的com.fasterxml.jackson.dataformat.csv.CsvMapper類示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。