

Java CsvMapper Class Code Examples

This article collects typical usage examples of the Java class com.fasterxml.jackson.dataformat.csv.CsvMapper. If you are unsure what CsvMapper is for or how to use it in practice, the curated class examples below should help.


The CsvMapper class belongs to the com.fasterxml.jackson.dataformat.csv package. Fifteen code examples of the class are shown below, ordered by popularity.
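
Before the project-specific examples, here is a minimal, self-contained sketch of the two most common CsvMapper operations: reading CSV rows into POJOs and writing POJOs back out as CSV. The Person class and the inline sample data are hypothetical and exist only to keep the sketch runnable on its own; the Jackson API calls (readerFor, readValues, schemaFor, writer) are the same ones used throughout the examples below.

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

import java.util.List;

public class CsvMapperQuickStart {

    // Hypothetical POJO for this sketch; Jackson binds CSV columns to properties by name.
    public static class Person {
        public String name;
        public int age;
    }

    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();

        // Reading: take the column names from the header row of the input.
        CsvSchema readSchema = CsvSchema.emptySchema().withHeader();
        String csv = "name,age\nAlice,30\nBob,25\n";
        MappingIterator<Person> it = mapper.readerFor(Person.class).with(readSchema).readValues(csv);
        List<Person> people = it.readAll();

        // Writing: derive a schema from the class and emit a header line.
        CsvSchema writeSchema = mapper.schemaFor(Person.class).withHeader();
        System.out.println(mapper.writer(writeSchema).writeValueAsString(people));
    }
}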

Example 1: readTruckEventsFromCsv

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
private MappingIterator<TruckEvent> readTruckEventsFromCsv(InputStream csvStream) throws IOException {
        CsvSchema bootstrap = CsvSchema.builder()
                // driverId,truckId,eventTime,eventType,longitude,latitude,eventKey,correlationId,driverName,routeId,routeName,eventDate
                .addColumn("driverId", CsvSchema.ColumnType.NUMBER)
                .addColumn("truckId", CsvSchema.ColumnType.NUMBER)
                .addColumn("eventTime", CsvSchema.ColumnType.STRING)
                .addColumn("eventType", CsvSchema.ColumnType.STRING)
                .addColumn("longitude", CsvSchema.ColumnType.NUMBER)
                .addColumn("latitude", CsvSchema.ColumnType.NUMBER)
                .addColumn("eventKey", CsvSchema.ColumnType.STRING)
                .addColumn("correlationId", CsvSchema.ColumnType.NUMBER)
                .addColumn("driverName", CsvSchema.ColumnType.STRING)
                .addColumn("routeId", CsvSchema.ColumnType.NUMBER)
                .addColumn("routeName", CsvSchema.ColumnType.STRING)
                .addColumn("eventDate", CsvSchema.ColumnType.STRING)
//                .addColumn("miles", CsvSchema.ColumnType.NUMBER)
                .build().withHeader();

        CsvMapper csvMapper = new CsvMapper();
        return csvMapper.readerFor(TruckEvent.class).with(bootstrap).readValues(csvStream);
    }
 
Developer: hortonworks, Project: registry, Lines: 22, Source: TruckEventsCsvConverter.java
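
A side note on Example 1: when the target class is annotated with @JsonPropertyOrder, the column-by-column CsvSchema.builder() calls can often be replaced by CsvMapper.schemaFor(...). The sketch below illustrates that alternative with a cut-down, hypothetical TruckEvent carrying only three of the original twelve columns; it is not the original project's class, just a stand-in that keeps the example self-contained.

import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class TruckEventSchemaSketch {

    // Hypothetical, trimmed-down stand-in for the project's TruckEvent class.
    @JsonPropertyOrder({"driverId", "truckId", "eventType"})
    public static class TruckEvent {
        public long driverId;
        public long truckId;
        public String eventType;
    }

    static MappingIterator<TruckEvent> readTruckEvents(InputStream csvStream) throws IOException {
        CsvMapper csvMapper = new CsvMapper();
        // Derive the schema from the annotated class instead of repeated addColumn(...) calls,
        // and expect a header row, just like withHeader() in Example 1.
        CsvSchema schema = csvMapper.schemaFor(TruckEvent.class).withHeader();
        return csvMapper.readerFor(TruckEvent.class).with(schema).readValues(csvStream);
    }

    public static void main(String[] args) throws IOException {
        String csv = "driverId,truckId,eventType\n11,23,Normal\n12,42,Lane Departure\n";
        InputStream in = new ByteArrayInputStream(csv.getBytes(StandardCharsets.UTF_8));
        readTruckEvents(in).readAll().forEach(e ->
                System.out.println(e.driverId + " " + e.truckId + " " + e.eventType));
    }
}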

Example 2: testRunSorterFirstColumn

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterFirstColumn() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);
	
	CsvSchema schema = CsvSchema.builder().setUseHeader(false).build();
	verifyCSV(testInput1, 1, 2, 4, mapper, schema);

	try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0), true);
	}

	verifyCSV(testOutput, 1, 2, 4, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 23, Source: CSVSorterTest.java
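
Examples 2 through 7 repeat the same CsvFactory/CsvMapper configuration at the top of every test. If that duplication is a concern, it could be pulled into a small helper along the lines of the sketch below; the class and method names are invented for illustration, while the enabled features are exactly the ones used in these tests.

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.dataformat.csv.CsvFactory;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;

public final class CsvTestSupport {

    private CsvTestSupport() {
    }

    // Hypothetical helper that mirrors the configuration repeated in the CSVSorterTest examples.
    public static CsvMapper buildTrimmingCsvMapper() {
        CsvFactory csvFactory = new CsvFactory();
        csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
        csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);

        CsvMapper mapper = new CsvMapper(csvFactory);
        mapper.enable(CsvParser.Feature.TRIM_SPACES);
        mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
        return mapper;
    }
}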

Example 3: testRunSorterSecondColumn

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterSecondColumn() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);
	
	CsvSchema schema = CsvSchema.builder().setUseHeader(false).build();
	verifyCSV(testInput1, 1, 2, 4, mapper, schema);
	try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1), true);
	}

	verifyCSV(testOutput, 1, 2, 4, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 22, Source: CSVSorterTest.java

Example 4: testRunSorterSecondColumnThenFirst

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterSecondColumnThenFirst() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);

	CsvSchema schema = CsvSchema.builder().setUseHeader(false).build();
	verifyCSV(testInput3, 1, 2, 5, mapper, schema);

	try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1, 0), true);
	}

	verifyCSV(testOutput, 1, 2, 5, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 23, Source: CSVSorterTest.java

Example 5: testRunSorterFirstColumnThenSecond

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterFirstColumnThenSecond() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);
	
	CsvSchema schema = CsvSchema.builder().setUseHeader(false).build();
	verifyCSV(testInput3, 1, 2, 5, mapper, schema);

	try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 1, 2, 5, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 23, Source: CSVSorterTest.java

Example 6: testRunSorterTSV

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterTSV() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);
	CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build();
	verifyCSV(testInput4, 1, 2, 5, mapper, schema);

	try (Reader inputReader = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 1, 2, 5, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 22, Source: CSVSorterTest.java

Example 7: testRunSorterTSVMultipleHeaderLines

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testRunSorterTSVMultipleHeaderLines() throws Exception {
	CsvFactory csvFactory = new CsvFactory();
	csvFactory.enable(CsvParser.Feature.TRIM_SPACES);
	// csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	CsvMapper mapper = new CsvMapper(csvFactory);
	mapper.enable(CsvParser.Feature.TRIM_SPACES);
	// mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
	mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true);
	// mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY,
	// true);
	CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build();
	verifyCSV(testInput5, 10, 2, 5, mapper, schema);

	try (Reader inputReader = Files.newBufferedReader(testInput5, StandardCharsets.UTF_8)) {
		CSVSorter.runSorter(inputReader, testOutput, 10, schema, CSVSorter.getComparator(0, 1), true);
	}

	verifyCSV(testOutput, 10, 2, 5, mapper, schema);
}
 
Developer: ansell, Project: csvsum, Lines: 22, Source: CSVSorterTest.java

Example 8: testWriteFullCode

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
@Test
public final void testWriteFullCode()
	throws Exception
{
	List<String> headers = Arrays.asList("TestHeader1", "TestHeader2");
	List<List<String>> dataSource = Arrays.asList();
	// Or alternatively,
	// List<List<String>> dataSource = Arrays.asList(Arrays.asList("TestValue1", "TestValue2"));
	java.io.Writer writer = new StringWriter();
	CsvSchema.Builder builder = CsvSchema.builder();
	for (String nextHeader : headers) {
		builder = builder.addColumn(nextHeader);
	}
	CsvSchema schema = builder.setUseHeader(true).build();
	try (SequenceWriter csvWriter = new CsvMapper().writerWithDefaultPrettyPrinter().with(schema).forType(
			List.class).writeValues(writer);)
	{
		for (List<String> nextRow : dataSource) {
			csvWriter.write(nextRow);
		}
		// Check to see whether dataSource is empty 
		// and if so write a single empty list to trigger header output
		if (dataSource.isEmpty()) {
			csvWriter.write(Arrays.asList());
		}
	}
	System.out.println(writer.toString());
}
 
Developer: ansell, Project: csvsum, Lines: 29, Source: CSVUtilTest.java
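
A remark on Example 8: in the Jackson CSV module the header line is, in practice, only written together with the first value handed to the SequenceWriter, which is why the test writes a single empty list when dataSource is empty; without that extra write, writer.toString() would simply be an empty string.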

Example 9: processForkFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
private void processForkFile(File f, Set<String> users, Set<String> projects) {
	logger.info("Processing "+f);

	try(InputStream in = new FileInputStream(f);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubForkEvent> it = mapper.readerFor(GitHubForkEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			GitHubForkEvent cde = it.next();
			converterService.mapUserForkEvent(cde,
					users, projects);
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+f, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 19, Source: GithubConverter.java

Example 10: processCreateDeleteEntity

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
private void processCreateDeleteEntity(File f, Set<String> users, Set<String> projects) {
	logger.info("Processing "+f);

	try(InputStream in = new FileInputStream(f);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubCreateDeleteEvent> it = mapper.readerFor(GitHubCreateDeleteEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			GitHubCreateDeleteEvent cde = it.next();
			converterService.mapUserCreateDeleteEvent(cde,
					users, projects);
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+f, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 18, Source: GithubConverter.java

Example 11: processPushEvents

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
private void processPushEvents(File f, Set<String> users, Set<String> projects) {
	try(InputStream in = new FileInputStream(f);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		long rows = 0;
		MappingIterator<GitHubPushEvent> it = mapper.readerFor(GitHubPushEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			GitHubPushEvent pe = it.next();
			if (pe.getShas() != null && pe.getShas().length() > 0) {
				String [] shas = pe.getShas().split(";");
				converterService.mapPushEvent(pe, users, projects, commit_shas, shas);
			}
			rows += 1;
			if (rows%10000 == 0) {
				logger.info("....read " + rows + " rows of push_events.csv");
			}
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+f, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 24, Source: GithubConverter.java

Example 12: processCommitCommentEvents

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
private void processCommitCommentEvents(File f, Set<String> users, Set<String> projects, Map<String,Long> commit_shas) {
	logger.info("Processing "+f + ", first for Commit messages... ");

	try(InputStream in = new FileInputStream(f);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubCommitCommentEvent> it = mapper.readerFor(GitHubCommitCommentEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			GitHubCommitCommentEvent cde = it.next();
			converterService.mapCommitCommentEvent(cde,
					users, projects, commit_shas.get(cde.getProject() + "#" + cde.getSha()));
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+f, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 17, Source: GithubConverter.java

Example 13: processVersionHistoryFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
/**
 * Parses a pypi_versions.csv file and calls converterService to process it.  
 * 
 * Example header plus one row:
 * 
 * project_owner,project_name,pypi_name,pypi_rawname,version,upload_time,python_version,filename
 * skwashd,python-acquia-cloud,acapi,acapi,0.4.1,2015-11-21 09:30:17,source,acapi-0.4.1.tar.gz
 * 
 * @param filename to process
 */
private void processVersionHistoryFile(File file) {
	logger.info("Processing " + file);
	try(InputStream in = new FileInputStream(file);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<RevisionEvent> it = mapper.readerFor(RevisionEvent.class).with(schema).readValues(in);
		boolean first = true;
		while (it.hasNextValue()) {
			RevisionEvent revision = it.next();
			//logger.info("Version: " + revision.getProjectFullName() + ", " + revision.getPypiName() + "/" + revision.getVersion() + " " + revision.getUploadTime());
			converterService.mapVersionInfo(
					revision.getProjectFullName(),
					revision.getPypiName(),
					revision.getVersion(), revision.getFilename() + "?" + revision.getPythonVersion(),
					revision.getUploadTime()
					);
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+file, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 32, Source: GithubConverter.java

Example 14: processWatchEvent

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
/**
 * Parses a CSV file listing who watched what project when, 
 * binds its contents to a GitHubWatcherList object,
 * and passes it on to the DiscourseDB converter
 *
 * File format example:
 * 
 * actor,project,created_at
 * F21,danielstjules/Stringy,2015-01-01T00:01:53Z
 * radlws,tomchristie/django-rest-framework,2015-01-01T00:05:29Z
 *
 * @param file a dataset file to process
 */
private void processWatchEvent(File file, Set<String> users, Set<String> projects){
	logger.info("Processing "+file);

	try(InputStream in = new FileInputStream(file);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubWatchEvent> it = mapper.readerFor(GitHubWatchEvent.class).with(schema).readValues(in);
		while (it.hasNextValue()) {
			GitHubWatchEvent gwl = it.next();
			converterService.mapUserRepoEvent(
					gwl.getActor(), gwl.getProject(), gwl.getCreatedAt(),
					DiscoursePartInteractionTypes.WATCH,
					users, projects);
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+file, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 33, Source: GithubConverter.java

Example 15: processPullShasFile

import com.fasterxml.jackson.dataformat.csv.CsvMapper; // import the required package/class
/**
 * Parses a CSV file listing which pull requests contained which
 * commits (by SHA),
 * and passes it on to the DiscourseDB converter
 *
 * File format example:
 * 
 * (fix me)
 *
 * @param file a dataset file to process
 */
private void processPullShasFile(File file, Set<String> users, Set<String> projects, Map<String,Long> commit_shas){
	logger.info("Processing "+file);

	try(InputStream in = new FileInputStream(file);) {
		CsvMapper mapper = new CsvMapper();
		CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
		MappingIterator<GitHubPullReqCommits> it = mapper.readerFor(GitHubPullReqCommits.class).with(schema).readValues(in);
		long row=0;
		while (it.hasNextValue()) {
			GitHubPullReqCommits prc = it.next();
			converterService.mapPullRequestCommits(prc, users, projects, commit_shas);
			row += 1;
			if (row%10000 == 0) {
				logger.info("pullShasFile row " + row + " out of about 46,000,000");
			}
		}
	} catch (Exception e) {
		logger.error("Could not parse data file "+file, e);
	}
}
 
Developer: DiscourseDB, Project: discoursedb-core, Lines: 33, Source: GithubConverter.java


Note: the com.fasterxml.jackson.dataformat.csv.CsvMapper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers, and copyright of the source code remains with the original authors; consult the corresponding project's license before redistributing or reusing the code, and do not republish this compilation without permission.