This article compiles typical usage examples of the Java class org.supercsv.io.ICsvBeanWriter. If you are wondering what ICsvBeanWriter is for, how to use it, or where to find sample code, the curated examples below should help.
The ICsvBeanWriter class belongs to the org.supercsv.io package. Eleven code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
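Before the examples, here is a minimal sketch of the typical ICsvBeanWriter workflow: the name mapping array doubles as the header and must match the bean's getter names. The Person bean, values, and file name are illustrative, not taken from the examples below.

import java.io.FileWriter;
import java.io.IOException;
import org.supercsv.io.CsvBeanWriter;
import org.supercsv.io.ICsvBeanWriter;
import org.supercsv.prefs.CsvPreference;

public class PersonCsvExample {
    // Illustrative bean: Super CSV resolves each column via the matching getter.
    public static class Person {
        private final String name;
        private final int age;
        public Person(String name, int age) { this.name = name; this.age = age; }
        public String getName() { return name; }
        public int getAge() { return age; }
    }

    public static void main(String[] args) throws IOException {
        final String[] header = {"name", "age"};  // also serves as the name mapping
        try (ICsvBeanWriter beanWriter =
                 new CsvBeanWriter(new FileWriter("people.csv"), CsvPreference.STANDARD_PREFERENCE)) {
            beanWriter.writeHeader(header);
            beanWriter.write(new Person("Ada", 36), header);
            beanWriter.write(new Person("Alan", 41), header);
        } // try-with-resources flushes and closes the underlying writer
    }
}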
Example 1: getCsv
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
@RequestMapping(value = "/exportCsvFile/{stats}", method = RequestMethod.GET)
public void getCsv(@PathVariable("stats") String stats, HttpServletRequest request, HttpServletResponse response, Locale locale) throws IOException {
    response.setContentType("text/csv");
    String reportName = "editable.csv";
    // cookie convention used by client-side download-detection plugins
    response.setHeader("Set-Cookie", "fileDownload=true; path=/");
    response.setHeader("Content-disposition", "attachment;filename=" + reportName);
    final String[] header = exportService.getHeader(stats);
    Writer writer = new OutputStreamWriter(response.getOutputStream(), "UTF-8");
    ICsvBeanWriter beanWriter = new CsvBeanWriter(writer, CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE);
    beanWriter.writeHeader(header);
    try {
        List<ExportBean> objs = exportService.getBean(stats, locale);
        for (ExportBean item : objs) {
            beanWriter.write(item, header);
        }
        beanWriter.flush();
        writer.close();
    } catch (Exception e) {
        log.error("export interrupted!", e);
    } finally {
        if (beanWriter != null) {
            beanWriter.close();
        }
    }
}
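A note on resource handling: the excerpt closes writer inside the try block and only the bean writer in finally. A tighter variant of the same loop (a sketch reusing the example's names, not the original project's code) lets try-with-resources flush and close both:

try (Writer writer = new OutputStreamWriter(response.getOutputStream(), StandardCharsets.UTF_8);
     ICsvBeanWriter beanWriter = new CsvBeanWriter(writer, CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE)) {
    beanWriter.writeHeader(header);
    for (ExportBean item : exportService.getBean(stats, locale)) {
        beanWriter.write(item, header);
    }
} // closing beanWriter also flushes and closes the wrapped writer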
Example 2: downloadCSV
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
@RequestMapping("download-csv/{id}")
public void downloadCSV(HttpServletResponse response, @PathVariable("id") Long id) throws IOException {
final Material material = materialRepository.findOne(id);
if (material == null)
throw new IllegalArgumentException("[" + id + "] data is not exist.");
String videoName = material.getVideoName();
int lastIndexOf = videoName.lastIndexOf("/");
videoName = lastIndexOf >= 0 ? videoName.substring(lastIndexOf + 1, videoName.length()) : videoName;
final String csvFileName = videoName + "_" + (new SimpleDateFormat("yyyy-MM-dd'T'HH:mm").format(material.getCreatedAt())) + ".csv";
response.setContentType("text/csv");
String headerKey = "Content-Disposition";
String headerValue = String.format("attachment; filename=\"%s\"", csvFileName);
response.setHeader(headerKey, headerValue);
final ICsvBeanWriter csvWriter = new CsvBeanWriter(response.getWriter(), CsvPreference.STANDARD_PREFERENCE);
final String[] header = {"timestamp", "key"};
csvWriter.writeHeader(header);
for (MaterialData data : material.getMaterialDataList())
csvWriter.write(data, header);
csvWriter.close();
}
Example 3: csvWrite
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
@Transactional(readOnly=true)
public void csvWrite(Writer writer, List<PosteCandidature> posteCandidatures) throws IOException {
    log.info("Generate CSV for " + posteCandidatures.size() + " posteCandidatures");
    final String[] header = new String[] { "poste", "nom", "email", "prenom", "galaxie", "recevable", "auditionnable", "vue", "creation", "modification", "gestionnaire", "dateGestion" };
    final CellProcessor[] processors = getProcessors();
    ICsvBeanWriter beanWriter = new CsvBeanWriter(writer, CsvPreference.STANDARD_PREFERENCE);
    beanWriter.writeHeader(header);
    for (PosteCandidature posteCandidature : posteCandidatures) {
        CsvPosteCandidatureMetadataFileBean csvMetadataFileBean = new CsvPosteCandidatureMetadataFileBean(posteCandidature);
        beanWriter.write(csvMetadataFileBean, header, processors);
    }
    beanWriter.close();
    log.info("Generate CSV OK");
}
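The excerpt does not show getProcessors(); in Super CSV it returns one CellProcessor per column, in header order. A plausible sketch is below. The choice of constraints and the date pattern are assumptions, and FmtDate presumes the bean exposes java.util.Date properties for the date columns.

import org.supercsv.cellprocessor.FmtDate;
import org.supercsv.cellprocessor.Optional;
import org.supercsv.cellprocessor.constraint.NotNull;
import org.supercsv.cellprocessor.ift.CellProcessor;

// Hypothetical sketch: one processor per column, in header order.
private static CellProcessor[] getProcessors() {
    return new CellProcessor[] {
        new NotNull(),             // poste
        new NotNull(),             // nom
        new Optional(),            // email
        new NotNull(),             // prenom
        new Optional(),            // galaxie
        new Optional(),            // recevable
        new Optional(),            // auditionnable
        new Optional(),            // vue
        new FmtDate("dd/MM/yyyy"), // creation
        new FmtDate("dd/MM/yyyy"), // modification
        new Optional(),            // gestionnaire
        new FmtDate("dd/MM/yyyy")  // dateGestion
    };
}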
Example 4: createCitationFile
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
/**
 * Creates the dataset citation file using the Solr query response.
 *
 * @param datasetUsages record count per dataset
 * @param citationFileName output file name
 * @param datasetOccUsageService usage service
 * @param datasetService dataset service
 * @param downloadKey download key
 */
public static void createCitationFile(Map<UUID, Long> datasetUsages, String citationFileName,
                                      DatasetOccurrenceDownloadUsageService datasetOccUsageService,
                                      DatasetService datasetService, String downloadKey) {
    if (datasetUsages != null && !datasetUsages.isEmpty()) {
        try (ICsvBeanWriter beanWriter = new CsvBeanWriter(new FileWriterWithEncoding(citationFileName, Charsets.UTF_8),
                                                           CsvPreference.TAB_PREFERENCE)) {
            for (Entry<UUID, Long> entry : datasetUsages.entrySet()) {
                if (entry.getKey() != null) {
                    beanWriter.write(new Facet.Count(entry.getKey().toString(), entry.getValue()), HEADER, PROCESSORS);
                    persistDatasetUsage(entry, downloadKey, datasetOccUsageService, datasetService);
                }
            }
            beanWriter.flush();
        } catch (IOException e) {
            LOG.error("Error creating citations file", e);
            throw Throwables.propagate(e);
        }
    }
}
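HEADER and PROCESSORS are class constants defined outside the excerpt. Since each row is a Facet.Count (a name plus a count), they plausibly look like the following; this is an assumption, not the project's actual source:

// Hypothetical constants: the nameMapping entries must match Facet.Count's
// "name" and "count" bean properties.
private static final String[] HEADER = new String[] {"name", "count"};
private static final CellProcessor[] PROCESSORS = new CellProcessor[] {
    new NotNull(),  // dataset key, written as a string
    new NotNull()   // record count for that dataset
};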
Example 5: downloadCSV
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
private void downloadCSV(final HttpServletResponse response, final List<EventLoggerDTO> events,
                         final int processModelId, final String subject) throws IOException {
    final String date = DateTime.now().toString("ddMMyyyy-HHmm");
    final String csvFileName = "Eventlog_" + processModelId + "_" + subject + "_" + date + ".csv";
    response.setContentType("text/csv");
    final String headerKey = "Content-Disposition";
    final String headerValue = String.format("attachment; filename=\"%s\"", csvFileName);
    response.setHeader(headerKey, headerValue);
    // uses the Super CSV API to generate CSV data from the model data
    final ICsvBeanWriter csvWriter =
        new CsvBeanWriter(response.getWriter(), CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE);
    final String[] header =
        {"EventId", "CaseId", "Timestamp", "Activity", "Resource", "State", "MessageType", "Recipient", "Sender"};
    csvWriter.writeHeader(header);
    events.forEach(event -> {
        try {
            if (!event.getResource().isEmpty()) {
                csvWriter.write(event, header);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    });
    csvWriter.close();
}
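Catching the IOException with printStackTrace inside the lambda silently drops write failures. One alternative sketch (not the project's code) rethrows as an unchecked exception so the request fails visibly:

events.forEach(event -> {
    try {
        if (!event.getResource().isEmpty()) {
            csvWriter.write(event, header);
        }
    } catch (IOException e) {
        // surface the failure instead of printing and continuing
        throw new UncheckedIOException(e);  // java.io.UncheckedIOException, Java 8+
    }
});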
Example 6: exportStream
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
@Override
public void exportStream(OutputStream outputStream, Iterator<T> iterator) throws IOException, ClassNotFoundException, IllegalAccessException {
    if (iterator == null)
        throw new NullPointerException("Iterator must not be null.");
    Writer writer = new OutputStreamWriter(outputStream, "UTF-8");
    ICsvBeanWriter beanWriter = new CsvBeanWriter(writer, preference);
    while (iterator.hasNext()) {
        T entry = iterator.next();
        beanWriter.write(entry, fieldNames, processors);
    }
    // flush but do not close: the caller owns outputStream
    beanWriter.flush();
}
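Here preference, fieldNames, and processors are fields of the surrounding exporter class; the excerpt only uses them. A plausible shape for that state (the field values are illustrative):

// Hypothetical exporter state; only the three field names appear in the excerpt.
private final CsvPreference preference = CsvPreference.STANDARD_PREFERENCE;
private final String[] fieldNames = {"id", "label"};  // bean properties to export
private final CellProcessor[] processors = {new Optional(), new Optional()};

Flushing without closing is deliberate: closing the bean writer would close the wrapped OutputStreamWriter and, with it, the caller's stream.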
Example 7: getCSVBeanWriter
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
public ICsvBeanWriter getCSVBeanWriter(String fileToWrite) {
    try {
        // open in append mode so successive batches land in the same file
        return new CsvBeanWriter(new FileWriter(fileToWrite, true),
                                 new CsvPreference.Builder(CsvPreference.EXCEL_PREFERENCE)
                                     .useEncoder(new DefaultCsvEncoder())
                                     .build());
    } catch (IOException e) {
        logger.error("Error in creating CSV Bean writer!");
        logger.error("Exception", e);
    }
    // callers must be prepared for a null writer
    return null;
}
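Because the FileWriter opens in append mode, Examples 8 and 9 below write the header only when they create the writer and keep appending to the same file afterwards. A minimal hedged usage (the path is illustrative, and the enclosing method must handle IOException):

try (ICsvBeanWriter writer = getCSVBeanWriter("/tmp/tweets.csv")) {
    if (writer != null) {
        writer.writeHeader("tweetID");  // varargs form of writeHeader
        // ... write beans here ...
    }
} // a null resource is skipped by try-with-resources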
Example 8: writeCollectorTweetIDSCSV
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
public ICsvBeanWriter writeCollectorTweetIDSCSV(ICsvBeanWriter beanWriter, List<Tweet> tweetsList, String collectionDIR, String fileName) {
    try {
        // the header elements are used to map the bean values to each column (names must match)
        //final String[] header = new String[]{"tweetID", "message", "userID", "userName", "userURL", "createdAt", "tweetURL"};
        //final CellProcessor[] processors = getProcessors();
        // koushik: shouldn't we be writing only the tweetIDs?
        final String[] header = new String[]{"tweetID"};
        final CellProcessor[] processors = getProcessors4TweetIDSCCSV();
        String persisterDIR = PersisterConfigurator.getInstance().getProperty(PersisterConfigurationProperty.DEFAULT_PERSISTER_FILE_PATH);
        //fileName = StringUtils.substringBefore(fileName, ".json"); //removing .json extension
        String fileToWrite = persisterDIR + collectionDIR + "/" + fileName;
        logger.info(collectionDIR + ": Writing CSV file : " + fileToWrite);
        if (null == beanWriter) {
            beanWriter = getCSVBeanWriter(fileToWrite);
            // write the header only when the file is first opened
            beanWriter.writeHeader(header);
        }
        for (final Tweet tweet : tweetsList) {
            try {
                if (tweet.getTweetID() != null) {
                    beanWriter.write(tweet, header, processors);
                }
            } catch (SuperCsvCellProcessorException e) {
                logger.error(collectionDIR + ": SuperCSV error", e);
            }
        }
    } catch (IOException ex) {
        logger.error(collectionDIR + ": IO Exception occurred", ex);
    }
    //return fileName+".csv";
    return beanWriter;
}
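The writer is threaded through successive calls so the header is written once and every batch appends to the same file. A hypothetical caller loop (names other than the method's are illustrative):

// Hypothetical caller: reuse one writer across batches, close it at the end.
ICsvBeanWriter beanWriter = null;
for (List<Tweet> batch : tweetBatches) {
    beanWriter = writeCollectorTweetIDSCSV(beanWriter, batch, collectionDIR, fileName);
}
if (beanWriter != null) {
    beanWriter.close();  // close() may throw IOException; handle in the caller
}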
Example 9: writeCollectorTweetsCSV
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
public ICsvBeanWriter writeCollectorTweetsCSV(List<Tweet> tweetsList, String collectionDIR, String fileName, ICsvBeanWriter beanWriter) {
    try {
        final String[] header = new String[]{"tweetID", "message", "userID", "userName", "userURL", "createdAt", "tweetURL"};
        final CellProcessor[] processors = getCollectorTweetsProcessors();
        if (null == beanWriter) {
            String persisterDIR = PersisterConfigurator.getInstance().getProperty(PersisterConfigurationProperty.DEFAULT_PERSISTER_FILE_PATH);
            //fileName = StringUtils.substringBefore(fileName, ".json"); //removing .json extension
            String fileToWrite = persisterDIR + collectionDIR + "/" + fileName;
            logger.info(collectionDIR + ": Writing CSV file : " + fileToWrite);
            beanWriter = getCSVBeanWriter(fileToWrite);
            beanWriter.writeHeader(header);
        }
        for (final Tweet tweet : tweetsList) {
            try {
                beanWriter.write(tweet, header, processors);
            } catch (SuperCsvCellProcessorException e) {
                logger.error(collectionDIR + ": SuperCSV error", e);
            }
        }
    } catch (IOException ex) {
        logger.error(collectionDIR + ": IO Exception occurred", ex);
    }
    return beanWriter;
}
Example 10: writeMediaObjects
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
/**
 * Writes the multimedia objects into the file referenced by multimediaCsvWriter.
 */
private static void writeMediaObjects(ICsvBeanWriter multimediaCsvWriter,
                                      org.apache.hadoop.hbase.client.Result result,
                                      Integer occurrenceKey) throws IOException {
    List<MediaObject> multimedia = OccurrenceBuilder.buildMedia(result);
    if (multimedia != null) {
        for (MediaObject mediaObject : multimedia) {
            multimediaCsvWriter.write(new InnerMediaObject(mediaObject, occurrenceKey),
                                      MULTIMEDIA_COLUMNS,
                                      MEDIA_CELL_PROCESSORS);
        }
    }
}
Example 11: doWork
import org.supercsv.io.ICsvBeanWriter; // import the required package/class
/**
 * Executes the job.query and creates a data file that will contain the records from job.from to job.to positions.
 */
public void doWork(final DownloadFileWork work) throws IOException {
    final DatasetUsagesCollector datasetUsagesCollector = new DatasetUsagesCollector();
    try (
        ICsvMapWriter intCsvWriter = new CsvMapWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                                 + TableSuffixes.INTERPRETED_SUFFIX,
                                                                                 Charsets.UTF_8),
                                                      CsvPreference.TAB_PREFERENCE);
        ICsvMapWriter verbCsvWriter = new CsvMapWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                                  + TableSuffixes.VERBATIM_SUFFIX,
                                                                                  Charsets.UTF_8),
                                                       CsvPreference.TAB_PREFERENCE);
        ICsvBeanWriter multimediaCsvWriter = new CsvBeanWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                                          + TableSuffixes.MULTIMEDIA_SUFFIX,
                                                                                          Charsets.UTF_8),
                                                               CsvPreference.TAB_PREFERENCE)) {
        SolrQueryProcessor.processQuery(work, new Predicate<Integer>() {
            @Override
            public boolean apply(@Nullable Integer occurrenceKey) {
                try {
                    // Writes the occurrence record obtained from HBase as Map<String,Object>.
                    org.apache.hadoop.hbase.client.Result result = work.getOccurrenceMapReader().get(occurrenceKey);
                    Map<String, String> occurrenceRecordMap = OccurrenceMapReader.buildInterpretedOccurrenceMap(result);
                    Map<String, String> verbOccurrenceRecordMap = OccurrenceMapReader.buildVerbatimOccurrenceMap(result);
                    if (occurrenceRecordMap != null) {
                        datasetUsagesCollector.incrementDatasetUsage(occurrenceRecordMap.get(GbifTerm.datasetKey.simpleName()));
                        intCsvWriter.write(occurrenceRecordMap, INT_COLUMNS);
                        verbCsvWriter.write(verbOccurrenceRecordMap, VERB_COLUMNS);
                        writeMediaObjects(multimediaCsvWriter, result, occurrenceKey);
                        return true;
                    } else {
                        LOG.error(String.format("Occurrence id %s not found!", occurrenceKey));
                    }
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
                return false;
            }
        });
    } finally {
        // Unlock the assigned lock.
        work.getLock().unlock();
        LOG.info("Lock released, job detail: {} ", work.toString());
    }
    getSender().tell(new Result(work, datasetUsagesCollector.getDatasetUsages()), getSelf());
}