本文整理汇总了Java中org.apache.commons.csv.CSVFormat.RFC4180属性的典型用法代码示例。如果您正苦于以下问题:Java CSVFormat.RFC4180属性的具体用法?Java CSVFormat.RFC4180怎么用?Java CSVFormat.RFC4180使用的例子?那么, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类org.apache.commons.csv.CSVFormat
的用法示例。
在下文中一共展示了CSVFormat.RFC4180属性的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: write
/**
 * Writes the parsed CSV content to the given file in RFC 4180 format.
 * The header row (if any) is printed first, followed by every record
 * from the underlying parser.
 *
 * @param outputFilePath path of the file to create or overwrite
 * @throws Exception if the file cannot be written or the parser fails
 */
@Override
public void write(String outputFilePath) throws Exception {
    // try-with-resources closes both the printer and the writer even on
    // failure; the former catch-and-rethrow block added nothing and is gone.
    try (Writer out = new BufferedWriter(new FileWriter(outputFilePath));
         CSVPrinter csvPrinter = new CSVPrinter(out, CSVFormat.RFC4180)) {
        if (this.getHeaders() != null) {
            csvPrinter.printRecord(this.getHeaders());
        }
        // CSVParser is Iterable<CSVRecord>, so a for-each replaces the manual iterator.
        for (CSVRecord record : this.getCSVParser()) {
            csvPrinter.printRecord(record);
        }
        csvPrinter.flush();
    }
}
示例2: exportData
/**
 * Exports every loyalty card in the database as RFC 4180 CSV to the given
 * writer, preceded by a header row of column ids. Checks for thread
 * interruption after each row so long exports can be cancelled.
 *
 * @param db     database to read loyalty cards from
 * @param output destination for the CSV text (not closed by this method's
 *               contract beyond the printer's own close)
 * @throws IOException          if writing to the output fails
 * @throws InterruptedException if the exporting thread is interrupted
 */
public void exportData(DBHelper db, OutputStreamWriter output) throws IOException, InterruptedException
{
    CSVPrinter printer = new CSVPrinter(output, CSVFormat.RFC4180);
    Cursor cursor = db.getLoyaltyCardCursor();
    try
    {
        // Print the header
        printer.printRecord(DBHelper.LoyaltyCardDbIds.ID,
                DBHelper.LoyaltyCardDbIds.STORE,
                DBHelper.LoyaltyCardDbIds.NOTE,
                DBHelper.LoyaltyCardDbIds.CARD_ID,
                DBHelper.LoyaltyCardDbIds.BARCODE_TYPE);
        while(cursor.moveToNext())
        {
            LoyaltyCard card = LoyaltyCard.toLoyaltyCard(cursor);
            printer.printRecord(card.id,
                    card.store,
                    card.note,
                    card.cardId,
                    card.barcodeType);
            // Honor cancellation requests from the caller.
            if(Thread.currentThread().isInterrupted())
            {
                throw new InterruptedException();
            }
        }
    }
    finally
    {
        // The original leaked both resources when an exception or an
        // interruption occurred mid-export; always release them.
        cursor.close();
        printer.close();
    }
}
示例3: readFile
/**
 * Reads the BZip2-compressed, UTF-8 encoded CSV file of the TagDownloader,
 * feeding every record to {@code parseRecord} and logging progress every
 * million records. Any failure is logged rather than propagated.
 *
 * @param file the compressed CSV file to read
 */
public static void readFile(File file) {
    logger.info("Starting to read file of TagDownloader ...");
    // try-with-resources: the original leaked the reader/parser chain when
    // parsing or the data store threw.
    try (BufferedReader reader =
             new BufferedReader(
                 new InputStreamReader(
                     new BZip2CompressorInputStream(
                         new BufferedInputStream(
                             new FileInputStream(file))), "UTF-8"));
         CSVParser parser = new CSVParser(reader, CSVFormat.RFC4180)) {
        dataStore.connect();
        try {
            for (CSVRecord csvRecord : parser) {
                parseRecord(csvRecord);
                if (csvRecord.getRecordNumber() % 1000000 == 0) {
                    logger.info("Current Record: " + csvRecord.getRecordNumber());
                }
            }
        } finally {
            // Disconnect even when a record fails to parse (the original
            // left the store connected on error).
            dataStore.disconnect();
        }
        logger.info("Tag Distribution:\n"
            + FrequencyUtils.formatFrequency(tagDistribution));
        logger.info("Finished");
    } catch (Exception e) {
        logger.error("", e);
    }
}
示例4: readFile
/**
 * Reads the BZip2-compressed, UTF-8 encoded CSV file of the
 * GeolocationDatabase, feeding every record to {@code parseRecord} and
 * logging progress every million records. Any failure is logged rather
 * than propagated.
 * (The original javadoc said "TagDownloader" — a copy-paste leftover.)
 *
 * @param file the compressed CSV file to read
 */
public static void readFile(File file) {
    logger.info("Starting to read file of GeolocationDatabase ...");
    // try-with-resources: the original leaked the reader/parser chain on error.
    try (BufferedReader reader =
             new BufferedReader(
                 new InputStreamReader(
                     new BZip2CompressorInputStream(
                         new BufferedInputStream(
                             new FileInputStream(file))), "UTF-8"));
         CSVParser parser = new CSVParser(reader, CSVFormat.RFC4180)) {
        for (CSVRecord csvRecord : parser) {
            parseRecord(csvRecord);
            if (csvRecord.getRecordNumber() % 1000000 == 0) {
                logger.info("Current Record: " + csvRecord.getRecordNumber());
            }
        }
        logger.info("Finished");
    } catch (Exception e) {
        logger.error("", e);
    }
}
示例5: appendMetadata
/**
 * Appends the given rows to the metadata file as RFC 4180 CSV.
 * Synchronized so concurrent callers cannot interleave rows in the file.
 *
 * @param metadata rows to append; each inner list is one CSV record
 */
public synchronized void appendMetadata(List<List<String>> metadata) {
    // try-with-resources: the original leaked writer and printer when
    // printRecords threw. Closing the printer closes the wrapped writer.
    try (CSVPrinter csvPrinter = new CSVPrinter(
            new BufferedWriter(new FileWriter(metadataFile, true)), CSVFormat.RFC4180)) {
        csvPrinter.printRecords(metadata);
    } catch (IOException e) {
        // NOTE(review): failure is still swallowed after printing the trace,
        // matching the original contract; consider propagating or logging.
        e.printStackTrace();
    }
}
示例6: getCsvFormat
/**
 * Returns the CSVFormat constant matching the given format name.
 * Matching is case-insensitive and tolerant of surrounding whitespace;
 * several spellings (dashes/underscores) are accepted for multi-word names.
 *
 * @param format the CSV format name, e.g. "rfc4180", "excel", "tdf"
 * @return the matching {@link CSVFormat} constant
 * @throws RuntimeException if the name is not a supported format
 */
private CSVFormat getCsvFormat(String format) {
    switch (format.trim().toLowerCase()) {
        case "default":
            return CSVFormat.DEFAULT;
        case "excel":
            return CSVFormat.EXCEL;
        case "informixunload":
        case "informix-unload":
        case "informix_unload":
            return CSVFormat.INFORMIX_UNLOAD;
        case "informixunloadcsv":
        case "informix-unload-csv":
        case "informix_unload_csv":
            return CSVFormat.INFORMIX_UNLOAD_CSV;
        case "mysql":
            return CSVFormat.MYSQL;
        case "postgres":
        case "postgresql-csv":
        case "postgresql_csv":
            return CSVFormat.POSTGRESQL_CSV;
        case "postgresql-text":
        case "postgresql_text":
            return CSVFormat.POSTGRESQL_TEXT;
        case "rfc4180":
            // The original was missing a break here, so "rfc4180" fell
            // through to "tdf" and then to the default throw.
            return CSVFormat.RFC4180;
        case "tdf":
            // Likewise "tdf" fell through to the default throw.
            return CSVFormat.TDF;
        default:
            // The original String.format was missing its argument, which
            // would itself have thrown MissingFormatArgumentException.
            throw new RuntimeException(
                String.format("CSV format \"%s\" is not among the supported formats", format));
    }
}
示例7: readEntityDictionary
/**
 * Loads an entity dictionary CSV (header columns "name" and "url") and
 * registers, for every token-prefix of each entity name, an Entity entry in
 * {@code dictionaryMap}: slot i maps the normalized (i+1)-token prefix to an
 * entity; the entity is flagged complete only when the prefix covers the
 * whole name. When two entities share a prefix key, the one with the
 * shorter full name wins.
 *
 * @param filename dictionary file, resolved relative to the base dir
 * @param charset  character encoding of the file
 * @param type     entity type assigned to every loaded entity
 * @throws RuntimeException wrapping any IOException from reading the file
 */
void readEntityDictionary(String filename, String charset, String type) {
    String absoluteFilename = IOUtils.getAbsoluteFile(getBaseDir(), filename);
    CSVFormat format = CSVFormat.RFC4180.withHeader();
    // try-with-resources: the original only closed the stream on the success
    // path, leaking it whenever parsing threw.
    try (InputStream in = IOUtils.getInputStream(absoluteFilename);
         java.io.Reader reader = new InputStreamReader(in, charset);
         CSVParser parser = format.parse(reader)) {
        for (CSVRecord record : parser.getRecords()) {
            String entityName = record.get("name");
            String entityUrl = record.get("url");
            StringTokenizer tokenizer = new StringTokenizer(entityName);
            int tokenCount = 0;
            StringBuilder tokenPhrase = new StringBuilder();
            while (tokenizer.hasMoreTokens()) {
                String token = tokenizer.nextToken();
                // Grow the per-prefix-length map list on demand.
                if (dictionaryMap.size() < tokenCount + 1) {
                    dictionaryMap.add(new HashMap<String, Entity>());
                }
                if (tokenCount > 0) {
                    tokenPhrase.append(" ");
                }
                tokenPhrase.append(token);
                Map<String, Entity> entityMap = dictionaryMap.get(tokenCount);
                // Complete only when this prefix is the full entity name.
                Entity newEntity = new Entity(entityName, !tokenizer.hasMoreTokens());
                newEntity.setUrl(entityUrl);
                newEntity.setType(type);
                String key = normalize(tokenPhrase.toString());
                Entity entity = entityMap.get(key);
                if (entity == null) {
                    entityMap.put(key, newEntity);
                } else if (newEntity.getName().length() < entity.getName().length()) {
                    // Prefer the shortest full name for a contested prefix.
                    entityMap.put(key, newEntity);
                }
                tokenCount++;
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
示例8: loadCSV
/**
 * Loads a CSV/TSV file into a DataTable, converting every record to a
 * String[] first. Byte-order markers at the start of the file are handled.
 *
 * @param fileName         file to read (UTF-8)
 * @param formatType       one of "tsv", "mysql", "excel", "rfc4180" or null
 *                         (null defaults to RFC 4180)
 * @param colTypesOverride if non-null, used instead of the inferred types
 * @param colNamesOverride if non-null, used instead of the header names
 * @param hasHeaderRow     whether the first row is a header
 * @return the loaded table
 * @throws IllegalArgumentException wrapping any failure to open/read the file
 */
public static DataTable loadCSV(String fileName, String formatType, VariableType[] colTypesOverride, String[] colNamesOverride, boolean hasHeaderRow) {
    try {
        // use apache commons io + csv to load but convert to list of String[]
        // byte-order markers are handled if present at start of file.
        FileInputStream fis = new FileInputStream(fileName);
        final Reader reader = new InputStreamReader(new BOMInputStream(fis), "UTF-8");
        CSVFormat format;
        if ( formatType==null ) {
            format = hasHeaderRow ? CSVFormat.RFC4180.withHeader() : CSVFormat.RFC4180;
        }
        else {
            switch ( formatType.toLowerCase() ) {
                case "tsv":
                    format = hasHeaderRow ? CSVFormat.TDF.withHeader() : CSVFormat.TDF;
                    break;
                case "mysql":
                    format = hasHeaderRow ? CSVFormat.MYSQL.withHeader() : CSVFormat.MYSQL;
                    break;
                case "excel":
                    format = hasHeaderRow ? CSVFormat.EXCEL.withHeader() : CSVFormat.EXCEL;
                    break;
                case "rfc4180":
                default:
                    format = hasHeaderRow ? CSVFormat.RFC4180.withHeader() : CSVFormat.RFC4180;
                    break;
            }
        }
        final CSVParser parser = new CSVParser(reader, format);
        List<String[]> rows = new ArrayList<>();
        // getHeaderMap() returns null for formats without withHeader(); the
        // original NPE'd here whenever hasHeaderRow was false.
        int numHeaderNames =
            parser.getHeaderMap() == null ? 0 : parser.getHeaderMap().size();
        try {
            for (final CSVRecord record : parser) {
                String[] row = new String[record.size()];
                for (int j = 0; j<record.size(); j++) {
                    row[j] = record.get(j);
                }
                rows.add(row);
            }
        }
        finally {
            parser.close();
            reader.close();
        }
        VariableType[] actualTypes = computeColTypes(rows, numHeaderNames);
        String[] colNames;
        if ( colNamesOverride!=null ) {
            colNames = colNamesOverride;
        }
        else if ( parser.getHeaderMap()!=null ) {
            Set<String> colNameSet = parser.getHeaderMap().keySet();
            colNames = colNameSet.toArray(new String[colNameSet.size()]);
        }
        else {
            // No header row and no override: leave names empty.
            colNames = new String[0];
        }
        if ( colTypesOverride!=null ) {
            actualTypes = colTypesOverride;
        }
        return fromStrings(rows, actualTypes, colNames, false);
    }
    catch (Exception e) {
        throw new IllegalArgumentException("Can't open and/or read "+fileName, e);
    }
}