This article collects typical usage examples of the Java method org.apache.commons.csv.CSVFormat.parse. If you have been wondering what exactly CSVFormat.parse does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.commons.csv.CSVFormat.
The following presents six code examples of CSVFormat.parse, sorted by popularity by default.
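Before the examples, here is a minimal, self-contained sketch of the basic call pattern: CSVFormat.parse(Reader) returns a CSVParser, which is both Closeable and Iterable over CSVRecord. The file name "data.csv" is a placeholder for this sketch, not taken from any of the examples below.

import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CsvParseDemo {
    public static void main(String[] args) throws IOException {
        // "data.csv" is a placeholder path; adjust to your environment.
        try (Reader reader = new FileReader("data.csv");
             CSVParser parser = CSVFormat.DEFAULT.parse(reader)) {
            for (CSVRecord record : parser) {
                System.out.println(record.get(0)); // fields are addressed by zero-based index
            }
        }
    }
}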
Example 1: csvLine2BeamRecord
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
public static BeamRecord csvLine2BeamRecord(
        CSVFormat csvFormat,
        String line,
        BeamRecordSqlType beamRecordSqlType) {
    List<Object> fieldsValue = new ArrayList<>(beamRecordSqlType.getFieldCount());
    try (StringReader reader = new StringReader(line)) {
        CSVParser parser = csvFormat.parse(reader);
        CSVRecord rawRecord = parser.getRecords().get(0);

        if (rawRecord.size() != beamRecordSqlType.getFieldCount()) {
            throw new IllegalArgumentException(String.format(
                    "Expect %d fields, but actually %d",
                    beamRecordSqlType.getFieldCount(), rawRecord.size()));
        } else {
            for (int idx = 0; idx < beamRecordSqlType.getFieldCount(); idx++) {
                String raw = rawRecord.get(idx);
                fieldsValue.add(autoCastField(beamRecordSqlType.getFieldTypeByIndex(idx), raw));
            }
        }
    } catch (IOException e) {
        throw new IllegalArgumentException("decodeRecord failed!", e);
    }
    return new BeamRecord(beamRecordSqlType, fieldsValue);
}
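One caveat worth noting: parser.getRecords().get(0) throws an IndexOutOfBoundsException rather than a descriptive error if the input line yields no records (for example, an empty string). A defensive variant of that step, offered as a sketch rather than as part of the original Beam code, might look like:

List<CSVRecord> records = parser.getRecords();
if (records.isEmpty()) {
    throw new IllegalArgumentException("Input line produced no CSV records: " + line);
}
CSVRecord rawRecord = records.get(0);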
Example 2: decode
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
/**
* Decodes data from the provided stream and invokes the provided {@link Consumer} for each decoded record.
*
* @param in the {@link InputStream} for the CSV file
* @param headers a list of the headers to keep from decoded records
* @param mapToResult the function to invoke for each decoded record
* @throws IOException in the event of an I/O error.
* @throws DecodingDataFromAdapterException if an error occurred while decoding the CSV file.
*/
public void decode(InputStream in, List<String> headers, Consumer<DataSample<T>> mapToResult)
        throws IOException, DecodingDataFromAdapterException {
    try (Profiler ignored = Profiler.start("Building time series from csv data", logger::trace)) {
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding))) {
            CSVFormat csvFormat = CSVFormat.DEFAULT
                    .withAllowMissingColumnNames(false)
                    .withFirstRecordAsHeader()
                    .withSkipHeaderRecord()
                    .withDelimiter(delimiter);
            Iterable<CSVRecord> records = csvFormat.parse(reader);
            for (CSVRecord csvRecord : records) {
                ZonedDateTime timeStamp = dateParser.apply(csvRecord.get(0));
                DataSample<T> tRecord = new DataSample<>(timeStamp);
                for (String h : headers) {
                    tRecord.getCells().put(h, numberParser.apply(csvRecord.get(h)));
                }
                mapToResult.accept(tRecord);
            }
        }
    }
}
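A small detail in the format setup: withFirstRecordAsHeader() is shorthand for withHeader() plus withSkipHeaderRecord(), so the explicit withSkipHeaderRecord() call above is redundant, though harmless. Once the header row is captured, fields can be fetched by column name, which is what csvRecord.get(h) does in the loop. A standalone illustration, with made-up column names "time" and "cpu":

static void headerDemo() throws IOException {
    Reader reader = new StringReader("time,cpu\n2020-01-01T00:00:00Z,42.0\n");
    for (CSVRecord record : CSVFormat.DEFAULT.withFirstRecordAsHeader().parse(reader)) {
        String time = record.get("time"); // lookup by header name instead of index
        String cpu = record.get("cpu");
        System.out.println(time + " -> " + cpu);
    }
}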
Example 3: getDataColumnHeaders
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
/**
* Returns the column headers of the CSV file.
*
* @param in an input stream for the CSV file.
* @return the column headers of the CSV file.
* @throws IOException in the event of an I/O error.
* @throws DecodingDataFromAdapterException if an error occurred while decoding the CSV file.
*/
public List<String> getDataColumnHeaders(InputStream in) throws IOException, DecodingDataFromAdapterException {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding))) {
        CSVFormat csvFormat = CSVFormat.DEFAULT
                .withAllowMissingColumnNames(false)
                .withDelimiter(delimiter);
        Iterable<CSVRecord> records = csvFormat.parse(reader);
        return this.parseColumnHeaders(records.iterator().next());
    }
}
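Rather than reading the header row as an ordinary record, the parser can also be asked to capture it: with withFirstRecordAsHeader() set, CSVParser.getHeaderMap() returns the header-to-index mapping in column order. A sketch of that alternative, assuming the same encoding and delimiter fields as the surrounding class:

public List<String> getDataColumnHeaders(InputStream in) throws IOException {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding));
         CSVParser parser = CSVFormat.DEFAULT
                 .withDelimiter(delimiter)
                 .withFirstRecordAsHeader()
                 .parse(reader)) {
        // getHeaderMap() returns a copy of the header map that iterates in column order.
        return new ArrayList<>(parser.getHeaderMap().keySet());
    }
}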
Example 4: readEntityDictionary
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
void readEntityDictionary(String filename, String charset, String type) {
    String absoluteFilename = IOUtils.getAbsoluteFile(getBaseDir(), filename);
    CSVFormat format = CSVFormat.RFC4180.withHeader();
    // try-with-resources so the stream, reader, and parser are closed even on error
    // (the original closed only the stream, and not on the exception path).
    try (InputStream in = IOUtils.getInputStream(absoluteFilename);
         java.io.Reader reader = new InputStreamReader(in, charset);
         CSVParser parser = format.parse(reader)) {
        for (CSVRecord record : parser.getRecords()) {
            String entityName = record.get("name");
            String entityUrl = record.get("url");
            StringTokenizer tokenizer = new StringTokenizer(entityName);
            int tokenCount = 0;
            StringBuilder tokenPhrase = new StringBuilder();
            while (tokenizer.hasMoreTokens()) {
                String token = tokenizer.nextToken();
                // Grow the positional dictionary so a map exists for this token index.
                if (dictionaryMap.size() < tokenCount + 1) {
                    dictionaryMap.add(new HashMap<String, Entity>());
                }
                if (tokenCount > 0) {
                    tokenPhrase.append(" ");
                }
                tokenPhrase.append(token);
                Map<String, Entity> entityMap = dictionaryMap.get(tokenCount);
                // The entity is marked complete only on the last token of its name.
                Entity newEntity = new Entity(entityName, !tokenizer.hasMoreTokens());
                newEntity.setUrl(entityUrl);
                newEntity.setType(type);
                String key = normalize(tokenPhrase.toString());
                Entity entity = entityMap.get(key);
                // Prefer the shortest entity name when several share the same key.
                if (entity == null || newEntity.getName().length() < entity.getName().length()) {
                    entityMap.put(key, newEntity);
                }
                tokenCount++;
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
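The loop above builds a positional dictionary: dictionaryMap.get(n) holds entries keyed by the first n + 1 tokens of an entity name, and the Entity is flagged as complete only on the final token. For a made-up row named "New York City", the inserted keys would be:

// dictionaryMap.get(0): normalize("New")           -> Entity(complete = false)
// dictionaryMap.get(1): normalize("New York")      -> Entity(complete = false)
// dictionaryMap.get(2): normalize("New York City") -> Entity(complete = true)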
Example 5: init
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
@Override
public void init() {
    topicValues = new HashMap<String, HashSet<String>>();
    InputStream in = null;
    try {
        String filename = getProperty("filename", null);
        String absoluteFilename = IOUtils.getAbsoluteFile(getBaseDir(), filename);
        in = IOUtils.getInputStream(absoluteFilename);
        java.io.Reader reader = new InputStreamReader(in, StandardCharsets.UTF_8.name());
        CSVFormat format = CSVFormat.RFC4180
                .withHeader()
                .withDelimiter(',');
        CSVParser parser = format.parse(reader);
        Iterator<CSVRecord> csvIterator = parser.iterator();
        while (csvIterator.hasNext()) {
            CSVRecord record = csvIterator.next();
            String topic = record.get(0);
            String value = record.get(1);
            if (!topicValues.containsKey(topic)) {
                topicValues.put(topic, new HashSet<String>());
            }
            topicValues.get(topic).add(value);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException ignored) {
            }
        }
    }
    fieldTopic = getProperty("fieldTopic", null);
    fieldValue = getProperty("fieldValue", null);
    super.init();
}
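The containsKey/put/add sequence inside the loop is the classic build-a-multimap idiom; on Java 8 and later it can be collapsed into a single call, as a sketch:

topicValues.computeIfAbsent(topic, k -> new HashSet<String>()).add(value);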
Example 6: init
import org.apache.commons.csv.CSVFormat; // import the package/class the method depends on
@Override
public void init() {
    topicValues = new HashMap<String, HashSet<String>>();
    InputStream in = null;
    try {
        String filename = getProperty("filename", null);
        String absoluteFilename = IOUtils.getAbsoluteFile(getBaseDir(), filename);
        in = IOUtils.getInputStream(absoluteFilename);
        java.io.Reader reader = new InputStreamReader(in, StandardCharsets.UTF_8.name());
        CSVFormat format = CSVFormat.RFC4180
                .withHeader()
                .withDelimiter(',');
        CSVParser parser = format.parse(reader);
        Iterator<CSVRecord> csvIterator = parser.iterator();
        while (csvIterator.hasNext()) {
            CSVRecord record = csvIterator.next();
            String topic = record.get(0);
            String value = record.get(1).toLowerCase();
            if (!topicValues.containsKey(topic)) {
                topicValues.put(topic, new HashSet<String>());
            }
            topicValues.get(topic).add(value);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException ignored) {
            }
        }
    }
    fieldTopic = getProperty("fieldTopic", null);
    fieldValue = getProperty("fieldValue", null);
    super.init();
}
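This example differs from Example 5 only in that values are lower-cased before being stored, which makes later lookups case-insensitive as long as queries are lowered the same way. One hedged refinement, not in the original code: String.toLowerCase() is locale-sensitive, so a locale-independent variant would pass Locale.ROOT explicitly (java.util.Locale is assumed to be imported):

String value = record.get(1).toLowerCase(Locale.ROOT); // avoids locale-specific case rules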