This article collects typical usage examples of the Java method org.apache.commons.lang3.text.StrTokenizer.reset. If you are unsure what StrTokenizer.reset does, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore the containing class, org.apache.commons.lang3.text.StrTokenizer, for further details.
The following presents 15 code examples of StrTokenizer.reset, sorted by popularity.
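All of the examples share the same pattern: configure a StrTokenizer once, then rebind it to each new input line with reset(...) instead of constructing a fresh tokenizer per line. As a minimal sketch of that pattern (the input strings here are made up for illustration):

import org.apache.commons.lang3.text.StrTokenizer;

StrTokenizer tokenizer = StrTokenizer.getCSVInstance(); // comma-delimited, quote-aware
tokenizer.setDelimiterChar(';');                        // switch the delimiter to semicolons
for (String line : new String[] { "a;b;c", "1;2;3" }) {
    tokenizer.reset(line);                              // rebind the tokenizer to the next line
    String[] tokens = tokenizer.getTokenArray();
    // process tokens...
}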
Example 1: test
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
private long test(StrTokenizer tokenizer, File source) throws IOException {
    try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(source), "utf8"))) {
        // keep track of time while iterating
        long start = System.currentTimeMillis();
        String row = br.readLine();
        while (row != null) {
            tokenizer.reset(row);
            String[] columns = tokenizer.getTokenArray(); // result intentionally unused; we only measure tokenizing speed
            row = br.readLine();
        }
        return System.currentTimeMillis() - start;
    }
}
Example 2: testCsvUnquoted
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
@Test
public void testCsvUnquoted() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterString(",");
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    tokenizer.reset("121,432423, 9099053,Frieda karla L.,DC.,Ahrens");
    String[] columns = tokenizer.getTokenArray();
    assertEquals("121", columns[0]);
    assertEquals("432423", columns[1]);
    assertEquals(" 9099053", columns[2]);
    assertEquals("Frieda karla L.", columns[3]);
    assertEquals("DC.", columns[4]);
    assertEquals("Ahrens", columns[5]);

    tokenizer.reset(",,,,zzz ");
    columns = tokenizer.getTokenArray();
    assertNull(columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);
}
Example 3: _tokenizeString
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
static String[] _tokenizeString(String string) {
    final StrTokenizer _tokenizer = new StrTokenizer()
            .setDelimiterMatcher(StrMatcher.trimMatcher())
            .setQuoteMatcher(StrMatcher.quoteMatcher())
            .setTrimmerMatcher(StrMatcher.trimMatcher())
            .setIgnoredMatcher(StrMatcher.quoteMatcher());
    _tokenizer.reset(string.toLowerCase());
    return _tokenizer.getTokenArray();
}
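The matcher setup in Example 3 is worth a note: trimMatcher serves as the delimiter (split on whitespace), quoteMatcher keeps quoted phrases together, and the same quoteMatcher doubles as the ignored matcher so stray quote characters are dropped. For an illustrative input, _tokenizeString("Foo \"Bar Baz\"") should therefore yield [foo, bar baz].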
Example 4: testCsvQuoted
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
@Test
public void testCsvQuoted() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterString(",");
    tokenizer.setQuoteChar('"');
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    tokenizer.reset("121,432423, 9099053,\"Frieda karla L.,DC.\",Ahrens");
    String[] columns = tokenizer.getTokenArray();
    assertEquals("121", columns[0]);
    assertEquals("432423", columns[1]);
    assertEquals(" 9099053", columns[2]);
    assertEquals("Frieda karla L.,DC.", columns[3]);
    assertEquals("Ahrens", columns[4]);

    tokenizer.reset(" ,4321");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertEquals("4321", columns[1]);

    tokenizer.reset(" ,,,,zzz ");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);

    tokenizer.reset(",,,,zzz ");
    columns = tokenizer.getTokenArray();
    assertNull(columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);
}
Example 5: testPipes
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
@Test
public void testPipes() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterChar('|');
    tokenizer.setQuoteChar('"');
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    tokenizer.reset("121|432423| 9099053|\"Frieda karla L.|DC.\"|Ahrens");
    String[] columns = tokenizer.getTokenArray();
    assertEquals("121", columns[0]);
    assertEquals("432423", columns[1]);
    assertEquals(" 9099053", columns[2]);
    assertEquals("Frieda karla L.|DC.", columns[3]);
    assertEquals("Ahrens", columns[4]);

    tokenizer.reset(" |4321");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertEquals("4321", columns[1]);

    tokenizer.reset(" ||||zzz ");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);

    tokenizer.reset("||||zzz ");
    columns = tokenizer.getTokenArray();
    assertNull(columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);
}
Example 6: testTabQuoted
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
@Test
public void testTabQuoted() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterString("\t");
    tokenizer.setQuoteChar('"');
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
    String[] columns = tokenizer.getTokenArray();
    assertEquals("121", columns[0]);
    assertEquals("432423", columns[1]);
    assertEquals(" 9099053", columns[2]);
    assertEquals("Frieda karla L.,DC.", columns[3]);
    assertEquals("Ahrens", columns[4]);

    tokenizer.reset(" \t4321");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertEquals("4321", columns[1]);

    tokenizer.reset(" \t\t\t\tzzz ");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);

    tokenizer.reset("\t\t\t\tzzz ");
    columns = tokenizer.getTokenArray();
    assertNull(columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);
}
Example 7: testTabUnquoted
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
@Test
public void testTabUnquoted() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterString("\t");
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
    String[] columns = tokenizer.getTokenArray();
    assertEquals("121", columns[0]);
    assertEquals("432423", columns[1]);
    assertEquals(" 9099053", columns[2]);
    assertEquals("\"Frieda karla L.,DC.\"", columns[3]);
    assertEquals("Ahrens", columns[4]);

    tokenizer.reset(" \t4321");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertEquals("4321", columns[1]);

    tokenizer.reset(" \t\t\t\tzzz ");
    columns = tokenizer.getTokenArray();
    assertEquals(" ", columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);

    tokenizer.reset("\t\t\t\tzzz ");
    columns = tokenizer.getTokenArray();
    assertNull(columns[0]);
    assertNull(columns[1]);
    assertNull(columns[2]);
    assertNull(columns[3]);
    assertEquals("zzz ", columns[4]);
}
Example 8: load
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
/**
 * Loads the CSV file from the file system.
 */
public void load() throws IOException {
    lines = Lists.newLinkedList();
    headers = null;
    StrTokenizer st = StrTokenizer.getCSVInstance();
    st.setDelimiterChar(';');
    // Note: FileReader uses the platform default encoding (assumed to be UTF-8 here).
    BufferedReader br = null;
    try {
        br = new BufferedReader(new FileReader(fileName));
        for (String line = null; (line = br.readLine()) != null;) {
            String trimmedLine = StringUtils.trimToNull(line);
            if (trimmedLine == null || trimmedLine.startsWith("#")) {
                continue; // skip blank lines and comments
            }
            st.reset(line);
            ArrayList<String> tokens = Lists.newArrayList(st.getTokenArray());
            if (headers == null) {
                headers = tokens;
            } else {
                lines.add(tokens);
            }
        }
    } finally {
        IOUtils.closeQuietly(br);
    }
}
Example 9: initColumns
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
private List<Column> initColumns(final StrTokenizer st, final String headerLine) {
    st.reset(headerLine);
    String[] headers = st.getTokenArray();
    List<Column> columns = newArrayListWithCapacity(headers.length);
    for (String header : headers) {
        columns.add(new Column(header));
    }
    return columns;
}
Example 10: loadData
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
private List<? extends List<String>> loadData(final File file) throws IOException {
    try (BufferedReader br = newReader(file, Charsets.UTF_8)) {
        List<List<String>> rows = newArrayList();
        StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
        tokenizer.setDelimiterChar(DELIMITER);
        for (String line; (line = br.readLine()) != null; ) {
            tokenizer.reset(line);
            List<String> tokenList = tokenizer.getTokenList();
            rows.add(tokenList);
        }
        return rows;
    }
}
Example 11: readAggregatedMap
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
public static Map<String, String> readAggregatedMap(final File executionsFile, final Charset charset) throws IOException {
    final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(';');

    Map<String, String> result = newHashMapWithExpectedSize(11);
    List<String> lines = Files.readLines(executionsFile, charset);
    String[] headers = null;
    for (String line : lines) {
        tokenizer.reset(line);
        String[] tokens = tokenizer.getTokenArray();
        if (headers == null) {
            headers = tokens;
        } else {
            String[] data = tokens; // tokens of the current line; re-reading the tokenizer here would be redundant
            String operation = data[0];
            for (int i = 1; i < headers.length; ++i) {
                result.put(operation + "." + headers[i], data[i]);
            }
        }
    }
    return result;
}
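As a worked illustration with made-up data: if the executions file contains the header line operation;min;max followed by the data line login;10;20, the returned map holds login.min=10 and login.max=20.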
Example 12: readLoadProfileEvents
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException {
    ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create();
    String loadProfile = testplan.elementTextTrim("loadProfile");

    // the load profile config file is resolved relative to the testplan
    File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile);

    try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) {
        StrTokenizer st = StrTokenizer.getCSVInstance();
        st.setDelimiterChar(';');
        for (String line = null; (line = br.readLine()) != null;) {
            // ignore lines that are blank, commented out, or represent markers
            if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) {
                continue;
            }
            st.reset(line);
            String[] tokens = st.getTokenArray();
            long startTime = Long.parseLong(tokens[0]);
            String operation = tokens[1];
            String target = tokens[2];
            int daemonId = Integer.parseInt(tokens[3]);
            int processId = Integer.parseInt(tokens[4]);
            eventsByProcess.put(new ProcessKey(daemonId, processId),
                    new LoadProfileEvent(startTime, operation, target, daemonId, processId));
        }
    }
    return eventsByProcess;
}
Example 13: processFile
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
/**
 * Processes the specified CSV file. For every line but the header line (which is required), the
 * specified command is executed.
 *
 * @param reader
 *            the reader for loading the CSV data
 * @param delimiter
 *            the column separator
 * @param quoteChar
 *            the quote character ('\0' for no quoting)
 * @param command
 *            the command (i.e. a Groovy closure if used in a Groovy script) to be executed for
 *            every processed line
 */
public void processFile(final Reader reader, final String delimiter, final char quoteChar, final Runnable command) {
    try {
        List<String> inputLines = CharStreams.readLines(reader);

        StrTokenizer st = StrTokenizer.getCSVInstance();
        st.setDelimiterString(delimiter);
        if (quoteChar != '\0') {
            st.setQuoteChar(quoteChar);
        } else {
            st.setQuoteMatcher(StrMatcher.noneMatcher());
        }

        // extract header
        String headerLine = inputLines.remove(0);
        List<Column> columns = initColumns(st, headerLine);

        for (String line : inputLines) {
            st.reset(line);
            String[] colArray = st.getTokenArray();
            int len = colArray.length;
            checkState(len == columns.size(), "Mismatch between number of header columns and number of line columns.");

            DataSource dataSource = dataSourceProvider.get();
            Configuration config = configProvider.get();
            for (int i = 0; i < len; ++i) {
                String value = StringUtils.trimToEmpty(colArray[i]);
                String dataSetKey = columns.get(i).dataSetKey;
                String key = columns.get(i).key;
                if (dataSetKey != null) {
                    if ("<auto>".equals(value)) {
                        dataSource.resetFixedValue(dataSetKey, key);
                    } else {
                        log.debug("Setting data set entry for " + this + " to value=" + value);
                        dataSource.setFixedValue(dataSetKey, key, value);
                    }
                } else {
                    log.debug("Setting property for " + this + " to value=" + value);
                    config.put(key, value);
                }
            }
            command.run();
        }
    } catch (IOException ex) {
        throw new JFunkException("Error processing CSV data", ex);
    }
}
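The quoteChar == '\0' branch above disables quote handling via StrMatcher.noneMatcher(). A minimal sketch of the difference (illustrative input, not from the source):

StrTokenizer st = StrTokenizer.getCSVInstance(); // default quote char is '"'
st.reset("a,\"b,c\"");
String[] quoted = st.getTokenArray();      // ["a", "b,c"] -- the quoted comma stays inside one token
st.setQuoteMatcher(StrMatcher.noneMatcher());
st.reset("a,\"b,c\"");
String[] unquoted = st.getTokenArray();    // ["a", "\"b", "c\""] -- quotes are treated literally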
Example 14: mergeFiles
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
public void mergeFiles() throws IOException {
    if (!inputDir.isDirectory()) {
        throw new IllegalArgumentException("The input File must be a directory");
    }

    StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(DELIMITER);

    Map<String, FileChannel> destChannels = newHashMap();
    List<OutputStream> outputStreams = newArrayList();
    File[] filesInInputDirectory = inputDir.listFiles();
    try {
        for (File file : filesInInputDirectory) {
            FileInputStream fis = null;
            try {
                fis = new FileInputStream(file);
                for (Scanner scanner = new Scanner(fis, Charsets.UTF_8.name()); scanner.hasNextLine();) {
                    String line = scanner.nextLine();
                    if (line.isEmpty()) {
                        continue; // skip blank lines; they carry no sort key
                    }
                    tokenizer.reset(line);
                    List<String> tokenList = tokenizer.getTokenList();
                    String key = tokenList.get(sortCriteriaColumn);
                    FileChannel destChannel = destChannels.get(key);
                    if (destChannel == null) {
                        FileOutputStream fos = new FileOutputStream(new File(outputDir, FILE_TYPE + "_" + key + ".out"));
                        outputStreams.add(fos);
                        destChannel = fos.getChannel();
                        destChannels.put(key, destChannel);
                        // write the header (could be improved)
                        IoUtilities.writeLineToChannel(destChannel, getHeader(), Charsets.UTF_8);
                    }
                    StrBuilder outputLine = new StrBuilder();
                    for (String s : tokenList) {
                        StrBuilderUtils.appendEscapedAndQuoted(outputLine, DELIMITER, s);
                    }
                    IoUtilities.writeLineToChannel(destChannel, outputLine.toString(), Charsets.UTF_8);
                }
            } finally {
                closeQuietly(fis);
            }
        }
    } finally {
        outputStreams.forEach(IOUtils::closeQuietly);
    }
}
Example 15: readDataFile
import org.apache.commons.lang3.text.StrTokenizer; // import the package/class the method depends on
/**
 * Reads a semicolon-delimited CSV file into a list. Each line in the result list will be
 * another list of {@link Number} objects. The file is expected to have two numeric columns
 * which are parsed using the specified number format.
 *
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @return the immutable result list
 */
public static List<SeriesPoint> readDataFile(final File file, final Charset charset, final NumberFormat numberFormat)
        throws IOException {
    final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(';');
    try (BufferedReader br = newReader(file, charset)) {
        boolean headerLine = true;
        List<SeriesPoint> result = newArrayListWithExpectedSize(200);
        for (String line; (line = br.readLine()) != null;) {
            try {
                if (headerLine) {
                    headerLine = false;
                } else {
                    tokenizer.reset(line);
                    String[] tokens = tokenizer.getTokenArray();
                    double x = numberFormat.parse(tokens[0]).doubleValue();
                    double y = numberFormat.parse(tokens[1]).doubleValue();
                    if (!result.isEmpty()) {
                        // additional point for histogram: repeat the previous y at the new x
                        SeriesPoint previousPoint = getLast(result);
                        result.add(new SeriesPoint(x, previousPoint.getY()));
                    }
                    result.add(new SeriesPoint(x, y));
                }
            } catch (ParseException ex) {
                throw new IOException("Error parsing number in file: " + file, ex);
            }
        }
        int size = result.size();
        if (size > 2) {
            // additional point at end for histogram; since points are added in pairs,
            // the previous distinct x sits at index size - 3
            SeriesPoint nextToLast = result.get(size - 3);
            SeriesPoint last = result.get(size - 1);
            double dX = last.getX().doubleValue() - nextToLast.getX().doubleValue();
            result.add(new SeriesPoint(last.getX().doubleValue() + dX, last.getY()));
        }
        return ImmutableList.copyOf(result);
    }
}
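As a worked illustration with made-up rows: the data lines 1;10 and 2;20 produce the points (1, 10), (2, 10), (2, 20), and the trailing block then appends (3, 20), so every measured value is rendered as a flat step of width dX.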