

Java StrTokenizer.getTokenArray Method Code Examples

This article collects typical usage examples of the Java method org.apache.commons.lang3.text.StrTokenizer.getTokenArray. If you are wondering how StrTokenizer.getTokenArray is used in practice, or are looking for concrete examples of it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.commons.lang3.text.StrTokenizer.


The following shows 14 code examples of StrTokenizer.getTokenArray, drawn from open-source projects and sorted by popularity by default.
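
Before the project-specific examples, here is a minimal, self-contained sketch of what getTokenArray returns. The input string and class name are made up for illustration; it only assumes commons-lang3 on the classpath.

import org.apache.commons.lang3.text.StrTokenizer;

public class GetTokenArrayDemo {
  public static void main(String[] args) {
    // CSV-style tokenizer: comma delimiter, double quote as quote character
    StrTokenizer tokenizer = StrTokenizer.getCSVInstance("a,\"b,c\",d");
    String[] tokens = tokenizer.getTokenArray();
    System.out.println(String.join(" | ", tokens)); // prints: a | b,c | d
  }
}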

Example 1: test

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
private long test(StrTokenizer tokenizer, File source) throws IOException {
  FileInputStream fis = new FileInputStream(source);
  InputStreamReader reader = new InputStreamReader(fis, "utf8");
  BufferedReader br = new BufferedReader(reader);

  // keep track of time while iterating
  long start = System.currentTimeMillis();
  String row = br.readLine();
  while (row != null) {
    tokenizer.reset(row);
    String[] columns = tokenizer.getTokenArray(); // result is discarded; only throughput is measured
    row = br.readLine();
  }
  long dur = System.currentTimeMillis() - start;
  br.close();
  return dur;
}
 
Developer: gbif, Project: dwca-io, Lines: 18, Source: StrTokenizerPerformance.java
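
A hypothetical driver for the benchmark above could look like the following. The tab-delimited configuration and the file name are assumptions, not part of the original class, and the call only compiles from inside StrTokenizerPerformance because test(...) is private.

// Hypothetical driver, assumed to live inside StrTokenizerPerformance
StrTokenizer tokenizer = new StrTokenizer();
tokenizer.setDelimiterString("\t");   // assume tab-separated source data
tokenizer.setEmptyTokenAsNull(true);
tokenizer.setIgnoreEmptyTokens(false);

long millis = test(tokenizer, new File("occurrence.txt")); // file name is made up
System.out.println("Tokenized file in " + millis + " ms");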

Example 2: testCsvUnquoted

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
@Test
public void testCsvUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,Frieda karla L.,DC.,Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.", columns[3]);
  assertEquals("DC.", columns[4]);
  assertEquals("Ahrens", columns[5]);

  tokenizer.reset(",,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
 
Developer: gbif, Project: dwca-io, Lines: 25, Source: StrTokenizerTest.java

Example 3: _tokenizeString

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
static String[] _tokenizeString(String string) {
	final StrTokenizer _tokenizer = new StrTokenizer().
			setDelimiterMatcher(StrMatcher.trimMatcher()).
			setQuoteMatcher(StrMatcher.quoteMatcher()).
			setTrimmerMatcher(StrMatcher.trimMatcher()).
			setIgnoredMatcher(StrMatcher.quoteMatcher());
	_tokenizer.reset(string.toLowerCase());
	return _tokenizer.getTokenArray();
}
 
Developer: wardle, Project: rsterminology, Lines: 10, Source: ParsedMedicationBuilder.java
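
As a rough sketch of what this matcher configuration produces, the snippet below reproduces it outside the original class. The input string and the expected output are assumptions for illustration; it additionally needs imports of StrMatcher and java.util.Arrays.

StrTokenizer tokenizer = new StrTokenizer()
    .setDelimiterMatcher(StrMatcher.trimMatcher())   // split on whitespace
    .setQuoteMatcher(StrMatcher.quoteMatcher())
    .setTrimmerMatcher(StrMatcher.trimMatcher())
    .setIgnoredMatcher(StrMatcher.quoteMatcher());
tokenizer.reset("Aspirin 75mg  Once Daily".toLowerCase());
// expected (assumed) output: [aspirin, 75mg, once, daily]
System.out.println(Arrays.toString(tokenizer.getTokenArray()));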

Example 4: parseKeyValue

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public void parseKeyValue(String line) {
    int keySeparatorIndex = line.indexOf(keySeparator);
    String key;
    String valueString;
    if (keySeparatorIndex < 0) {
        if (keySeparatorOptional) {
            key = line.trim();
            valueString = "";
        } else {
            return;
        }
    } else {
        key = line.substring(0, keySeparatorIndex).trim();
        valueString = line.substring(
                keySeparatorIndex + keySeparator.length()
        ).trim();
    }

    String[] values;
    if (separator == null) {
        values = new String[]{valueString};
    } else {
        StrTokenizer tokenizer = createStrTokenizer(valueString);
        values = tokenizer.getTokenArray();
    }

    String[] result = new String[values.length + 1];
    result[0] = key;
    System.arraycopy(values, 0, result, 1, values.length);

    storeLine(result);
}
 
Developer: softwareloop, Project: tstconfig, Lines: 33, Source: Config.java
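
A brief, hypothetical walkthrough of parseKeyValue. The field values keySeparator = "=" and separator = "," are assumptions, and createStrTokenizer and storeLine are project-internal helpers not shown in the excerpt.

// Assuming keySeparator = "=" and separator = ",":
//
//   parseKeyValue("timeout = 30, 60");
//
// splits the line into
//   key         -> "timeout"
//   valueString -> "30, 60"
// and, after tokenizing the value string, passes roughly
//   ["timeout", "30", "60"]
// to storeLine(...) (exact whitespace handling depends on createStrTokenizer).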

Example 5: testCsvQuoted

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
@Test
public void testCsvQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,\"Frieda karla L.,DC.\",Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   ,4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ,,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset(",,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
 
Developer: gbif, Project: dwca-io, Lines: 38, Source: StrTokenizerTest.java

Example 6: testPipes

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
@Test
public void testPipes() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterChar('|');
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121|432423| 9099053|\"Frieda karla L.|DC.\"|Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.|DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   |4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ||||zzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("||||zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
 
Developer: gbif, Project: dwca-io, Lines: 38, Source: StrTokenizerTest.java

Example 7: testTabQuoted

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
@Test
public void testTabQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
 
Developer: gbif, Project: dwca-io, Lines: 38, Source: StrTokenizerTest.java

Example 8: testTabUnquoted

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
@Test
public void testTabUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("\"Frieda karla L.,DC.\"", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
 
Developer: gbif, Project: dwca-io, Lines: 37, Source: StrTokenizerTest.java

Example 9: initColumns

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
private List<Column> initColumns(final StrTokenizer st, final String headerLine) {
	st.reset(headerLine);

	String[] headers = st.getTokenArray();
	List<Column> columns = newArrayListWithCapacity(headers.length);
	for (String header : headers) {
		columns.add(new Column(header));
	}
	return columns;
}
 
Developer: mgm-tp, Project: jfunk, Lines: 11, Source: CsvDataProcessor.java

Example 10: readAggregatedMap

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public static Map<String, String> readAggregatedMap(final File executionsFile, final Charset charset) throws IOException {
	final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(';');

	Map<String, String> result = newHashMapWithExpectedSize(11);

	List<String> lines = Files.readLines(executionsFile, charset);
	String[] headers = null;

	for (String line : lines) {
		tokenizer.reset(line);
		String[] tokens = tokenizer.getTokenArray();

		if (headers == null) {
			headers = tokens;
		} else {

			String[] data = tokenizer.getTokenArray(); // same line as above, so same token contents as 'tokens'

			String operation = data[0];
			for (int i = 1; i < headers.length; ++i) {
				result.put(operation + "." + headers[i], data[i]);
			}
		}
	}

	return result;
}
 
Developer: mgm-tp, Project: perfload-perfalyzer, Lines: 29, Source: PerfAlyzerUtils.java
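
For illustration, a hypothetical executions file and the map readAggregatedMap would build from it; the column names and values are made up.

// Hypothetical input (semicolon-delimited, first line is the header):
//
//   operation;minTime;maxTime
//   login;12;873
//   search;45;1204
//
// Expected (assumed) result map:
//
//   login.minTime  -> "12"
//   login.maxTime  -> "873"
//   search.minTime -> "45"
//   search.maxTime -> "1204"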

Example 11: readLoadProfileEvents

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException {
	ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create();
	String loadProfile = testplan.elementTextTrim("loadProfile");

	// relative to testplan
	File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile);

	try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) {
		StrTokenizer st = StrTokenizer.getCSVInstance();
		st.setDelimiterChar(';');

		for (String line = null; (line = br.readLine()) != null;) {
			// ignore line that are blank, commented out, or represent markers
			if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) {
				continue;
			}

			st.reset(line);
			String[] tokens = st.getTokenArray();

			long startTime = Long.parseLong(tokens[0]);
			String operation = tokens[1];
			String target = tokens[2];
			int daemonId = Integer.parseInt(tokens[3]);
			int processId = Integer.parseInt(tokens[4]);

			eventsByProcess.put(new ProcessKey(daemonId, processId), new LoadProfileEvent(startTime, operation, target,
					daemonId, processId));
		}
	}

	return eventsByProcess;
}
 
Developer: mgm-tp, Project: perfload-core, Lines: 34, Source: XmlConfigReader.java
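
A hypothetical load-profile line and how the method maps it; the concrete values are made up.

// Hypothetical load profile line (semicolon-delimited):
//
//   15000;checkout;appserver-1;2;7
//
// is parsed as
//   startTime = 15000, operation = "checkout", target = "appserver-1",
//   daemonId = 2, processId = 7
// and stored under ProcessKey(2, 7) as a LoadProfileEvent.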

Example 12: parseTokenized

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public void parseTokenized(String line) {
    StrTokenizer tokenizer = createStrTokenizer(line);
    String[] tokens = tokenizer.getTokenArray();
    storeLine(tokens);
}
 
Developer: softwareloop, Project: tstconfig, Lines: 6, Source: Config.java

Example 13: processFile

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
/**
 * Processes the specified CSV file. For every line but the header line (which is required), the
 * specified command is executed.
 * 
 * @param reader
 *            the reader for loading the CSV data
 * @param delimiter
 *            the column separator
 * @param quoteChar
 *            the quote character ('\0' for no quoting)
 * @param command
 *            the command (i.e. a Groovy closure if used in a Groovy script) to be executed for
 *            every processed line
 */
public void processFile(final Reader reader, final String delimiter, final char quoteChar, final Runnable command) {
	try {
		List<String> inputLines = CharStreams.readLines(reader);

		StrTokenizer st = StrTokenizer.getCSVInstance();
		st.setDelimiterString(delimiter);
		if (quoteChar != '\0') {
			st.setQuoteChar(quoteChar);
		} else {
			st.setQuoteMatcher(StrMatcher.noneMatcher());
		}

		// extract header
		String headerLine = inputLines.remove(0);
		List<Column> columns = initColumns(st, headerLine);
		for (String line : inputLines) {
			st.reset(line);
			String[] colArray = st.getTokenArray();
			int len = colArray.length;
			checkState(len == columns.size(), "Mismatch between number of header columns and number of line columns.");

			DataSource dataSource = dataSourceProvider.get();
			Configuration config = configProvider.get();
			for (int i = 0; i < len; ++i) {
				String value = StringUtils.trimToEmpty(colArray[i]);

				String dataSetKey = columns.get(i).dataSetKey;
				String key = columns.get(i).key;
				if (dataSetKey != null) {
					if ("<auto>".equals(value)) {
						dataSource.resetFixedValue(dataSetKey, key);
					} else {
						log.debug("Setting data set entry for " + this + " to value=" + value);
						dataSource.setFixedValue(dataSetKey, key, value);
					}
				} else {
					log.debug("Setting property for " + this + " to value=" + value);
					config.put(key, value);
				}
			}

			command.run();
		}
	} catch (IOException ex) {
		throw new JFunkException("Error processing CSV data", ex);
	}
}
 
Developer: mgm-tp, Project: jfunk, Lines: 62, Source: CsvDataProcessor.java

Example 14: readDataFile

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
/**
 * Reads a semicolon-delimited CSV file into a list of {@link SeriesPoint} objects. The file is
 * expected to have two numeric columns which are parsed using the specified number format.
 * 
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @return the immutable result list
 */
public static List<SeriesPoint> readDataFile(final File file, final Charset charset, final NumberFormat numberFormat)
		throws IOException {
	final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(';');

	try (BufferedReader br = newReader(file, charset)) {
		boolean headerLine = true;
		List<SeriesPoint> result = newArrayListWithExpectedSize(200);

		for (String line; (line = br.readLine()) != null;) {
			try {
				if (headerLine) {
					headerLine = false;
				} else {
					tokenizer.reset(line);
					String[] tokens = tokenizer.getTokenArray();
					double x = numberFormat.parse(tokens[0]).doubleValue();
					double y = numberFormat.parse(tokens[1]).doubleValue();

					if (!result.isEmpty()) {
						// additional point for histogram
						SeriesPoint previousPoint = getLast(result);
						result.add(new SeriesPoint(x, previousPoint.getY()));
					}
					tokenizer.reset(line);
					result.add(new SeriesPoint(x, y));
				}
			} catch (ParseException ex) {
				throw new IOException("Error parsing number in file: " + file, ex);
			}
		}

		int size = result.size();
		if (size > 2) {
			// additional point at end for histogram
			SeriesPoint nextToLast = result.get(size - 3);
			SeriesPoint last = result.get(size - 1);
			double dX = last.getX().doubleValue() - nextToLast.getX().doubleValue();
			result.add(new SeriesPoint(last.getX().doubleValue() + dX, last.getY()));
		}
		return ImmutableList.copyOf(result);
	}
}
 
Developer: mgm-tp, Project: perfload-perfalyzer, Lines: 57, Source: PerfAlyzerUtils.java
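
To make the histogram padding concrete, here is a hypothetical input file and the series readDataFile would build from it. The values are made up and assume a NumberFormat that parses plain integers, e.g. NumberFormat.getInstance(Locale.US).

// Hypothetical semicolon-delimited input (the header line is skipped):
//
//   time;value
//   1;10
//   2;20
//   3;30
//
// Resulting SeriesPoint list (x, y):
//   (1, 10)             // first data point
//   (2, 10), (2, 20)    // extra point carries the previous y forward
//   (3, 20), (3, 30)
//   (4, 30)             // trailing point appended after the loop (dX = 1)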


Note: The org.apache.commons.lang3.text.StrTokenizer.getTokenArray examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.