

Java StrTokenizer.getCSVInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.commons.lang3.text.StrTokenizer.getCSVInstance. If you are wondering what StrTokenizer.getCSVInstance does, how to call it, or what real-world uses look like, the selected code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.commons.lang3.text.StrTokenizer.


The following presents 8 code examples of the StrTokenizer.getCSVInstance method, sorted by popularity by default.
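Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: obtain the preconfigured CSV tokenizer, switch the delimiter from ',' to ';', and reuse the single instance via reset() for each input line. The class name and the input string are made up for illustration; only StrTokenizer calls that also appear in the examples below are used.

import org.apache.commons.lang3.text.StrTokenizer;

public class CsvTokenizerSketch {

	public static void main(String[] args) {
		// getCSVInstance() returns a tokenizer preconfigured for CSV parsing:
		// comma delimiter, double quote as quote character, trimmed tokens.
		StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
		tokenizer.setDelimiterChar(';'); // the examples below all use ';' instead of ','

		// reset() lets the same instance be reused line by line.
		tokenizer.reset("a;\"b;c\";d"); // hypothetical input line
		for (String token : tokenizer.getTokenArray()) {
			System.out.println(token); // prints: a, then b;c, then d (the quoted ';' stays inside its token)
		}
	}
}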

Example 1: load

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
/**
 * Loads the CSV file from the file system.
 */
public void load() throws IOException {
	lines = Lists.newLinkedList();
	headers = null;

	StrTokenizer st = StrTokenizer.getCSVInstance();
	st.setDelimiterChar(';');

	// Default encoding is used (--> UTF-8).
	BufferedReader br = null;
	try {
		br = new BufferedReader(new FileReader(fileName));
		for (String line = null; (line = br.readLine()) != null;) {
			String trimmedLine = StringUtils.trimToNull(line);
			if (trimmedLine == null || trimmedLine.startsWith("#")) {
				continue;
			}
			st.reset(line);
			ArrayList<String> tokens = Lists.newArrayList(st.getTokenArray());
			if (headers == null) {
				headers = tokens;
			} else {
				lines.add(tokens);
			}
		}
	} finally {
		IOUtils.closeQuietly(br);
	}
}
 
Developer ID: mgm-tp, Project: jfunk, Lines: 32, Source: CsvDataSource.java

Example 2: loadData

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
private List<? extends List<String>> loadData(final File file) throws IOException {
	try (BufferedReader br = newReader(file, Charsets.UTF_8)) {
		List<List<String>> rows = newArrayList();
		StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
		tokenizer.setDelimiterChar(DELIMITER);

		for (String line; (line = br.readLine()) != null; ) {
			tokenizer.reset(line);
			List<String> tokenList = tokenizer.getTokenList();
			rows.add(tokenList);
		}

		return rows;
	}
}
 
Developer ID: mgm-tp, Project: perfload-perfalyzer, Lines: 16, Source: EmailReporter.java

Example 3: readAggregatedMap

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public static Map<String, String> readAggregatedMap(final File executionsFile, final Charset charset) throws IOException {
	final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(';');

	Map<String, String> result = newHashMapWithExpectedSize(11);

	List<String> lines = Files.readLines(executionsFile, charset);
	String[] headers = null;

	for (String line : lines) {
		tokenizer.reset(line);
		String[] tokens = tokenizer.getTokenArray();

		if (headers == null) {
			headers = tokens;
		} else {
			String[] data = tokens; // the line was already tokenized into 'tokens' above

			String operation = data[0];
			for (int i = 1; i < headers.length; ++i) {
				result.put(operation + "." + headers[i], data[i]);
			}
		}
	}

	return result;
}
 
Developer ID: mgm-tp, Project: perfload-perfalyzer, Lines: 29, Source: PerfAlyzerUtils.java

Example 4: readLoadProfileEvents

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException {
	ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create();
	String loadProfile = testplan.elementTextTrim("loadProfile");

	// relative to testplan
	File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile);

	try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) {
		StrTokenizer st = StrTokenizer.getCSVInstance();
		st.setDelimiterChar(';');

		for (String line = null; (line = br.readLine()) != null;) {
			// ignore lines that are blank, commented out, or represent markers
			if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) {
				continue;
			}

			st.reset(line);
			String[] tokens = st.getTokenArray();

			long startTime = Long.parseLong(tokens[0]);
			String operation = tokens[1];
			String target = tokens[2];
			int daemonId = Integer.parseInt(tokens[3]);
			int processId = Integer.parseInt(tokens[4]);

			eventsByProcess.put(new ProcessKey(daemonId, processId), new LoadProfileEvent(startTime, operation, target,
					daemonId, processId));
		}
	}

	return eventsByProcess;
}
 
Developer ID: mgm-tp, Project: perfload-core, Lines: 34, Source: XmlConfigReader.java

Example 5: processFile

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
/**
 * Processes the specified CSV file. For every line but the header line (which is required), the
 * specified command is executed.
 * 
 * @param reader
 *            the reader for loading the CSV data
 * @param delimiter
 *            the column separator
 * @param quoteChar
 *            the quote character ('\0' for no quoting)
 * @param command
 *            the command (i.e. a Groovy closure if used in a Groovy script) to be executed for
 *            every processed line
 */
public void processFile(final Reader reader, final String delimiter, final char quoteChar, final Runnable command) {
	try {
		List<String> inputLines = CharStreams.readLines(reader);

		StrTokenizer st = StrTokenizer.getCSVInstance();
		st.setDelimiterString(delimiter);
		if (quoteChar != '\0') {
			st.setQuoteChar(quoteChar);
		} else {
			st.setQuoteMatcher(StrMatcher.noneMatcher());
		}

		// extract header
		String headerLine = inputLines.remove(0);
		List<Column> columns = initColumns(st, headerLine);
		for (String line : inputLines) {
			st.reset(line);
			String[] colArray = st.getTokenArray();
			int len = colArray.length;
			checkState(len == columns.size(), "Mismatch between number of header columns and number of line columns.");

			DataSource dataSource = dataSourceProvider.get();
			Configuration config = configProvider.get();
			for (int i = 0; i < len; ++i) {
				String value = StringUtils.trimToEmpty(colArray[i]);

				String dataSetKey = columns.get(i).dataSetKey;
				String key = columns.get(i).key;
				if (dataSetKey != null) {
					if ("<auto>".equals(value)) {
						dataSource.resetFixedValue(dataSetKey, key);
					} else {
						log.debug("Setting data set entry for " + this + " to value=" + value);
						dataSource.setFixedValue(dataSetKey, key, value);
					}
				} else {
					log.debug("Setting property for " + this + " to value=" + value);
					config.put(key, value);
				}
			}

			command.run();
		}
	} catch (IOException ex) {
		throw new JFunkException("Error processing CSV data", ex);
	}
}
 
Developer ID: mgm-tp, Project: jfunk, Lines: 62, Source: CsvDataProcessor.java

Example 6: mergeFiles

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public void mergeFiles() throws IOException {
	if (!inputDir.isDirectory()) {
		throw new IllegalArgumentException("The input File must be a directory");
	}

	StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(DELIMITER);
	Map<String, FileChannel> destChannels = newHashMap();
	List<OutputStream> outputStreams = newArrayList();
	File[] filesInInputDirectory = inputDir.listFiles();

	try {
		for (File file : filesInInputDirectory) {
			FileInputStream fis = null;
			try {
				fis = new FileInputStream(file);
				for (Scanner scanner = new Scanner(fis, Charsets.UTF_8.name()); scanner.hasNext();) {
					String line = scanner.nextLine();
					tokenizer.reset(line);

					List<String> tokenList = tokenizer.getTokenList();
					String key = tokenList.get(sortCriteriaColumn);
					FileChannel destChannel = destChannels.get(key);
					if (destChannel == null) {
						FileOutputStream fos = new FileOutputStream(new File(outputDir, FILE_TYPE + "_" + key + ".out"));
						outputStreams.add(fos);
						destChannel = fos.getChannel();
						destChannels.put(key, destChannel);

						// Write the header... (could be improved)
						IoUtilities.writeLineToChannel(destChannel, getHeader(), Charsets.UTF_8);
					}

					StrBuilder outputLine = new StrBuilder();
					for (String s : tokenList) {
						StrBuilderUtils.appendEscapedAndQuoted(outputLine, DELIMITER, s);
					}
					IoUtilities.writeLineToChannel(destChannel, outputLine.toString(), Charsets.UTF_8);
				}
			} finally {
				closeQuietly(fis);
			}
		}
	} finally {
		outputStreams.forEach(IOUtils::closeQuietly);
	}

}
 
Developer ID: mgm-tp, Project: perfload-perfalyzer, Lines: 49, Source: BinnedFilesMerger.java

Example 7: readDataFile

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
/**
 * Reads a semicolon-delimited CSV file into a list. Each line in the result list will be
 * another list of {@link Number} objects. The file is expected to have two numeric columns
 * which are parsed using the specified number format.
 * 
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @return the immutable result list
 */
public static List<SeriesPoint> readDataFile(final File file, final Charset charset, final NumberFormat numberFormat)
		throws IOException {
	final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(';');

	try (BufferedReader br = newReader(file, charset)) {
		boolean headerLine = true;
		List<SeriesPoint> result = newArrayListWithExpectedSize(200);

		for (String line; (line = br.readLine()) != null;) {
			try {
				if (headerLine) {
					headerLine = false;
				} else {
					tokenizer.reset(line);
					String[] tokens = tokenizer.getTokenArray();
					double x = numberFormat.parse(tokens[0]).doubleValue();
					double y = numberFormat.parse(tokens[1]).doubleValue();

					if (!result.isEmpty()) {
						// additional point for histogram
						SeriesPoint previousPoint = getLast(result);
						result.add(new SeriesPoint(x, previousPoint.getY()));
					}
					result.add(new SeriesPoint(x, y));
				}
			} catch (ParseException ex) {
				throw new IOException("Error parsing number in file: " + file, ex);
			}
		}

		int size = result.size();
		if (size > 2) {
			// additional point at end for histogram
			SeriesPoint nextToLast = result.get(size - 3);
			SeriesPoint last = result.get(size - 1);
			double dX = last.getX().doubleValue() - nextToLast.getX().doubleValue();
			result.add(new SeriesPoint(last.getX().doubleValue() + dX, last.getY()));
		}
		return ImmutableList.copyOf(result);
	}
}
 
Developer ID: mgm-tp, Project: perfload-perfalyzer, Lines: 57, Source: PerfAlyzerUtils.java

Example 8: mergeFiles

import org.apache.commons.lang3.text.StrTokenizer; // import the package/class this method depends on
public void mergeFiles(final List<PerfAlyzerFile> inputFiles) throws IOException {
	Predicate<PerfAlyzerFile> predicate1 = perfAlyzerFilePartsMatchWildcards("measuring", "*", "requestsPerInterval");
	Predicate<PerfAlyzerFile> predicate2 = perfAlyzerFilePartsMatchWildcards("measuring", "*", "aggregatedResponseTimes");

	Predicate<PerfAlyzerFile> predicateOr = predicate1.or(predicate2);

	Set<PerfAlyzerFile> paFiles = inputFiles.stream().filter(predicateOr).collect(toSet());
	ListMultimap<String, PerfAlyzerFile> byOperationMultimap = ArrayListMultimap.create();

	for (PerfAlyzerFile paf : paFiles) {
		byOperationMultimap.put(paf.getFileNameParts().get(1), paf);
	}

	StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
	tokenizer.setDelimiterChar(DELIMITER);

	for (String operation : byOperationMultimap.keySet()) {
		List<PerfAlyzerFile> list = byOperationMultimap.get(operation);

		checkState(list.size() == 2, "Two files are required per operation but found %s for '%s'", list.size(), operation);

		List<String> resultLines = newArrayListWithCapacity(2);

		PerfAlyzerFile paf1 = list.stream().filter(predicate1).findFirst().get();
		File file1 = new File(binnedDir, paf1.getFile().getPath());
		List<String> lines1 = Files.readLines(file1, Charsets.UTF_8);

		PerfAlyzerFile paf2 = list.stream().filter(predicate2).findFirst().get();
		File file2 = new File(binnedDir, paf2.getFile().getPath());
		List<String> lines2 = Files.readLines(file2, Charsets.UTF_8);

		if (lines1.size() == lines2.size()) {
			File resultFile = new File(binnedDir, paf1.copy().removeFileNamePart(2).addFileNamePart("aggregated").getFile().getPath());

			for (int i = 0; i < lines1.size(); ++i) {
				String line1 = get(lines1, i);
				String line2 = get(lines2, i);
				resultLines.add(line1 + DELIMITER + line2);
			}

			writeLines(resultFile, Charsets.UTF_8.name(), resultLines);

			deleteQuietly(file1);
			deleteQuietly(file2);
		} else {
			log.warn("Files to merge must have the same number of lines. Merging not possible: {}", list);
		}
	}
}
 
Developer ID: mgm-tp, Project: perfload-perfalyzer, Lines: 50, Source: RequestFilesMerger.java


Note: The org.apache.commons.lang3.text.StrTokenizer.getCSVInstance method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.