本文整理汇总了Java中org.apache.commons.lang3.text.StrTokenizer类的典型用法代码示例。如果您正苦于以下问题:Java StrTokenizer类的具体用法?Java StrTokenizer怎么用?Java StrTokenizer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
StrTokenizer类属于org.apache.commons.lang3.text包,在下文中一共展示了StrTokenizer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getWriter
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Builds a writer that persists query results into the configured results-store
 * table, resolving the table path and schema as the system user.
 */
private Writer getWriter(OptionManager options, SchemaConfig.SchemaInfoProvider infoProvider) throws IOException {
    // Resolve the configured '.'-separated table path, honouring the parser's quote character.
    final String tablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
    final List<String> tableComponents =
        new StrTokenizer(tablePath, '.', ParserConfig.QUOTING.string.charAt(0))
            .setIgnoreEmptyTokens(true)
            .getTokenList();

    // Query results are always stored as the system user.
    final SchemaPlus rootSchema = context.getRootSchema(
        SchemaConfig
            .newBuilder(SystemUser.SYSTEM_USERNAME)
            .setProvider(infoProvider)
            .build());
    final AbstractSchema mutableSchema = SchemaUtilities.resolveToMutableSchemaInstance(
        rootSchema, Util.skipLast(tableComponents), true, MutationType.TABLE);

    // Query results are stored in arrow format. If need arises, we can change
    // this to a configuration option.
    final Map<String, Object> storageOptions = ImmutableMap.<String, Object> of("type",
        ArrowFormatPlugin.ARROW_DEFAULT_NAME);
    final CreateTableEntry tableEntry =
        mutableSchema.createNewTable(Util.last(tableComponents), WriterOptions.DEFAULT, storageOptions);
    return tableEntry.getWriter(null);
}
示例2: tokenize
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Splits {@code x} into tokens around {@code delimiterString}; effectively a
 * friendly wrapper around {@link StrTokenizer}.
 * Use *single* quotes to protect a span from being split.
 *
 * @param x the string to tokenize; may be {@code null}
 * @param delimiterString the delimiter separating tokens
 * @return the trimmed tokens, or {@code null} when {@code x} is {@code null}
 */
public static ArrayList<String> tokenize(String x, String delimiterString){
    if(x == null){
        return null;
    }
    // Hack to allow empty tokens to be passed at the command line: an empty
    // quoted token '' becomes ' ' (a quoted space) so the tokenizer keeps it;
    // the trim pass below reduces it back to an empty string.
    x= x.replace("''", "' '");
    // See also http://stackoverflow.com/questions/38161437/inconsistent-behaviour-of-strtokenizer-to-split-string
    StrTokenizer str= new StrTokenizer(x);
    str.setTrimmerMatcher(StrMatcher.spaceMatcher());
    str.setDelimiterString(delimiterString);
    str.setQuoteChar('\'');
    // str.setIgnoreEmptyTokens(false);
    // Copy instead of down-casting: getTokenList() is only documented to return
    // a List, so the previous (ArrayList<String>) cast was fragile.
    ArrayList<String> tokens= new ArrayList<String>(str.getTokenList());
    for(int i= 0; i < tokens.size(); i++){
        tokens.set(i, tokens.get(i).trim());
    }
    return tokens;
}
示例3: fromString
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Parses the textual form of a field path into a {@link FieldPath}.
 *
 * @param string the serialized path; blank input yields {@code null}
 * @return the parsed path, or {@code null} when the input is blank
 */
public static final FieldPath fromString(String string) {
    if (StringUtils.isBlank(string)) {
        return null;
    }
    List<FieldPathComponent> parsed = Lists.newLinkedList();
    for (String part : new StrTokenizer(string, DELIMITER_MATCHER).getTokenList()) {
        // The special item token maps to the shared ITEM component; anything
        // else is treated as a property name.
        parsed.add(ITEM_TOKEN.equals(part)
            ? FieldPathComponent.ITEM
            : new FieldPathPropertyComponent(part));
    }
    return new FieldPath(parsed);
}
示例4: test
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Micro-benchmark helper: tokenizes every line of {@code source} with the
 * supplied tokenizer and returns the elapsed wall-clock time in milliseconds.
 *
 * @param tokenizer pre-configured tokenizer, reset for every row
 * @param source UTF-8 text file read line by line
 * @return elapsed time in milliseconds
 * @throws IOException if the file cannot be opened or read
 */
private long test(StrTokenizer tokenizer, File source) throws IOException {
    FileInputStream fis = new FileInputStream(source);
    BufferedReader br = new BufferedReader(new InputStreamReader(fis, "utf8"));
    try {
        // keep track of time while iterating
        long start = System.currentTimeMillis();
        String row = br.readLine();
        while (row != null) {
            tokenizer.reset(row);
            // Tokenize purely for timing; the resulting columns are not inspected.
            tokenizer.getTokenArray();
            row = br.readLine();
        }
        return System.currentTimeMillis() - start;
    } finally {
        // Previously the reader leaked if readLine() threw; always close it
        // (closing br also closes the wrapped reader and stream).
        br.close();
    }
}
示例5: testCsvUnquoted
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/** Verifies unquoted CSV splitting: whitespace is preserved and empty fields become null. */
@Test
public void testCsvUnquoted() throws IOException {
    StrTokenizer tokenizer = new StrTokenizer();
    tokenizer.setDelimiterString(",");
    tokenizer.setEmptyTokenAsNull(true);
    tokenizer.setIgnoreEmptyTokens(false);

    // Plain comma-separated row: surrounding whitespace must survive intact.
    tokenizer.reset("121,432423, 9099053,Frieda karla L.,DC.,Ahrens");
    String[] expected = {"121", "432423", " 9099053", "Frieda karla L.", "DC.", "Ahrens"};
    String[] actual = tokenizer.getTokenArray();
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], actual[i]);
    }

    // Leading empty fields must come back as nulls rather than being dropped.
    tokenizer.reset(",,,,zzz ");
    actual = tokenizer.getTokenArray();
    for (int i = 0; i < 4; i++) {
        assertNull(actual[i]);
    }
    assertEquals("zzz ", actual[4]);
}
示例6: parseEnvironmentVariables
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Tokenizes the configured environment-variable string (space-separated,
 * quote-aware) into a NAME -> VALUE map; aborts the build on a token without '='.
 */
private Map<String, String> parseEnvironmentVariables(final BuildListener listener) throws AbortException {
    final Map<String, String> parsed = new HashMap<String, String>();
    final StrTokenizer tokenizer =
        new StrTokenizer(environmentVariables, spaceMatcher(), quoteMatcher());
    for (final String token : tokenizer.getTokenList()) {
        // Split at the first '=' only, so values may themselves contain '='.
        final int eq = token.indexOf('=');
        if (eq < 0) {
            abort(listener, "Invalid environment variable: " + token);
        } else {
            parsed.put(token.substring(0, eq), token.substring(eq + 1));
        }
    }
    return parsed;
}
示例7: executeCommand
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Tokenizes the input line, resolves the first token as the command name and
 * executes it with the remaining tokens as arguments.
 *
 * @param line raw command line entered by the user
 * @return the command's result code, COMMAND_FAILED for unknown/blank input,
 *         or ERROR_CODE when execution throws a CommandException
 */
@Override
protected int executeCommand(String line) {
    String[] tokens = new StrTokenizer(line).getTokenArray();
    // Guard: a blank line yields no tokens, and tokens[0] below would throw
    // ArrayIndexOutOfBoundsException.
    if (tokens.length == 0) {
        return CliConstants.COMMAND_FAILED;
    }
    String action = tokens[0];
    String[] actionArgs = Arrays.copyOfRange(tokens, 1, tokens.length);
    if (logger.isDebugEnabled()) {
        logger.debug("Executing command action: {}, Tokens: {}", action, tokens.length);
    }
    Command<StratosCommandContext> command = commands.get(action);
    if (command == null) {
        System.out.println(action + ": command not found.");
        return CliConstants.COMMAND_FAILED;
    }
    try {
        return command.execute(context, actionArgs, new Option[0]);
    } catch (CommandException e) {
        if (logger.isErrorEnabled()) {
            logger.error("Error executing command: " + action, e);
        }
        return CliConstants.ERROR_CODE;
    }
}
示例8: apacheCommonsTokenizer
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/** Splits {@code text} on commas with Commons Lang's StrTokenizer, printing each token. */
public static void apacheCommonsTokenizer(String text){
    StrTokenizer tokenizer = new StrTokenizer(text, ",");
    for (String token : tokenizer.getTokenList()) {
        out.println(token);
    }
}
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:8,代码来源:App.java
示例9: storeQueryResultsIfNeeded
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * When enabled, add a writer rel on top of the given rel to catch the output and write to configured store table.
 *
 * @param config   parser config supplying the quoting character used to split the store-table path
 * @param context  query context; supplies the option manager, root schema and schema-info provider
 * @param inputRel relational expression producing the query results
 * @return {@code inputRel} unchanged when result-storing is disabled, otherwise a
 *         {@code WriterRel} wrapping it that writes into the configured store table
 */
public static Rel storeQueryResultsIfNeeded(final SqlParser.Config config, final QueryContext context,
final Rel inputRel) {
final OptionManager options = context.getOptions();
// NOTE(review): bool_val is unboxed here; this assumes a present STORE_QUERY_RESULTS
// option always carries a non-null bool_val — confirm against OptionManager's contract.
final boolean storeResults = options.getOption(STORE_QUERY_RESULTS.getOptionName()) != null ?
options.getOption(STORE_QUERY_RESULTS.getOptionName()).bool_val : false;
if (!storeResults) {
return inputRel;
}
// store query results as the system user
final SchemaPlus systemUserSchema = context.getRootSchema(
SchemaConfig.newBuilder(SystemUser.SYSTEM_USERNAME)
.setProvider(context.getSchemaInfoProvider())
.build());
// Split the '.'-separated table path, honouring the parser's quoting character.
final String storeTablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
final List<String> storeTable =
new StrTokenizer(storeTablePath, '.', config.quoting().string.charAt(0))
.setIgnoreEmptyTokens(true)
.getTokenList();
// Every path component except the last names the (mutable) schema; the last is the table name.
final AbstractSchema schema = SchemaUtilities.resolveToMutableSchemaInstance(systemUserSchema,
Util.skipLast(storeTable), true, MutationType.TABLE);
// Query results are stored in arrow format. If need arises, we can change this to a configuration option.
final Map<String, Object> storageOptions = ImmutableMap.<String, Object>of("type", ArrowFormatPlugin.ARROW_DEFAULT_NAME);
final CreateTableEntry createTableEntry = schema.createNewTable(Util.last(storeTable), WriterOptions.DEFAULT, storageOptions);
final RelTraitSet traits = inputRel.getCluster().traitSet().plus(Rel.LOGICAL);
return new WriterRel(inputRel.getCluster(), traits, inputRel, createTableEntry, inputRel.getRowType());
}
示例10: toPathComponents
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Convert fs path to list of strings: {@code /a/b/c -> [a, b, c]}.
 * Components quoted with {@code SqlUtils.QUOTE} are kept intact.
 *
 * @param fsPath a path string; may be null
 * @return list of path components, or {@code EMPTY_SCHEMA_PATHS} for null input
 */
public static List<String> toPathComponents(String fsPath) {
    if (fsPath == null) {
        return EMPTY_SCHEMA_PATHS;
    }
    return new StrTokenizer(fsPath, SLASH_CHAR, SqlUtils.QUOTE)
        .setIgnoreEmptyTokens(true)
        .getTokenList();
}
示例11: _tokenizeString
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Lower-cases the input and splits it on whitespace, honouring double quotes;
 * quote characters themselves are stripped from the resulting tokens.
 */
static String[] _tokenizeString(String string) {
    final StrTokenizer tokenizer = new StrTokenizer()
        .setDelimiterMatcher(StrMatcher.trimMatcher())
        .setQuoteMatcher(StrMatcher.quoteMatcher())
        .setTrimmerMatcher(StrMatcher.trimMatcher())
        .setIgnoredMatcher(StrMatcher.quoteMatcher());
    return tokenizer.reset(string.toLowerCase()).getTokenArray();
}
示例12: target
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Lists all finders generated for the given entity up to the requested depth,
 * optionally restricted to finders containing at least one of the comma-separated
 * filter tokens (matched case-insensitively).
 */
@CliCommand(value = "finder list", help = "List all finders for a given target (must be an entity)")
public SortedSet<String> listFinders(
    @CliOption(key = "class", mandatory = false, unspecifiedDefaultValue = "*", optionContext = UPDATE_PROJECT, help = "The controller or entity for which the finders are generated") final JavaType typeName,
    @CliOption(key = { "", "depth" }, mandatory = false, unspecifiedDefaultValue = "1", specifiedDefaultValue = "1", help = "The depth of attribute combinations to be generated for the finders") final Integer depth,
    @CliOption(key = "filter", mandatory = false, help = "A comma separated list of strings that must be present in a filter to be included") final String filter) {
    Validate.isTrue(depth >= 1, "Depth must be at least 1");
    Validate.isTrue(depth <= 3, "Depth must not be greater than 3");

    final SortedSet<String> finders = finderOperations.listFindersFor(typeName, depth);
    if (StringUtils.isBlank(filter)) {
        return finders;
    }

    // Lower-case every comma-separated filter token for case-insensitive matching.
    final Set<String> needles = new HashSet<String>();
    for (final String token : new StrTokenizer(filter, ",").getTokenArray()) {
        needles.add(token.toLowerCase());
    }
    if (needles.isEmpty()) {
        return finders;
    }

    // Keep each finder that contains at least one of the filter tokens.
    final SortedSet<String> matching = new TreeSet<String>();
    for (final String finder : finders) {
        final String haystack = finder.toLowerCase();
        for (final String needle : needles) {
            if (haystack.contains(needle)) {
                matching.add(finder);
                break;
            }
        }
    }
    return matching;
}
示例13: extractPathWithinPattern
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Given a pattern and a full path, determine the pattern-mapped part.
 * <p>
 * For example:
 * <ul>
 * <li>'<code>/docs/cvs/commit.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> ''</li>
 * <li>'<code>/docs/*</code>' and '<code>/docs/cvs/commit</code> -> '
 * <code>cvs/commit</code>'</li>
 * <li>'<code>/docs/cvs/*.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> '<code>commit.html</code>'</li>
 * <li>'<code>/docs/**</code>' and '<code>/docs/cvs/commit</code> -> '
 * <code>cvs/commit</code>'</li>
 * <li>'<code>/docs/**\/*.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> '<code>cvs/commit.html</code>'</li>
 * <li>'<code>/*.html</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>docs/cvs/commit.html</code>'</li>
 * <li>'<code>*.html</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>/docs/cvs/commit.html</code>'</li>
 * <li>'<code>*</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>/docs/cvs/commit.html</code>'</li>
 * </ul>
 * <p>
 * Assumes that {@link #match} returns <code>true</code> for '
 * <code>pattern</code>' and '<code>path</code>', but does
 * <strong>not</strong> enforce this.
 */
public String extractPathWithinPattern(final String pattern,
final String path) {
// Split both pattern and path on the separator, dropping empty segments.
final String[] patternParts = new StrTokenizer(pattern, pathSeparator)
.setIgnoreEmptyTokens(true).getTokenArray();
final String[] pathParts = new StrTokenizer(path, pathSeparator)
.setIgnoreEmptyTokens(true).getTokenArray();
final StringBuilder builder = new StringBuilder();
// Add any path parts that have a wildcarded pattern part.
// 'puts' counts how many segments have been appended so far; it decides
// whether a separator must be written before the next segment.
int puts = 0;
for (int i = 0; i < patternParts.length; i++) {
final String patternPart = patternParts[i];
// A pattern segment containing '*' or '?' is wildcarded; copy the
// corresponding path segment (when one exists at the same index).
if ((patternPart.indexOf('*') > -1 || patternPart.indexOf('?') > -1)
&& pathParts.length >= i + 1) {
if (puts > 0 || i == 0 && !pattern.startsWith(pathSeparator)) {
builder.append(pathSeparator);
}
builder.append(pathParts[i]);
puts++;
}
}
// Append any trailing path parts.
for (int i = patternParts.length; i < pathParts.length; i++) {
if (puts > 0 || i > 0) {
builder.append(pathSeparator);
}
builder.append(pathParts[i]);
}
return builder.toString();
}
示例14: splitTablePatterns
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/** Splits the raw comma-separated table list into patterns; '"' quoting protects embedded commas. */
private List<String> splitTablePatterns() {
    final String raw = rawTables();
    return raw == null
        ? Collections.<String>emptyList()
        : new StrTokenizer(raw, ',', '"').getTokenList();
}
示例15: parseKeyValue
import org.apache.commons.lang3.text.StrTokenizer; //导入依赖的package包/类
/**
 * Parses one "key{keySeparator}value" line, splits the value part on the
 * configured separator (when one is set), and passes [key, value...] to
 * storeLine(). Lines missing the key separator are skipped unless
 * keySeparatorOptional is set, in which case the whole trimmed line becomes
 * the key with an empty value.
 */
public void parseKeyValue(String line) {
    final int sepAt = line.indexOf(keySeparator);
    final String key;
    final String rawValue;
    if (sepAt >= 0) {
        key = line.substring(0, sepAt).trim();
        rawValue = line.substring(sepAt + keySeparator.length()).trim();
    } else if (keySeparatorOptional) {
        key = line.trim();
        rawValue = "";
    } else {
        // No separator and it is mandatory: silently skip this line.
        return;
    }

    final String[] values;
    if (separator == null) {
        // No value separator configured: the whole value string is one entry.
        values = new String[]{rawValue};
    } else {
        values = createStrTokenizer(rawValue).getTokenArray();
    }

    // Prepend the key to the value array before handing it off.
    final String[] record = new String[values.length + 1];
    record[0] = key;
    System.arraycopy(values, 0, record, 1, values.length);
    storeLine(record);
}