本文整理汇总了Java中java.io.StreamTokenizer.resetSyntax方法的典型用法代码示例。如果您正苦于以下问题:Java StreamTokenizer.resetSyntax方法的具体用法?Java StreamTokenizer.resetSyntax怎么用?Java StreamTokenizer.resetSyntax使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类java.io.StreamTokenizer
的用法示例。
在下文中一共展示了StreamTokenizer.resetSyntax方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: loadCmdFile
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Reads the named command file and appends each whitespace-delimited or
 * quoted token to {@code args}.
 *
 * @param name path of the command file to read
 * @param args buffer receiving one entry per token
 * @throws IOException if the file cannot be opened or read
 */
private static void loadCmdFile(String name, ListBuffer<String> args)
    throws IOException
{
    // try-with-resources closes the reader even when nextToken() throws;
    // the original code leaked the stream on a read error.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);      // space..255 form words (space overridden below)
        st.whitespaceChars(0, ' ');  // controls and space separate tokens
        st.commentChar('#');         // '#' starts a comment to end of line
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.append(st.sval);
        }
    }
}
示例2: loadCmdFile
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Reads the named command file and adds each whitespace-delimited or quoted
 * token to {@code args}.
 *
 * @param name path of the command file to read
 * @param args list receiving one entry per token
 * @throws IOException if the file cannot be opened or read
 */
private static void loadCmdFile(String name, List<String> args)
    throws IOException
{
    // try-with-resources closes the reader even when nextToken() throws;
    // the original code leaked the stream on a read error.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);      // space..255 form words (space overridden below)
        st.whitespaceChars(0, ' ');  // controls and space separate tokens
        st.commentChar('#');         // '#' starts a comment to end of line
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.add(st.sval);
        }
    }
}
示例3: loadCmdFile
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Reads the named command file and adds each whitespace-delimited or quoted
 * token to {@code args}.  The parameter keeps its historical raw
 * {@code List} type so existing callers compile unchanged.
 *
 * @param name path of the command file to read
 * @param args list receiving one entry per token
 * @throws IOException if the file cannot be opened or read
 */
@SuppressWarnings("unchecked")
private static void loadCmdFile(String name, List args)
    throws IOException
{
    // Fix 1: TT_EOF is a static field; access it via the class, not the
    // instance (was st.TT_EOF).
    // Fix 2: try-with-resources closes the reader even when nextToken()
    // throws; the original code leaked the stream on a read error.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);      // space..255 form words (space overridden below)
        st.whitespaceChars(0, ' ');  // controls and space separate tokens
        st.commentChar('#');         // '#' starts a comment to end of line
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.add(st.sval);
        }
    }
}
示例4: parseArgsLine
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Parses an option line such as
 *   -genkeypair -dname "CN=Me"
 * into whitespace-separated (optionally quoted) words, expands each word via
 * {@code PropertyExpander.expand}, and appends the results to {@code list}.
 *
 * @param list destination list for the expanded tokens
 * @param s the option line to parse
 * @throws IOException if tokenizing fails
 * @throws PropertyExpander.ExpandException if property expansion fails
 */
private static void parseArgsLine(List<String> list, String s)
        throws IOException, PropertyExpander.ExpandException {
    StreamTokenizer tok = new StreamTokenizer(new StringReader(s));
    tok.resetSyntax();
    // Control characters and space separate tokens ...
    tok.whitespaceChars(0x00, 0x20);
    // ... every other 8-bit character belongs to a word ...
    tok.wordChars(0x21, 0xFF);
    // ... except the quotation mark and apostrophe, which delimit strings.
    tok.quoteChar('"');
    tok.quoteChar('\'');
    while (tok.nextToken() != StreamTokenizer.TT_EOF) {
        list.add(PropertyExpander.expand(tok.sval));
    }
}
示例5: createTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Builds the StreamTokenizer used to scan a command script.
 *
 * @param script command script to be parsed
 * @return a tokenizer configured for the command-script syntax
 */
private static StreamTokenizer createTokenizer(final String script) {
    final StreamTokenizer t = new StreamTokenizer(new StringReader(script));
    t.resetSyntax();
    t.wordChars(0, 255);        // every character is part of a word by default
    t.whitespaceChars(0, ' ');  // controls and blanks delimit tokens
    t.commentChar('#');         // '#' comments run to end of line
    t.quoteChar('"');           // double-quoted strings
    t.quoteChar('\'');          // single-quoted strings
    t.eolIsSignificant(true);   // a newline marks the end of a command
    t.ordinaryChar(';');        // command separator, returned as its own token
    t.ordinaryChar('|');        // pipe separator, returned as its own token
    return t;
}
示例6: setup
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Sets up the stream tokenizer {@code st} over this object as the character
 * source.  NOTE(review): passing {@code this} to the StreamTokenizer
 * constructor implies the enclosing class is a Reader — confirm from the
 * class declaration.
 *
 * Call order below is significant: later wordChars/whitespaceChars calls
 * override earlier ones for overlapping character ranges.
 */
private void setup() {
st = new StreamTokenizer(this);
st.resetSyntax();
st.eolIsSignificant(false);   // newlines are treated as ordinary whitespace
st.lowerCaseMode(true);       // word tokens are lower-cased automatically
// Parse numbers as words
st.wordChars('0', '9');
st.wordChars('-', '.');       // covers '-', '.', and '/' (the range 0x2D-0x2E is '-'..'.')
// Characters as words — this broad range supersedes the two narrower
// wordChars calls above (kept, presumably, for documentation of intent)
st.wordChars('\u0000', '\u00FF');
// Skip comments: '%' starts a comment that runs to end of line
st.commentChar('%');
// Skip whitespace and newlines: space, plus TAB/LF/VT/FF/CR/SO (0x09-0x0E)
st.whitespaceChars(' ', ' ');
st.whitespaceChars('\u0009', '\u000e');
}
示例7: initTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Initializes the stream tokenizer for field-separated input using the
 * configured field separator and quote ("enclosure") characters.
 *
 * @param tokenizer the tokenizer to initialize
 * @throws IllegalArgumentException if any configured enclosure is not a
 *         single character
 */
private void initTokenizer(StreamTokenizer tokenizer) {
tokenizer.resetSyntax();
// Everything below the space character separates tokens; printable
// characters form words.
tokenizer.whitespaceChars(0, (' ' - 1));
tokenizer.wordChars(' ', '\u00FF');
// The field separator also splits tokens.  NOTE(review): only the first
// character of m_FieldSeparator is used — a multi-character separator is
// silently truncated here.
tokenizer.whitespaceChars(m_FieldSeparator.charAt(0),
m_FieldSeparator.charAt(0));
// tokenizer.commentChar('%');
// m_Enclosures is a comma-separated list of single quote characters,
// each registered as a quote char (e.g. " or ').
String[] parts = m_Enclosures.split(",");
for (String e : parts) {
if (e.length() > 1 || e.length() == 0) {
throw new IllegalArgumentException(
"Enclosures can only be single characters");
}
tokenizer.quoteChar(e.charAt(0));
}
// End-of-line is reported as a token so rows can be detected.
tokenizer.eolIsSignificant(true);
}
示例8: setSyntax
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * This method sets the syntax of the StreamTokenizer. i.e. set the
 * whitespace, comment and delimit chars.
 *
 * Call order is significant: later calls override earlier ones for
 * overlapping characters (e.g. the ordinaryChar calls carve single-token
 * characters out of the word range established just above them).
 *
 * @param tk the tokenizer to configure
 */
protected void setSyntax(StreamTokenizer tk) {
tk.resetSyntax();                 // start from an empty syntax table
tk.eolIsSignificant(false);       // newlines are plain whitespace
tk.slashStarComments(true);       // recognize /* ... */ comments
tk.slashSlashComments(true);      // recognize // comments
tk.whitespaceChars(0, ' ');       // controls and space separate tokens
tk.wordChars(' ' + 1, '\u00ff');  // all printable chars form words, except:
tk.ordinaryChar('[');             // single-character tokens
tk.ordinaryChar(']');
tk.ordinaryChar('{');
tk.ordinaryChar('}');
tk.ordinaryChar('-');
tk.ordinaryChar('>');
tk.ordinaryChar('/');             // '/' still begins comments via the flags above
tk.ordinaryChar('*');
tk.quoteChar('"');                // double-quoted strings
tk.whitespaceChars(';', ';');     // ';' is skipped like whitespace
tk.ordinaryChar('=');             // '=' is a single-character token
}
示例9: createTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Returns a new tokenizer for an OBJ or MTL stream: printable ASCII and
 * upper ISO-8859-1 characters form words, blanks and tabs separate them,
 * and line ends are reported as tokens.
 *
 * @param reader the character source to tokenize
 * @return a tokenizer configured for OBJ/MTL content
 */
private static StreamTokenizer createTokenizer(Reader reader)
{
    StreamTokenizer st = new StreamTokenizer(reader);
    st.resetSyntax();
    st.eolIsSignificant(true);   // line structure matters in OBJ/MTL
    st.wordChars('!', '~');      // all printable ASCII characters
    st.wordChars(0x80, 0xFF);    // tolerate other ISO-8859-1 characters
    // Blanks, line terminators and tabs separate tokens.
    for (char ws : new char[] { ' ', '\n', '\r', '\t' }) {
        st.whitespaceChars(ws, ws);
    }
    return st;
}
示例10: initTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Initializes the StreamTokenizer used for reading the ARFF file.
 *
 * Call order is significant: the whitespaceChars(',') and ordinaryChar
 * calls override characters inside the word range set just above them.
 *
 * @param tokenizer the tokenizer to initialize
 */
private void initTokenizer(StreamTokenizer tokenizer) {
tokenizer.resetSyntax();                 // discard the default syntax table
tokenizer.whitespaceChars(0, ' ');       // controls and space separate tokens
tokenizer.wordChars(' ' + 1, '\u00FF');  // printable characters form words
tokenizer.whitespaceChars(',', ',');     // ',' separates values
tokenizer.commentChar('%');              // '%' starts an ARFF comment line
tokenizer.quoteChar('"');                // quoted values
tokenizer.quoteChar('\'');
tokenizer.ordinaryChar('{');             // returned as single-char tokens
tokenizer.ordinaryChar('}');             // (presumably sparse-format braces)
tokenizer.eolIsSignificant(true);        // rows are line-oriented
}
示例11: createTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Creates a tokenizer for the specified string: printable ASCII characters
 * form words, ':' and '/' are returned as individual tokens, '"' and '\''
 * delimit quoted sections, and blanks/tabs are skipped.
 *
 * @param str the string to tokenize
 * @return a configured StreamTokenizer over {@code str}
 */
private StreamTokenizer createTokenizer(String str) {
    StreamTokenizer st = new StreamTokenizer(new StringReader(str));
    st.resetSyntax();
    st.wordChars('!', '~');        // printable ASCII, codes 33..126
    st.ordinaryChar(':');          // returned as its own token
    st.ordinaryChar('/');          // returned as its own token
    st.quoteChar('"');
    st.quoteChar('\'');
    st.whitespaceChars(' ', ' ');
    st.whitespaceChars('\t', '\t');
    return st;
}
示例12: loadCmdFile
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Reads the named command file and appends each whitespace-delimited or
 * quoted token to {@code args}.
 *
 * @param name path of the command file to read
 * @param args buffer receiving one entry per token
 * @throws IOException if the file cannot be opened or read
 */
private static void loadCmdFile(String name, ListBuffer<String> args)
    throws IOException {
    // Fix 1: TT_EOF is a static field; access it via the class, not the
    // instance (was st.TT_EOF).
    // Fix 2: try-with-resources closes the reader even when nextToken()
    // throws; the original code leaked the stream on a read error.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);      // space..255 form words (space overridden below)
        st.whitespaceChars(0, ' ');  // controls and space separate tokens
        st.commentChar('#');         // '#' starts a comment to end of line
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.append(st.sval);
        }
    }
}
示例13: parseQuoteTokens
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Parse specified string into tokens. Content surrounded with the "
 * character is considered as a single token. For example:
 * {@code echo "hello world"} is parsed into two tokens, [echo] and
 * [hello world]. The quote character itself can be escaped inside a
 * quoted section to yield an ordinary character, e.g.
 * {@code echo "hello \" world"} parses to [echo] and [hello " world].
 * Adjacent word and quoted segments merge into one token.
 *
 * @param string the string to split
 * @return the parsed tokens, in order
 */
public static String[] parseQuoteTokens(String string) {
    List<String> result = new ArrayList<String>();
    StreamTokenizer tok = new StreamTokenizer(
            new BufferedReader(new StringReader(string)));
    tok.resetSyntax();
    tok.wordChars(0, 255);     // everything is word material, except:
    tok.ordinaryChar(' ');     // separators, returned as ordinary chars
    tok.ordinaryChar('\n');
    tok.ordinaryChar('\t');
    tok.ordinaryChar('\r');
    tok.quoteChar('"');        // double quotes delimit quoted sections
    StringBuilder current = null;   // token under construction, null = none
    try {
        while (tok.nextToken() != StreamTokenizer.TT_EOF) {
            if (tok.ttype == '"' || tok.ttype == StreamTokenizer.TT_WORD) {
                // Word or quoted text: extend the current token so that
                // adjacent segments (e.g. a"b") fuse into one.
                if (current == null) {
                    current = new StringBuilder();
                }
                current.append(tok.sval);
            } else if (current != null) {
                // Separator ends the token being built.
                result.add(current.toString());
                current = null;
            }
        }
        // Flush a token left open at end of input.
        if (current != null) {
            result.add(current.toString());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return result.toArray(new String[result.size()]);
}
示例14: getCookieTokens
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Tokenizes a cookie header and returns the tokens in a
 * <code>Vector</code>.  Tokens are separated by ';' or ',', may be quoted
 * with '"' or '\'', and are trimmed of surrounding whitespace.
 **/
private Vector getCookieTokens(String cookieHeader) {
    StringReader source = new StringReader(cookieHeader);
    StreamTokenizer tok = new StreamTokenizer(source);
    Vector result = new Vector();
    // Start from an empty syntax table, then make every character part of
    // a word except the quote and separator characters below.
    tok.resetSyntax();
    tok.wordChars(0, Character.MAX_VALUE);
    tok.quoteChar('"');            // double quotes
    tok.quoteChar('\'');           // single quotes
    tok.whitespaceChars(';', ';'); // semicolon separates tokens
    tok.whitespaceChars(',', ','); // comma separates tokens
    try {
        while (tok.nextToken() != StreamTokenizer.TT_EOF) {
            result.addElement(tok.sval.trim());
        }
    } catch (IOException ignored) {
        // a StringReader never throws IOException
    }
    source.close();
    return result;
}
示例15: tokenizeString
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Splits {@code str} into tokens: whitespace separates tokens, '#' starts a
 * comment, '"' and '\'' delimit quoted sections, and a token ending in a
 * backslash is joined with the next token by a single space (the tokenizer
 * splits after the escaping backslash, so the join is re-done here).
 *
 * @param str the string to tokenize
 * @return the tokens, in order
 */
public static List<String> tokenizeString(final String str) {
    final StreamTokenizer st = new StreamTokenizer(new StringReader(str));
    st.resetSyntax();
    st.wordChars(0, 255);        // all characters form words by default
    st.whitespaceChars(0, ' ');  // controls and space separate tokens
    st.commentChar('#');         // '#' comments run to end of line
    st.quoteChar('"');
    st.quoteChar('\'');
    final List<String> tokens = new ArrayList<>();
    final StringBuilder pending = new StringBuilder();
    while (nextToken(st) != StreamTokenizer.TT_EOF) {
        final String piece = st.sval;
        if (piece.endsWith("\\")) {
            // The backslash escaped a space: drop it, keep accumulating,
            // and restore the space it protected.
            pending.append(piece, 0, piece.length() - 1).append(' ');
        } else {
            // Complete token: flush the accumulator.
            pending.append(piece);
            tokens.add(pending.toString());
            pending.setLength(0);
        }
    }
    // Input ended while a continuation was pending; emit what we have.
    if (pending.length() != 0) {
        tokens.add(pending.toString());
    }
    return tokens;
}