本文整理汇总了Java中java.io.StreamTokenizer.ordinaryChar方法的典型用法代码示例。如果您正苦于以下问题:Java StreamTokenizer.ordinaryChar方法的具体用法?Java StreamTokenizer.ordinaryChar怎么用?Java StreamTokenizer.ordinaryChar使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类java.io.StreamTokenizer
的用法示例。
在下文中一共展示了StreamTokenizer.ordinaryChar方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Builds the StreamTokenizer used to parse a command script.
 *
 * @param script command script to be parsed
 * @return StreamTokenizer configured for the command-script grammar
 */
private static StreamTokenizer createTokenizer(final String script) {
    final StreamTokenizer st = new StreamTokenizer(new StringReader(script));
    st.resetSyntax();
    st.wordChars(0, 255);        // by default every character is part of a word
    st.whitespaceChars(0, ' ');  // control characters and space separate tokens
    st.commentChar('#');         // '#' starts a comment that runs to end of line
    st.quoteChar('"');           // double-quoted strings
    st.quoteChar('\'');          // single-quoted strings
    st.eolIsSignificant(true);   // a newline marks the end of a command
    st.ordinaryChar(';');        // ';' separates commands
    st.ordinaryChar('|');        // '|' separates pipeline stages
    return st;
}
示例2: skipBackSlash
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Consumes a backslash token when it is immediately followed by a newline
 * (a line continuation); any other token is pushed back unread. On exit,
 * '\' is restored to being a word character.
 */
private static void skipBackSlash(StreamTokenizer st) throws IOException {
    st.ordinaryChar('\\');
    if (st.nextToken() != '\\') {
        // Not a continuation backslash: leave the token for the next read.
        st.pushBack();
    } else if (st.nextToken() != StreamTokenizer.TT_EOL) {
        throw new IncorrectFormatException("Expected new line after \\ character");
    }
    st.wordChars('\\', '\\');
}
示例3: setSyntax
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Configures the syntax table of the given StreamTokenizer: whitespace,
 * comment handling, quoting, and the delimiter characters the parser
 * needs returned as stand-alone tokens.
 */
protected void setSyntax(StreamTokenizer tokenizer) {
    tokenizer.resetSyntax();
    tokenizer.eolIsSignificant(false);   // line ends are not tokens
    tokenizer.slashStarComments(true);   // allow /* ... */ comments
    tokenizer.slashSlashComments(true);  // allow // comments
    tokenizer.whitespaceChars(0, ' ');   // control characters and space
    tokenizer.wordChars(' ' + 1, '\u00ff');
    // Structural delimiters come back as single-character tokens.
    for (char delimiter : new char[] {'[', ']', '{', '}', '-', '>', '/', '*'}) {
        tokenizer.ordinaryChar(delimiter);
    }
    tokenizer.quoteChar('"');
    tokenizer.whitespaceChars(';', ';'); // ';' is ignored like whitespace
    tokenizer.ordinaryChar('=');
}
示例4: initTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Prepares the StreamTokenizer for reading an ARFF file: commas behave as
 * whitespace, '%' begins a comment, values may be quoted with either quote
 * style, and curly braces are returned as single-character tokens.
 */
private void initTokenizer(StreamTokenizer tok) {
    tok.resetSyntax();
    tok.whitespaceChars(0, ' ');       // control characters and space
    tok.wordChars(' ' + 1, '\u00FF');  // printable characters form words
    tok.whitespaceChars(',', ',');     // commas merely separate values
    tok.commentChar('%');              // ARFF comment marker
    tok.quoteChar('"');
    tok.quoteChar('\'');
    tok.ordinaryChar('{');
    tok.ordinaryChar('}');
    tok.eolIsSignificant(true);        // rows end at line breaks
}
示例5: createTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Creates a tokenizer over the given string: printable ASCII characters
 * form words, ':' and '/' are returned as stand-alone tokens, both quote
 * styles are honored, and spaces/tabs separate tokens.
 */
private StreamTokenizer createTokenizer(String str) {
    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
    tokenizer.resetSyntax();
    tokenizer.wordChars(33, 126);  // printable ASCII, '!' through '~'
    tokenizer.ordinaryChar(':');
    tokenizer.ordinaryChar('/');
    tokenizer.quoteChar('"');
    tokenizer.quoteChar('\'');
    tokenizer.whitespaceChars(' ', ' ');
    tokenizer.whitespaceChars('\t', '\t');
    return tokenizer;
}
示例6: parseQuoteTokens
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Parses the specified string into tokens. Content surrounded by the '"'
 * character is treated as a single token: for example, {@code echo "hello world"}
 * yields two tokens, [echo] and [hello world]. A quote character may itself
 * be escaped with a backslash to be taken literally: {@code echo "hello \" world"}
 * yields [echo] and [hello " world].
 *
 * @param string the text to split
 * @return the parsed tokens, in order
 */
public static String[] parseQuoteTokens(String string) {
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new StringReader(string)));
    st.resetSyntax();
    st.wordChars(0, 255);
    // Separator characters are returned individually so they end a token.
    st.ordinaryChar(' ');
    st.ordinaryChar('\n');
    st.ordinaryChar('\t');
    st.ordinaryChar('\r');
    st.quoteChar('"');

    List<String> tokens = new ArrayList<String>();
    StringBuilder current = null; // accumulates adjacent word/quoted pieces
    try {
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == '"' || st.ttype == StreamTokenizer.TT_WORD) {
                if (current == null) {
                    current = new StringBuilder();
                }
                current.append(st.sval);
            } else if (current != null) {
                // A separator closes the token in progress.
                tokens.add(current.toString());
                current = null;
            }
        }
        if (current != null) {
            tokens.add(current.toString());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return tokens.toArray(new String[0]);
}
示例7: Lexer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Builds the tokenizer for the given source text: double-quoted string
 * literals and numbers are parsed, ',' '(' ')' come back as punctuation
 * tokens, and '$' and '_' are legal inside identifiers.
 */
Lexer(String src) {
    tok = new StreamTokenizer(new CharArrayReader(src.toCharArray()));
    tok.quoteChar('"');      // double-quoted string literals
    tok.parseNumbers();      // numeric literals become TT_NUMBER
    tok.ordinaryChar(',');   // punctuation returned as single tokens
    tok.ordinaryChar('(');
    tok.ordinaryChar(')');
    tok.wordChars('$', '$'); // '$' may appear in identifiers
    tok.wordChars('_', '_'); // '_' may appear in identifiers
}
示例8: getTokenizer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Creates a StreamTokenizer for Prolog-style input read from the given Reader.
 * '.' is made a stand-alone token (StreamTokenizer otherwise treats it as the
 * start of a number), '%' begins a line comment, and both quote styles delimit
 * strings. Number parsing remains enabled: it cannot be switched off without
 * resetting the entire syntax table.
 */
private static StreamTokenizer getTokenizer(Reader reader) throws IOException {
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.ordinaryChar('.'); // '.' must be its own token, not part of a number
    tokenizer.commentChar('%');  // Prolog-style % comments; slash comments stay on
    tokenizer.quoteChar('"');
    tokenizer.quoteChar('\'');
    return tokenizer;
}
示例9: Lexer
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Creates a new lexical analyzer for the specified SQL statement.
 *
 * @param s the SQL statement
 */
public Lexer(String s) {
    initKeywords();
    tok = new StreamTokenizer(new StringReader(s));
    tok.wordChars('_', '_'); // underscores may appear in identifiers
    tok.ordinaryChar('.');   // '.' is a token of its own, never part of a number
    // Identifiers and keywords (TT_WORD tokens) are folded to lower case.
    tok.lowerCaseMode(true);
    nextToken();             // prime the first token for the parser
}
示例10: SimpleCalcStreamTok
import java.io.StreamTokenizer; //导入方法依赖的package包/类
/**
 * Constructs the calculator from an existing Reader. '-' and '/' are made
 * ordinary characters so they are returned as operator tokens instead of
 * being folded into signed numbers or treated as comment starters.
 */
public SimpleCalcStreamTok(Reader rdr) throws IOException {
    tf = new StreamTokenizer(rdr);
    tf.slashSlashComments(true); // "//" starts a comment
    tf.ordinaryChar('-');        // subtraction operator, not a numeric sign
    tf.ordinaryChar('/');        // division operator, not a comment character
    s = new Stack();             // operand stack (raw type kept; field type unseen)
}