This article collects typical usage examples of the Java method java.io.StreamTokenizer.commentChar. If you are unsure what StreamTokenizer.commentChar does or how to use it, the selected code examples below should help; you can also explore further usage of its enclosing class, java.io.StreamTokenizer.
The following presents 15 code examples of StreamTokenizer.commentChar, ordered roughly by popularity.
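Before the collected examples, here is a minimal, self-contained sketch of what commentChar does; the class name and the input string are illustrative and not taken from any of the examples below.
import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class CommentCharDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(
                new StringReader("alpha # ignored until end of line\nbeta"));
        // Make '#' start a single-line comment; the tokenizer silently skips
        // everything from '#' to the end of the line.
        st.commentChar('#');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            System.out.println(st.sval);   // prints "alpha", then "beta"
        }
    }
}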
Example 1: loadCmdFile
import java.io.StreamTokenizer; // import the required package/class
private static void loadCmdFile(String name, ListBuffer<String> args)
throws IOException
{
Reader r = new BufferedReader(new FileReader(name));
StreamTokenizer st = new StreamTokenizer(r);
st.resetSyntax();
st.wordChars(' ', 255);
st.whitespaceChars(0, ' ');
st.commentChar('#');
st.quoteChar('"');
st.quoteChar('\'');
while (st.nextToken() != StreamTokenizer.TT_EOF) {
args.append(st.sval);
}
r.close();
}
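For illustration, with the syntax configured above a command file containing the following (hypothetical) lines

# javac options
-g -source 8
"dir with spaces/App.java"

would be read as the arguments -g, -source, 8 and dir with spaces/App.java: everything after a '#' on a line is discarded, any run of characters above the space character forms one word, and double or single quotes keep embedded spaces inside a single argument.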
Example 2: loadCmdFile
import java.io.StreamTokenizer; // import the required package/class
private static void loadCmdFile(String name, List<String> args)
throws IOException
{
Reader r = new BufferedReader(new FileReader(name));
StreamTokenizer st = new StreamTokenizer(r);
st.resetSyntax();
st.wordChars(' ', 255);
st.whitespaceChars(0, ' ');
st.commentChar('#');
st.quoteChar('"');
st.quoteChar('\'');
while (st.nextToken() != StreamTokenizer.TT_EOF) {
args.add(st.sval);
}
r.close();
}
Example 3: loadCmdFile
import java.io.StreamTokenizer; // import the required package/class
private static void loadCmdFile(String name, List args)
throws IOException
{
Reader r = new BufferedReader(new FileReader(name));
StreamTokenizer st = new StreamTokenizer(r);
st.resetSyntax();
st.wordChars(' ', 255);
st.whitespaceChars(0, ' ');
st.commentChar('#');
st.quoteChar('"');
st.quoteChar('\'');
while (st.nextToken() != StreamTokenizer.TT_EOF) {
args.add(st.sval);
}
r.close();
}
Example 4: createTokenizer
import java.io.StreamTokenizer; // import the required package/class
/**
* createTokenizer - build up StreamTokenizer for the command script
* @param script command script to be parsed
* @return StreamTokenizer for command script
*/
private static StreamTokenizer createTokenizer(final String script) {
final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(script));
tokenizer.resetSyntax();
// Default all characters to word.
tokenizer.wordChars(0, 255);
// Spaces and special characters are white spaces.
tokenizer.whitespaceChars(0, ' ');
// Ignore # comments.
tokenizer.commentChar('#');
// Handle double and single quote strings.
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
// Need to recognize the end of a command.
tokenizer.eolIsSignificant(true);
// Command separator.
tokenizer.ordinaryChar(';');
// Pipe separator.
tokenizer.ordinaryChar('|');
return tokenizer;
}
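A hedged sketch of how such a tokenizer might be driven; the dumpScript method and the script text are illustrative, and the call assumes the sketch sits in the same class as createTokenizer above.
// Hypothetical driver loop for the tokenizer built by createTokenizer.
private static void dumpScript(final String script) throws IOException {
    final StreamTokenizer t = createTokenizer(script);
    while (t.nextToken() != StreamTokenizer.TT_EOF) {
        switch (t.ttype) {
        case StreamTokenizer.TT_WORD:
        case '"':
        case '\'':
            System.out.println("word: " + t.sval);   // command words and quoted strings
            break;
        case ';':                                    // command separator
        case StreamTokenizer.TT_EOL:                 // end of a command line
            System.out.println("-- end of command --");
            break;
        case '|':                                    // pipe separator
            System.out.println("-- pipe --");
            break;
        default:
            break;
        }
    }
}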
Example 5: setup
import java.io.StreamTokenizer; // import the required package/class
/**
* Sets up the stream tokenizer
*/
private void setup() {
st = new StreamTokenizer(this);
st.resetSyntax();
st.eolIsSignificant(false);
st.lowerCaseMode(true);
// Parse numbers as words
st.wordChars('0', '9');
st.wordChars('-', '.');
// Characters as words
st.wordChars('\u0000', '\u00FF');
// Skip comments
st.commentChar('%');
// Skip whitespace and newlines
st.whitespaceChars(' ', ' ');
st.whitespaceChars('\u0009', '\u000e');
}
Example 6: loadCmdFile
import java.io.StreamTokenizer; // import the required package/class
private static void loadCmdFile(String name, ListBuffer<String> args)
throws IOException
{
try (Reader r = Files.newBufferedReader(Paths.get(name))) {
StreamTokenizer st = new StreamTokenizer(r);
st.resetSyntax();
st.wordChars(' ', 255);
st.whitespaceChars(0, ' ');
st.commentChar('#');
st.quoteChar('"');
st.quoteChar('\'');
while (st.nextToken() != StreamTokenizer.TT_EOF) {
args.append(st.sval);
}
}
}
Example 7: initTokenizer
import java.io.StreamTokenizer; // import the required package/class
/**
* Initializes the StreamTokenizer used for reading the ARFF file.
*/
private void initTokenizer(StreamTokenizer tokenizer) {
tokenizer.resetSyntax();
tokenizer.whitespaceChars(0, ' ');
tokenizer.wordChars(' ' + 1, '\u00FF');
tokenizer.whitespaceChars(',', ',');
tokenizer.commentChar('%');
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
tokenizer.ordinaryChar('{');
tokenizer.ordinaryChar('}');
tokenizer.eolIsSignificant(true);
}
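For instance, with this configuration an ARFF sparse-instance line such as {1 'sunny', 3 85} % a comment (hypothetical data) comes back as the ordinary characters '{' and '}', the words 1, sunny, 3 and 85 (the comma is treated as whitespace and the quoted value keeps its own token), everything after '%' is skipped, and the end of the line is reported as TT_EOL.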
Example 8: loadCmdFile
import java.io.StreamTokenizer; // import the required package/class
private static void loadCmdFile(String name, ListBuffer<String> args)
throws IOException {
Reader r = new BufferedReader(new FileReader(name));
StreamTokenizer st = new StreamTokenizer(r);
st.resetSyntax();
st.wordChars(' ', 255);
st.whitespaceChars(0, ' ');
st.commentChar('#');
st.quoteChar('"');
st.quoteChar('\'');
while (st.nextToken() != StreamTokenizer.TT_EOF) {
args.append(st.sval);
}
r.close();
}
Example 9: tokenizeString
import java.io.StreamTokenizer; // import the required package/class
public static List<String> tokenizeString(final String str) {
final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
tokenizer.resetSyntax();
tokenizer.wordChars(0, 255);
tokenizer.whitespaceChars(0, ' ');
tokenizer.commentChar('#');
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
final List<String> tokenList = new ArrayList<>();
final StringBuilder toAppend = new StringBuilder();
while (nextToken(tokenizer) != StreamTokenizer.TT_EOF) {
final String s = tokenizer.sval;
// The tokenizer honors quoted strings and returns each one as a single token
// that may contain several space-separated words. It does not recognize
// backslash-escaped spaces, though, and splits right after the escaping
// \ character; that case is stitched back together here.
if (s.endsWith("\\")) {
// omit trailing \, append space instead
toAppend.append(s.substring(0, s.length() - 1)).append(' ');
} else {
tokenList.add(toAppend.append(s).toString());
toAppend.setLength(0);
}
}
if (toAppend.length() != 0) {
tokenList.add(toAppend.toString());
}
return tokenList;
}
Example 10: tokenizeString
import java.io.StreamTokenizer; // import the required package/class
/**
* Break a string into tokens, honoring quoted arguments and escaped spaces.
*
* @param str a {@link String} to tokenize.
* @return a {@link List} of {@link String}s representing the tokens that
* constitute the string.
* @throws IOException in case {@link StreamTokenizer#nextToken()} raises it.
*/
public static List<String> tokenizeString(final String str) throws IOException {
final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
tokenizer.resetSyntax();
tokenizer.wordChars(0, 255);
tokenizer.whitespaceChars(0, ' ');
tokenizer.commentChar('#');
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
final List<String> tokenList = new ArrayList<>();
final StringBuilder toAppend = new StringBuilder();
while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
final String s = tokenizer.sval;
// The tokenizer honors quoted strings and returns each one as a single token
// that may contain several space-separated words. It does not recognize
// backslash-escaped spaces, though, and splits right after the escaping
// \ character; that case is stitched back together here.
if (s.endsWith("\\")) {
// omit trailing \, append space instead
toAppend.append(s.substring(0, s.length() - 1)).append(' ');
} else {
tokenList.add(toAppend.append(s).toString());
toAppend.setLength(0);
}
}
if (toAppend.length() != 0) {
tokenList.add(toAppend.toString());
}
return tokenList;
}
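An illustrative call, with a made-up input string and the declared IOException left to the caller:
List<String> tokens = tokenizeString("copy \"my file.txt\" backup\\ dir # old copy");
// tokens -> [copy, my file.txt, backup dir]; the text after '#' is discarded
The quoted filename survives as a single token, the backslash-escaped space is rejoined into "backup dir", and the comment is skipped.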
Example 11: getTokenizer
import java.io.StreamTokenizer; // import the required package/class
private static StreamTokenizer getTokenizer(Reader reader) throws IOException {
StreamTokenizer scan = new StreamTokenizer(reader);
scan.ordinaryChar('.'); // '.' looks like a number to StreamTokenizer by default
scan.commentChar('%'); // Prolog-style % comments; slashSlashComments and slashStarComments can stay as well.
scan.quoteChar('"');
scan.quoteChar('\'');
// WTF? You can't disable parsing of numbers unless you reset the syntax (http://stackoverflow.com/q/8856750/115589)
//scan.parseNumbers();
return scan;
}
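The commented-out parseNumbers() call points at a real limitation: once the default syntax table is active there is no switch that turns number parsing back off, so the usual workaround is to call resetSyntax() and rebuild the table by hand. A hedged sketch of that approach, with digits declared as plain word characters (the exact character ranges shown are illustrative):
StreamTokenizer scan = new StreamTokenizer(reader);
scan.resetSyntax();                 // every character starts out "ordinary"
scan.wordChars('a', 'z');
scan.wordChars('A', 'Z');
scan.wordChars('0', '9');           // digits become part of words, not numbers
scan.wordChars('_', '_');
scan.whitespaceChars(0, ' ');
scan.commentChar('%');              // keep the Prolog-style % comments
scan.quoteChar('"');
scan.quoteChar('\'');
With this table an integer such as 42 comes back as the word "42" in sval rather than as a parsed value in nval.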
Example 12: tokenizeString
import java.io.StreamTokenizer; // import the required package/class
/**
* Break a string into tokens, honoring quoted arguments and escaped spaces.
*
* @param str a {@link String} to tokenize.
* @return a {@link List} of {@link String}s representing the tokens that
* constitute the string.
*/
public static List<String> tokenizeString(final String str) {
final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
tokenizer.resetSyntax();
tokenizer.wordChars(0, 255);
tokenizer.whitespaceChars(0, ' ');
tokenizer.commentChar('#');
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
final List<String> tokenList = new ArrayList<>();
final StringBuilder toAppend = new StringBuilder();
while (nextToken(tokenizer) != StreamTokenizer.TT_EOF) {
final String s = tokenizer.sval;
// The tokenizer honors quoted strings and returns each one as a single token
// that may contain several space-separated words. It does not recognize
// backslash-escaped spaces, though, and splits right after the escaping
// \ character; that case is stitched back together here.
if (s.endsWith("\\")) {
// omit trailing \, append space instead
toAppend.append(s.substring(0, s.length() - 1)).append(' ');
} else {
tokenList.add(toAppend.append(s).toString());
toAppend.setLength(0);
}
}
if (toAppend.length() != 0) {
tokenList.add(toAppend.toString());
}
return tokenList;
}
Example 13: initTokenizer
import java.io.StreamTokenizer; // import the required package/class
/**
* Initializes the stream tokenizer
*
* @param tokenizer the tokenizer to initialize
*/
private void initTokenizer(StreamTokenizer tokenizer) {
tokenizer.resetSyntax();
tokenizer.whitespaceChars(0, (' ' - 1));
tokenizer.wordChars(' ', '\u00FF');
tokenizer.whitespaceChars(',', ',');
tokenizer.whitespaceChars(':', ':');
// tokenizer.whitespaceChars('.','.');
tokenizer.commentChar('|');
tokenizer.whitespaceChars('\t', '\t');
tokenizer.quoteChar('"');
tokenizer.quoteChar('\'');
tokenizer.eolIsSignificant(true);
}
Example 14: XYZChemModel
import java.io.StreamTokenizer; // import the required package/class
/** Create a Chemical model by parsing an input stream */
XYZChemModel(InputStream is) throws Exception {
this();
StreamTokenizer st = new StreamTokenizer(
new BufferedReader(new InputStreamReader(is, "UTF-8")));
st.eolIsSignificant(true);
st.commentChar('#');
try {
scan:
while (true) {
switch (st.nextToken()) {
case StreamTokenizer.TT_EOF:
break scan;
default:
break;
case StreamTokenizer.TT_WORD:
String name = st.sval;
double x = 0,
y = 0,
z = 0;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
x = st.nval;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
y = st.nval;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
z = st.nval;
}
}
}
addVert(name, (float) x, (float) y, (float) z);
while (st.ttype != StreamTokenizer.TT_EOL
&& st.ttype != StreamTokenizer.TT_EOF) {
st.nextToken();
}
} // end Switch
} // end while
is.close();
} // end Try
catch (IOException e) {
}
if (st.ttype != StreamTokenizer.TT_EOF) {
throw new Exception(st.toString());
}
}
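The loop above expects whitespace-separated lines of the form "name x y z", with '#' starting a comment that runs to the end of the line; an illustrative (made-up) input would be:

# water molecule, illustrative data
O   0.000   0.000   0.000
H   0.757   0.586   0.000
H  -0.757   0.586   0.000

Each element symbol is read as a TT_WORD and the three coordinates as TT_NUMBER tokens, while everything from '#' to the end of a line is skipped.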
Example 15: Harness
import java.io.StreamTokenizer; // import the required package/class
/**
* Create new benchmark harness with given configuration and reporter.
* Throws ConfigFormatException if there was an error parsing the config
* file.
* <p>
* <b>Config file syntax:</b>
* <p>
* '#' marks the beginning of a comment. Blank lines are ignored. All
* other lines should adhere to the following format:
* <pre>
* <weight> <name> <class> [<args>]
* </pre>
* <weight> is a floating point value which is multiplied by the
* benchmark's execution time to determine its weighted score. The
* total score of the benchmark suite is the sum of all weighted scores
* of its benchmarks.
* <p>
* <name> is a name used to identify the benchmark on the benchmark
* report. If the name contains whitespace, the quote character '"' should
* be used as a delimiter.
* <p>
* <class> is the full name (including the package) of the class
* containing the benchmark implementation. This class must implement
* bench.Benchmark.
* <p>
* [<args>] is a variable-length list of runtime arguments to pass to
* the benchmark. Arguments containing whitespace should use the quote
* character '"' as a delimiter.
* <p>
* <b>Example:</b>
* <pre>
* 3.5 "My benchmark" bench.serial.Test first second "third arg"
* </pre>
*/
public Harness(InputStream in) throws IOException, ConfigFormatException {
Vector bvec = new Vector();
StreamTokenizer tokens = new StreamTokenizer(new InputStreamReader(in));
tokens.resetSyntax();
tokens.wordChars(0, 255);
tokens.whitespaceChars(0, ' ');
tokens.commentChar('#');
tokens.quoteChar('"');
tokens.eolIsSignificant(true);
tokens.nextToken();
while (tokens.ttype != StreamTokenizer.TT_EOF) {
switch (tokens.ttype) {
case StreamTokenizer.TT_WORD:
case '"': // parse line
bvec.add(parseBenchInfo(tokens));
break;
default: // ignore
tokens.nextToken();
break;
}
}
binfo = (BenchInfo[]) bvec.toArray(new BenchInfo[bvec.size()]);
}