本文整理汇总了Java中com.sun.squawk.util.StringTokenizer.nextToken方法的典型用法代码示例。如果您正苦于以下问题:Java StringTokenizer.nextToken方法的具体用法?Java StringTokenizer.nextToken怎么用?Java StringTokenizer.nextToken使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.sun.squawk.util.StringTokenizer
的用法示例。
在下文中一共展示了StringTokenizer.nextToken方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: open
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Opens a "classpath:" connection and records each class path element.
 *
 * @param protocol the protocol name (unused by this implementation)
 * @param name     the URL name; must start with "//" followed by a
 *                 {@code sepch}-separated list of class path entries
 * @param mode     the access mode (unused by this implementation)
 * @param timeouts whether timeouts are requested (unused by this implementation)
 * @return this connection
 * @throws IOException declared by the Connection contract
 * @throws IllegalArgumentException if {@code name} does not start with "//"
 */
public Connection open(String protocol, String name, int mode, boolean timeouts) throws IOException {
    //System.out.println("classpath: name="+name);
    // Check the length first so a name shorter than two characters produces a
    // clear IllegalArgumentException instead of StringIndexOutOfBoundsException.
    if (name.length() < 2 || name.charAt(0) != '/' || name.charAt(1) != '/') {
        throw new IllegalArgumentException("Protocol must start with \"//\" "+name);
    }
    String path = name.substring(2);
    StringTokenizer st = new StringTokenizer(path, sepch);
    while (st.hasMoreTokens()) {
        String dirName = st.nextToken();
        // Strip a single trailing path separator ('\' or '/') from each entry.
        if (dirName.endsWith("\\") || dirName.endsWith("/")) {
            dirName = dirName.substring(0, dirName.length() - 1);
        }
        classPathArray.addElement(dirName);
    }
    return this;
}
示例2: TargetCollection
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Creates a new TargetCollection with Target objects created from the String,
 * {@code targetData}. The string has the format
 * "[x(double)],[y(double)],[distance(double)],[isCenter(1/0)]:[repeat]:[...]:".
 *
 * @param targetData The Target data string to be parsed into a new TargetCollection.
 */
public TargetCollection(String targetData) {
    targets = new Vector();
    String trimmed = targetData.trim();
    if (trimmed.equals("")) {
        return; // nothing to parse; leave the collection empty
    }
    StringTokenizer blockTokenizer = new StringTokenizer(trimmed, ":");
    try {
        // A parse failure in any block aborts processing of the rest of the string.
        while (blockTokenizer.hasMoreTokens()) {
            String block = blockTokenizer.nextToken();
            if (!block.equals("")) {
                // Each non-empty block carries four comma-separated fields.
                StringTokenizer fieldTokenizer = new StringTokenizer(block, ",");
                double x = Double.parseDouble(fieldTokenizer.nextToken());
                double y = Double.parseDouble(fieldTokenizer.nextToken());
                double distance = Double.parseDouble(fieldTokenizer.nextToken());
                boolean isCenter = fieldTokenizer.nextToken().equals("1");
                targets.addElement(new Target(x, y, distance, isCenter));
            }
        }
    }
    catch(Exception e) {
        e.printStackTrace();
    }
}
示例3: parse
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Parses the {@code <name>=<value>} pairs separated by ';' in a URL name. The
 * pairs start after the first ';' in the given name. Each recognized pair is
 * handed to {@code parameter(key, value)}.
 *
 * @param name the name part of a connection URL
 * @return the name stripped of the parameters (if any)
 * @throws IllegalArgumentException if the parameter string is malformed or a
 *         key is rejected by {@code parameter} — TODO confirm {@code parameter}
 *         returns false only for unknown keys
 */
public String parse(String name) {
    int parmIndex = name.indexOf(';');
    if (parmIndex != -1) {
        String parms = name.substring(parmIndex);
        name = name.substring(0, parmIndex);
        // Tokenize with returnDelims=true so ';' and '=' come back as their
        // own tokens; each pair must appear as the sequence ";", key, "=", value.
        StringTokenizer st = new StringTokenizer(parms, "=;", true);
        while (st.hasMoreTokens()) {
            try {
                if (!st.nextToken().equals(";")) {
                    throw new NoSuchElementException();
                }
                String key = st.nextToken();
                if (!st.nextToken().equals("=")) {
                    throw new NoSuchElementException();
                }
                String value = st.nextToken();
                if (!parameter(key, value)) {
                    throw new IllegalArgumentException("Unknown parameter to protocol: " + key);
                }
            } catch (NoSuchElementException nsee) {
                // Covers both a truncated pair (tokenizer ran dry) and the
                // explicit throws above for out-of-order delimiters.
                throw new IllegalArgumentException("Bad param string: " + parms);
            }
        }
    }
    return name;
}
示例4: open
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Opens the connection, splitting {@code name} into initial connection entries
 * on ';'. A segment ending in '\' escapes its terminating ';', joining it to
 * the following segment (the backslash is preserved, matching the original
 * behavior).
 *
 * @param protocol the protocol name (unused by this implementation)
 * @param name     the ';'-separated list of connection names; may be empty
 * @param mode     the access mode (unused by this implementation)
 * @param timeouts whether timeouts are requested (unused by this implementation)
 * @return this connection
 * @throws IOException declared by the Connection contract
 */
public Connection open(String protocol, String name, int mode, boolean timeouts) throws IOException {
    if (name.length() != 0) {
        Vector names = new Vector();
        StringTokenizer st = new StringTokenizer(name, ";");
        // Accumulates segments whose ';' was escaped with a trailing '\'.
        String part = null;
        while (st.hasMoreTokens()) {
            String s = st.nextToken();
            if (s.endsWith("\\")) {
                // Escaped separator: keep accumulating into 'part'.
                if (part != null) {
                    part += ";" + s;
                } else {
                    part = s;
                }
            } else {
                if (part != null) {
                    s = part + ";" + s;
                    part = null;
                }
                names.addElement(s);
            }
        }
        // Fix: a final segment ending in an escaped ';' used to be dropped
        // silently; flush the pending partial entry so no input is lost.
        if (part != null) {
            names.addElement(part);
        }
        initialConnections = new String[names.size()];
        names.copyInto(initialConnections);
    }
    return this;
}
示例5: Words
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Creates a Words instance.
 *
 * @param trace the substring from a trace line containing zero or more word values
 * @param hasTypes specifies if the word values have an annotated type
 */
public Words(String trace, boolean hasTypes) {
    StringTokenizer st = new StringTokenizer(trace, ",");
    if (!st.hasMoreTokens()) {
        // Empty trace: share the canonical empty array, no type info.
        values = NO_VALUES;
        types = null;
        return;
    }
    int count = st.countTokens();
    values = new long[count];
    types = hasTypes ? new byte[count] : null;
    for (int i = 0; i < count; ++i) {
        String token = st.nextToken();
        String value = token;
        if (hasTypes) {
            // Typed tokens have the form "<value>#<type>".
            int hash = token.indexOf('#');
            value = token.substring(0, hash);
            types[i] = Byte.parseByte(token.substring(hash + 1));
        }
        // "X" is the trace's marker for an undefined word value.
        values[i] = value.equals("X") ? 0xdeadbeef : Long.parseLong(value);
    }
}
示例6: tokenizeData
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Separates the input String into many Strings based on the configured delimiter.
 *
 * @param input String to be tokenized
 * @return String array of the tokens found in the input String
 */
public synchronized String[] tokenizeData(String input) {
    StringTokenizer st = new StringTokenizer(input, String.valueOf(delimiter));
    // countTokens is fixed up front, before any token is consumed.
    int count = st.countTokens();
    String[] tokens = new String[count];
    int index = 0;
    while (index < count) {
        tokens[index++] = st.nextToken();
    }
    return tokens;
}
示例7: tokenizeData
import com.sun.squawk.util.StringTokenizer; //导入方法依赖的package包/类
/**
 * Separates the input String into many Strings based on the configured delimiter.
 *
 * @param input String to be tokenized
 * @return String array of the tokens found in the input String
 */
public String[] tokenizeData(String input) {
    StringTokenizer tokenizer = new StringTokenizer(input, String.valueOf(delimiter));
    // Size the result before consuming; hasMoreTokens then yields exactly that many.
    String[] result = new String[tokenizer.countTokens()];
    for (int slot = 0; tokenizer.hasMoreTokens(); slot++) {
        result[slot] = tokenizer.nextToken();
    }
    return result;
}