本文整理汇总了Java中com.sun.squawk.util.StringTokenizer类的典型用法代码示例。如果您正苦于以下问题:Java StringTokenizer类的具体用法?Java StringTokenizer怎么用?Java StringTokenizer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
StringTokenizer类属于com.sun.squawk.util包,在下文中一共展示了StringTokenizer类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: open
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Opens the connection and records each directory of the "//"-prefixed
 * class path into {@code classPathArray}.
 *
 * @param protocol the protocol name (unused here)
 * @param name     the name part of the URL; must start with "//"
 * @param mode     the access mode (unused here)
 * @param timeouts whether timeouts were requested (unused here)
 * @return this connection
 * @throws IOException declared by the Connection contract
 * @throws IllegalArgumentException if {@code name} does not start with "//"
 */
public Connection open(String protocol, String name, int mode, boolean timeouts) throws IOException {
    //System.out.println("classpath: name="+name);
    // Check the length first so a name shorter than two characters raises
    // IllegalArgumentException instead of StringIndexOutOfBoundsException
    // from charAt().
    if (name.length() < 2 || name.charAt(0) != '/' || name.charAt(1) != '/') {
        throw new IllegalArgumentException("Protocol must start with \"//\" "+name);
    }
    String path = name.substring(2);
    StringTokenizer st = new StringTokenizer(path, sepch);
    while (st.hasMoreTokens()) {
        String dirName = st.nextToken();
        // Strip a single trailing separator so "dir/" and "dir" are equivalent.
        if (dirName.endsWith("\\") || dirName.endsWith("/")) {
            dirName = dirName.substring(0, dirName.length() - 1);
        }
        classPathArray.addElement(dirName);
    }
    return this;
}
示例2: TargetCollection
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Creates a new TargetCollection with Target objects created from the String,
 * {@code targetData}. The string has the format
 * "[x(double)],[y(double)],[distance(double)],[isCenter(1/0)]:[repeat]:[...]:".
 *
 * @param targetData The Target data string to be parsed into a new TargetCollection.
 */
public TargetCollection(String targetData) {
    targets = new Vector();
    String trimmed = targetData.trim();
    if (trimmed.equals("")) {
        return;
    }
    StringTokenizer blockTokenizer = new StringTokenizer(trimmed, ":");
    try {
        while (blockTokenizer.hasMoreTokens()) {
            String block = blockTokenizer.nextToken();
            if (!block.equals("")) {
                StringTokenizer fieldTokenizer = new StringTokenizer(block, ",");
                double xValue = Double.parseDouble(fieldTokenizer.nextToken());
                double yValue = Double.parseDouble(fieldTokenizer.nextToken());
                double distanceValue = Double.parseDouble(fieldTokenizer.nextToken());
                boolean centerFlag = fieldTokenizer.nextToken().equals("1");
                targets.addElement(new Target(xValue, yValue, distanceValue, centerFlag));
            }
        }
    }
    catch(Exception e) {
        // Malformed data: keep whatever parsed successfully before the failure.
        e.printStackTrace();
    }
}
示例3: giveData
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Caches the latest raw data string and parses its colon-separated fields.
 * The first field is the command type; for alignment/distance commands the
 * target type and target distance fields are also extracted into the
 * corresponding static fields. Any other command type just logs
 * "data not found".
 *
 * @param data the colon-delimited command string
 */
public static void giveData(String data) {
    latestData = data;
    tok = new StringTokenizer(latestData, ":");
    int commandType = Integer.parseInt(tok.nextToken());
    boolean recognized = commandType == HORIZONTAL_ALIGNMENT_FROM_TARGET
            || commandType == DISTANCE_FROM_TARGET;
    if (recognized) {
        targetType = Integer.parseInt(tok.nextToken());
        targetDistance = Double.parseDouble(tok.nextToken());
    } else {
        System.out.println("data not found");
    }
}
示例4: parse
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Parses the {@code name=value} pairs separated by ';' in a URL name. The
 * pairs start after the first ';' in the given name.
 *
 * @param name the name part of a connection URL
 * @return the name stripped of the parameters (if any)
 * @throws IllegalArgumentException if a pair is malformed or its key is
 *         rejected by {@code parameter(key, value)}
 */
public String parse(String name) {
    int paramStart = name.indexOf(';');
    if (paramStart == -1) {
        // No parameters present; nothing to strip.
        return name;
    }
    String parms = name.substring(paramStart);
    name = name.substring(0, paramStart);
    // Return delimiters as tokens so the expected ';key=value' structure
    // can be validated element by element.
    StringTokenizer st = new StringTokenizer(parms, "=;", true);
    while (st.hasMoreTokens()) {
        try {
            if (!st.nextToken().equals(";")) {
                throw new NoSuchElementException();
            }
            String key = st.nextToken();
            if (!st.nextToken().equals("=")) {
                throw new NoSuchElementException();
            }
            String value = st.nextToken();
            if (!parameter(key, value)) {
                throw new IllegalArgumentException("Unknown parameter to protocol: " + key);
            }
        } catch (NoSuchElementException nsee) {
            // Either an explicit structure violation above or the tokenizer
            // running out of tokens mid-pair ends up here.
            throw new IllegalArgumentException("Bad param string: " + parms);
        }
    }
    return name;
}
示例5: open
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Opens the connection. The name is a ';'-separated list of connection
 * specifiers; a segment ending in '\' escapes the following ';' so the next
 * segment is joined to it as part of the same specifier.
 *
 * @param protocol the protocol name (unused here)
 * @param name     the ';'-separated connection list (may be empty)
 * @param mode     the access mode (unused here)
 * @param timeouts whether timeouts were requested (unused here)
 * @return this connection
 * @throws IOException declared by the Connection contract
 */
public Connection open(String protocol, String name, int mode, boolean timeouts) throws IOException {
    if (name.length() != 0) {
        Vector names = new Vector();
        StringTokenizer st = new StringTokenizer(name, ";");
        String part = null;
        while (st.hasMoreTokens()) {
            String s = st.nextToken();
            if (s.endsWith("\\")) {
                // Escaped separator: hold this segment and join the next one to it.
                if (part != null) {
                    part += ";" + s;
                } else {
                    part = s;
                }
            } else {
                if (part != null) {
                    s = part + ";" + s;
                    part = null;
                }
                names.addElement(s);
            }
        }
        // Fix: a trailing escaped segment was previously dropped silently;
        // keep it as the final connection specifier.
        if (part != null) {
            names.addElement(part);
        }
        initialConnections = new String[names.size()];
        names.copyInto(initialConnections);
    }
    return this;
}
示例6: parseRelocationFile
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Parses a given file containing relocation information and updates the
 * relocation table for a given object memory and its parents. Each line in a
 * relocation file must be of the format {@code <url>=<address>}.
 *
 * @param file the name of the file containing relocation information
 * @param om an object memory
 * @throws IOException if reading the file fails
 * @throws RuntimeException if the file is missing (and is not the default
 *         "squawk.reloc") or a line does not match the expected pattern
 */
private void parseRelocationFile(String file, ObjectMemory om) throws IOException {
    if (!new File(file).exists()) {
        // The default relocation file is allowed to be absent; any other
        // missing file is an error.
        if (!file.equals("squawk.reloc")) {
            throw new RuntimeException(file + " does not exist");
        }
    }
    Properties properties = new Properties();
    BufferedReader br = new BufferedReader(new FileReader(file));
    try {
        String line = br.readLine();
        int lno = 1;
        while (line != null) {
            StringTokenizer st = new StringTokenizer(line, "=");
            if (st.countTokens() != 2) {
                throw new RuntimeException(file + ":" + lno + ": does not match '<url>=<address>' pattern");
            }
            properties.setProperty(st.nextToken(), st.nextToken());
            line = br.readLine();
            lno++;
        }
    } finally {
        // Fix: the reader was previously leaked; always release the file handle.
        br.close();
    }
    relocationTable = new Hashtable<ObjectMemory, Address>();
    if (setRelocationFor(om, properties) == null) {
        relocationTable = null;
    }
}
示例7: initializeCallStack
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Initializes the call stack and current method of this slice based on a given stack trace.
 *
 * The trace is a ':'-separated list of elements. The elements are processed in
 * reverse order (the last trace element becomes stack depth 0), and a
 * repetition element adds that many extra entries of the frame that follows it.
 *
 * @param stackTrace the stack trace encapsulating the current call stack for the thread
 * @param symbols the database of method symbols
 * @param tracePosition the trace position recorded for every method entered here
 * @throws TraceParseException if an element matches neither recognized pattern
 */
private void initializeCallStack(String stackTrace, Symbols symbols, int tracePosition) {
StringTokenizer st = new StringTokenizer(stackTrace, ":");
// Push all elements onto a stack so popping yields them in reverse trace order.
Stack<String> stack = new Stack<String>();
stack.ensureCapacity(st.countTokens());
while (st.hasMoreTokens()) {
stack.push(st.nextToken());
}
int repetition = 0;
int depth = 0;
while (!stack.empty()) {
String element = (String)stack.pop();
Matcher m = STACKTRACE_ELEMENT.matcher(element);
if (m.matches()) {
// group(1) is the numeric method identifier resolved against the symbols database.
Symbols.Method method = TraceViewer.lookupMethod(symbols, Long.parseLong(m.group(1)));
enterMethod(method, depth++, tracePosition);
// A preceding repetition element means this frame is entered repetition+1 times.
while (repetition-- > 0) {
enterMethod(method, depth++, tracePosition);
}
continue;
}
m = STACKTRACE_REPETITION_ELEMENT.matcher(element);
if (m.matches()) {
// Record the repeat count; it applies to the next regular stack element.
repetition = Integer.parseInt(m.group(1));
continue;
}
// Neither pattern matched: the trace is malformed at this element.
throw new TraceParseException(element, m.pattern().pattern());
}
}
示例8: Words
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Creates a Words instance.
 *
 * @param trace the substring from a trace line containing zero or more
 *        comma-separated word values
 * @param hasTypes specifies if each word value carries a '#'-separated
 *        annotated type
 */
public Words(String trace, boolean hasTypes) {
    StringTokenizer st = new StringTokenizer(trace, ",");
    int count = st.countTokens();
    if (count == 0) {
        // No word values at all.
        values = NO_VALUES;
        types = null;
        return;
    }
    values = new long[count];
    types = hasTypes ? new byte[count] : null;
    for (int i = 0; i < count; i++) {
        String token = st.nextToken();
        if (hasTypes) {
            int hashIndex = token.indexOf('#');
            String valueText = token.substring(0, hashIndex);
            // "X" marks an undefined word, recorded with a sentinel value.
            // Note: 0xdeadbeef is an int literal and sign-extends on
            // assignment to the long slot.
            values[i] = valueText.equals("X") ? 0xdeadbeef : Long.parseLong(valueText);
            types[i] = Byte.parseByte(token.substring(hashIndex + 1));
        } else {
            values[i] = token.equals("X") ? 0xdeadbeef : Long.parseLong(token);
        }
    }
}
示例9: loadConstants
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Loads the key/value constants from the constants file on the cRIO into
 * {@code CONSTANTS}. Lines starting with '#' are skipped; every other line
 * with at least two space-separated tokens contributes its first two tokens
 * as a key/value pair. Failures are reported to stdout and leave whatever
 * was loaded so far.
 */
public static void loadConstants() {
    try {
        //get a connection to the constants file and read it
        final String fileName = "file:///" + CONSTANTS_FILE_NAME;
        printIfDebug("Opening constants file: " + fileName);
        FileConnection commandFileConnection = (FileConnection) Connector.open(fileName, Connector.READ);
        DataInputStream commandFileStream = commandFileConnection.openDataInputStream();
        StringBuffer fileContentsBuffer = new StringBuffer((int) commandFileConnection.fileSize());
        //read characters from the file until end of file is reached
        byte[] buff = new byte[255];
        int bytesRead;
        while ((bytesRead = commandFileStream.read(buff)) != -1) {
            // Fix: only append the bytes actually read. Previously the whole
            // 255-byte buffer was appended, injecting stale/NUL bytes whenever
            // read() filled less than the full buffer.
            fileContentsBuffer.append(new String(buff, 0, bytesRead));
        }
        // Fix: release the stream and connection once reading is complete.
        commandFileStream.close();
        commandFileConnection.close();
        String fileContents = fileContentsBuffer.toString();
        printIfDebug("Constants file output: " + fileContents);
        StringTokenizer lineTokenizer = new StringTokenizer(fileContents, "\n");
        CONSTANTS = new Vector(lineTokenizer.countTokens());
        //for each line, split into space-separated tokens
        while (lineTokenizer.hasMoreTokens()) {
            String line = lineTokenizer.nextToken().trim();
            if (line.startsWith("#")) {
                continue;
            }
            StringTokenizer spaceTokenizer = new StringTokenizer(line, " ");
            //map the first two tokens
            if (spaceTokenizer.countTokens() > 1) {
                final String key = spaceTokenizer.nextToken().trim();
                final String value = spaceTokenizer.nextToken().trim();
                CONSTANTS.addElement(new Constant(key, value));
                printIfDebug("Put constant: " + key + ": " + value + ", of type " + Constant.TYPE_NAMES[((Constant) CONSTANTS.lastElement()).getType()]);
            }
        }
    } catch (Exception ex) {
        System.out.println("Could not load file " + CONSTANTS_FILE_NAME + ". Are you sure it is in the root directory of the cRIO?");
    }
}
示例10: tokenizeData
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Separates input String into many Strings based on the delimiter given
 * @param input String to be tokenized
 * @return String Array of Tokenized Input String
 */
public synchronized String[] tokenizeData(String input) {
    StringTokenizer splitter = new StringTokenizer(input, String.valueOf(delimiter));
    String[] tokens = new String[splitter.countTokens()];
    int index = 0;
    // Drain the tokenizer into the pre-sized array.
    while (splitter.hasMoreTokens()) {
        tokens[index++] = splitter.nextToken();
    }
    return tokens;
}
示例11: tokenizeData
import com.sun.squawk.util.StringTokenizer; //导入依赖的package包/类
/**
 * Separates input String into many Strings based on the delimiter given
 * @param input String to be tokenized
 * @return String Array of Tokenized Input String
 */
public String[] tokenizeData(String input) {
    String delimiterText = String.valueOf(delimiter);
    StringTokenizer st = new StringTokenizer(input, delimiterText);
    int total = st.countTokens();
    String[] result = new String[total];
    // countTokens() sizes the array exactly, so nextToken() never underflows.
    for (int idx = 0; idx < total; idx++) {
        result[idx] = st.nextToken();
    }
    return result;
}