001 package sysModel.parser;
002
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StreamTokenizer;

import java.nio.charset.StandardCharsets;
006
007 /**
008 * Wrapper around StreamTokenizer.
009 *
010 * @author Mathias Ricken
011 */
012 public class Lexer {
013 /**
014 * Stream tokenizer.
015 */
016 final StreamTokenizer _tok;
017
018 /**
019 * Make a new parser.
020 *
021 * @param filename file to lex
022 * @throws IOException
023 */
024 public Lexer(String filename) throws IOException {
025 _tok = new StreamTokenizer(new FileReader(filename));
026
027 _tok.wordChars(0, 255);
028 _tok.ordinaryChars(0, ' ');
029 _tok.whitespaceChars(0, ' ');
030 _tok.ordinaryChar('(');
031 _tok.ordinaryChar(')');
032 _tok.parseNumbers();
033 _tok.wordChars('.', '.');
034 _tok.ordinaryChar(',');
035 _tok.eolIsSignificant(false);
036 }
037
038 /**
039 * Get the next token.
040 *
041 * @return next token
042 */
043 public IToken nextToken() {
044 try {
045 switch (_tok.nextToken()) {
046 case StreamTokenizer.TT_WORD:
047 {
048 return new WordToken(_tok.sval);
049 }
050 case StreamTokenizer.TT_NUMBER:
051 {
052 return new NumberToken(_tok.nval);
053 }
054 case '(':
055 {
056 return OpenToken.instance();
057 }
058 case ')':
059 {
060 return CloseToken.instance();
061 }
062 case ',':
063 {
064 return CommaToken.instance();
065 }
066 case StreamTokenizer.TT_EOF:
067 {
068 return EndOfStreamToken.instance();
069 }
070 default:
071 {
072 throw new ParserException("Invalid token");
073 }
074 }
075 }
076 catch (IOException e) {
077 throw new ParserException(e.toString(), e);
078 }
079 }
080 }