@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        // Stash the real token and splice a synthetic semicolon in ahead of it.
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input),
                PainlessLexer.SEMICOLON, ";", Lexer.DEFAULT_TOKEN_CHANNEL,
                next.getStartIndex(), next.getStopIndex(),
                next.getLine(), next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
private static <T extends Parser> T makeBasicParser(Class<T> parserClass,
        ANTLRInputStream stream, String parsedObjectName, List<AntlrError> errors) {
    Lexer lexer;
    Parser parser;
    if (parserClass.isAssignableFrom(SQLParser.class)) {
        lexer = new SQLLexer(stream);
        parser = new SQLParser(new CommonTokenStream(lexer));
    } else if (parserClass.isAssignableFrom(IgnoreListParser.class)) {
        lexer = new IgnoreListLexer(stream);
        parser = new IgnoreListParser(new CommonTokenStream(lexer));
    } else {
        throw new IllegalArgumentException("Unknown parser class: " + parserClass);
    }
    CustomAntlrErrorListener err = new CustomAntlrErrorListener(parsedObjectName, errors);
    lexer.removeErrorListeners();
    lexer.addErrorListener(err);
    parser.removeErrorListeners();
    parser.addErrorListener(err);
    return parserClass.cast(parser);
}
public static <L extends Lexer, P extends Parser> P newParser(
        Function<CharStream, L> lexerFactory,
        Function<TokenStream, P> parserFactory,
        String input,
        boolean useBailErrorStrategy,
        boolean removeErrorListeners) {
    CharStream charStream = new ANTLRInputStream(input);
    L lexer = lexerFactory.apply(charStream);
    if (removeErrorListeners) {
        lexer.removeErrorListeners();
    }
    TokenStream tokenStream = new CommonTokenStream(lexer);
    P parser = parserFactory.apply(tokenStream);
    if (useBailErrorStrategy) {
        parser.setErrorHandler(new BailErrorStrategy());
    }
    if (removeErrorListeners) {
        parser.removeErrorListeners();
    }
    return parser;
}
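// Usage sketch for newParser, assuming hypothetical ANTLR-generated
// JsonLexer/JsonParser classes; any generated lexer/parser pair whose
// constructors take CharStream and TokenStream works the same way.
static void newParserExample() {
    JsonParser parser = newParser(
            JsonLexer::new,    // Function<CharStream, JsonLexer>
            JsonParser::new,   // Function<TokenStream, JsonParser>
            "{ \"answer\": 42 }",
            true,              // bail out on the first syntax error
            true);             // drop ANTLR's default console listeners
    ParseTree tree = parser.json(); // 'json' is the assumed start rule
}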
public LangDescriptor(String name,
                      String corpusDir,
                      String fileRegex,
                      Class<? extends Lexer> lexerClass,
                      Class<? extends Parser> parserClass,
                      String startRuleName,
                      int indentSize,
                      int singleLineCommentType) {
    this.name = name;
    this.corpusDir = corpusDir;
    this.fileRegex = fileRegex;
    this.lexerClass = lexerClass;
    this.parserClass = parserClass;
    this.startRuleName = startRuleName;
    this.indentSize = indentSize;
    this.singleLineCommentType = singleLineCommentType;
}
/**
 * Gets the name by which a token can be referenced in the generated code.
 * For tokens defined in a {@code tokens{}} block or via a lexer rule, this
 * is the declared name of the token. For token types generated by the use
 * of a string literal within a parser rule of a combined grammar, this is
 * the automatically generated token type which includes the
 * {@link #AUTO_GENERATED_TOKEN_NAME_PREFIX} prefix. For types which are not
 * associated with a defined token, this method returns
 * {@link #INVALID_TOKEN_NAME}.
 *
 * @param ttype The token type.
 * @return The name of the token with the specified type.
 */
public String getTokenName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() && ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE ) {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if ( ttype == Token.EOF ) {
        return "EOF";
    }
    if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
        return typeToTokenList.get(ttype);
    }
    return INVALID_TOKEN_NAME;
}
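// Illustration of the naming cases above, using a tiny throwaway combined
// grammar (the grammar text is hypothetical; Grammar here is
// org.antlr.v4.tool.Grammar, whose String constructor is meant for testing):
static void tokenNameExample() throws Exception {
    Grammar g = new Grammar(
            "grammar T;\n" +
            "s : ID ';' ;\n" +
            "ID : [a-z]+ ;\n");
    g.getTokenName(Token.EOF);              // "EOF"
    g.getTokenName(g.getTokenType("ID"));   // "ID" -- declared lexer rule
    g.getTokenName(g.getTokenType("';'"));  // auto-generated "T__0"-style name
    g.getTokenName(Integer.MAX_VALUE);      // INVALID_TOKEN_NAME
}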
/** Return a string representing the escaped char for code c. E.g., if c
 *  has value 0x100, you will get "\u0100". ASCII gets the usual
 *  char (non-hex) representation. Control characters are spit out
 *  as unicode. While this is specially set up for returning Java strings,
 *  it can be used by any language target that has the same syntax. :)
 */
public static String getANTLRCharLiteralForChar(int c) {
    if ( c < Lexer.MIN_CHAR_VALUE ) {
        return "'<INVALID>'";
    }
    if ( c < ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c] != null ) {
        return '\'' + ANTLRLiteralCharValueEscape[c] + '\'';
    }
    if ( Character.UnicodeBlock.of((char) c) == Character.UnicodeBlock.BASIC_LATIN &&
         !Character.isISOControl((char) c) ) {
        if ( c == '\\' ) {
            return "'\\\\'";
        }
        if ( c == '\'' ) {
            return "'\\''";
        }
        return '\'' + Character.toString((char) c) + '\'';
    }
    // turn on the bit above max "\uFFFF" value so that we pad with zeros
    // then only take last 4 digits
    String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
    String unicodeStr = "'\\u" + hex + "'";
    return unicodeStr;
}
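// Sanity check of the escaping rules above (run with -ea); each expected
// string follows directly from one branch of getANTLRCharLiteralForChar.
static void charLiteralExamples() {
    assert getANTLRCharLiteralForChar('a').equals("'a'");          // plain ASCII
    assert getANTLRCharLiteralForChar('\n').equals("'\\n'");       // known escape
    assert getANTLRCharLiteralForChar('\'').equals("'\\''");       // quote escaped
    assert getANTLRCharLiteralForChar(0x100).equals("'\\u0100'");  // padded hex form
    assert getANTLRCharLiteralForChar(-1).equals("'<INVALID>'");   // below MIN_CHAR_VALUE
}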
private List<Token> getLeadingComments(Token token) {
    List<Token> hiddenTokens = tokenStream.getHiddenTokensToLeft(token.getTokenIndex(), Lexer.HIDDEN);
    if (hiddenTokens == null || hiddenTokens.isEmpty()) {
        return Collections.emptyList();
    }
    List<Token> comments = new ArrayList<>(hiddenTokens.size());
    for (Token hiddenToken : hiddenTokens) {
        if (isComment(hiddenToken) && !trailingDocTokenIndexes.get(hiddenToken.getTokenIndex())) {
            comments.add(hiddenToken);
        }
    }
    return comments;
}
/**
 * Read comments following the given token, until the first newline is encountered.
 *
 * INVARIANT:
 * Assumes that the parse tree is being walked top-down, left to right!
 *
 * Trailing-doc tokens are marked as such, so that subsequent searches for "leading"
 * doc don't grab tokens already used as "trailing" doc. If the walk order is *not*
 * top-down, left-to-right, then the assumption underpinning the separation of leading
 * and trailing comments is broken.
 *
 * @param endToken the token from which to search for trailing comment tokens.
 * @return a list, possibly empty, of all trailing comment tokens.
 */
private List<Token> getTrailingComments(Token endToken) {
    List<Token> hiddenTokens = tokenStream.getHiddenTokensToRight(endToken.getTokenIndex(), Lexer.HIDDEN);
    if (hiddenTokens == null || hiddenTokens.isEmpty()) {
        return Collections.emptyList();
    }
    Token maybeTrailingDoc = hiddenTokens.get(0); // only one trailing comment is possible
    if (isComment(maybeTrailingDoc)) {
        trailingDocTokenIndexes.set(maybeTrailingDoc.getTokenIndex());
        return Collections.singletonList(maybeTrailingDoc);
    }
    return Collections.emptyList();
}
public static Pair<Parser, Lexer> parsePHP(String filePath) {
    AntlrCaseInsensitiveFileStream input;
    try {
        input = new AntlrCaseInsensitiveFileStream(filePath);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    PHPLexer lexer = new PHPLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    PHPParser parser = new InterruptablePHPParser(tokens, filePath);
    /* turn on SLL prediction mode to speed up parsing */
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    return new Pair<Parser, Lexer>(parser, lexer);
}
public static Document processFile(String filePath) {
    Pair<Parser, Lexer> pl = parsePHP(filePath);
    if (pl == null) {
        return null; // I/O failure already reported by parsePHP
    }
    PHPParser parser = (PHPParser) pl.a;
    parser.setBuildParseTree(true);
    /*
     * htmlDocument is the start rule (the top-level rule)
     * for the PHP grammar
     */
    ParserRuleContext tree = parser.htmlDocument();
    List<String> ruleNames = Arrays.asList(parser.getRuleNames());
    Map<Integer, String> invTokenMap = getInvTokenMap(parser);
    TokenStream tokenStream = parser.getTokenStream();
    ParseTreeDOMSerializer ptSerializer = new ParseTreeDOMSerializer(ruleNames, invTokenMap, tokenStream);
    ParseTreeWalker.DEFAULT.walk(ptSerializer, tree);
    return ptSerializer.getDOMDocument();
}
/**
 * Parses the supplied input and walks the resulting parse tree with an
 * {@link OTLDListener}, which is then returned.
 *
 * @param reader input to parse
 * @return the walked OTLDListener; if parsing failed, its error list is populated instead
 * @throws IOException if the input cannot be read
 */
public static OTLDListener parseFile(InputStream reader) throws IOException {
    OTLDErrorListener errorListener = new OTLDErrorListener();
    ANTLRInputStream stream = new ANTLRInputStream(reader);
    Lexer lexer = new otldLexer(stream);
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);
    TokenStream tokens = new CommonTokenStream(lexer);
    otldParser parser = new otldParser(tokens);
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);
    ParseTree tree = parser.program();
    OTLDListener railroad = new OTLDListener();
    if (errorListener.getErrors().isEmpty()) {
        ParseTreeWalker walker = new ParseTreeWalker();
        walker.walk(railroad, tree);
    } else {
        railroad.errors.addAll(errorListener.getErrors());
    }
    return railroad;
}
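// Usage sketch: parse a program from the classpath and check for errors.
// The resource path and the enclosing ParserDemo class are hypothetical;
// the 'errors' field access mirrors how parseFile itself populates it.
static void parseFileExample() throws IOException {
    try (InputStream in = ParserDemo.class.getResourceAsStream("/example.otld")) {
        OTLDListener listener = parseFile(in);
        if (!listener.errors.isEmpty()) {
            System.err.println("parse failed: " + listener.errors);
        }
    }
}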
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
    this.snapshot = snapshot;
    String text = snapshot.getText().toString();
    ANTLRInputStream input = new ANTLRInputStream(text);
    Lexer lexer = new EditorConfigLexer(input);
    lexer.removeErrorListeners();
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    parser = new EditorConfigParser(tokens);
    parser.removeErrorListeners();
    syntaxErrors = new ArrayList<>();
    EditorConfigErrorListener errorListener = new EditorConfigErrorListener(syntaxErrors);
    parser.addErrorListener(errorListener);
    EditorConfigParser.FileContext root = parser.file();
    result = new ECParserResult(snapshot, parser, root);
}
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                        int line, int charPositionInLine, String msg, RecognitionException e) {
    if ( offendingSymbol == null ) {
        // Lexer errors carry no offending token; synthesize one from the current char.
        final Lexer lexer = (Lexer) recognizer;
        int i = lexer.getCharIndex();
        final int n = lexer.getInputStream().size();
        if (i >= n) {
            i = n - 1;
        }
        final String text = lexer.getInputStream().getText(new Interval(i, i));
        CommonToken t = (CommonToken) lexer.getTokenFactory().create(Token.INVALID_TYPE, text);
        t.setStartIndex(i);
        t.setStopIndex(i);
        t.setLine(line);
        t.setCharPositionInLine(charPositionInLine);
        offendingSymbol = t;
    }
    // System.out.println("lex error: " + offendingSymbol);
    issues.add(new Issue(msg, (Token) offendingSymbol));
}
boolean isContext(Token token, int offset, boolean allowInStrings, boolean allowInActions) {
    if (token == null) {
        return false;
    }
    switch (token.getType()) {
    case GrammarLexer.LEXER_CHAR_SET:
    case GrammarLexer.ACTION_COMMENT:
        return false;
    case GrammarLexer.STRING_LITERAL:
    case GrammarLexer.DOUBLE_QUOTE_STRING_LITERAL:
        return allowInStrings;
    case GrammarLexer.ARG_ACTION_WORD:
    case GrammarLexer.ACTION_WORD:
        return allowInActions;
    case GrammarLexer.WS:
        return true;
    default:
        return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
    }
}
static boolean isGoContext(Token token, int offset, boolean allowInStrings) {
    if (token == null) {
        return false;
    }
    switch (token.getType()) {
    case GoLexer.COMMENT:
        return false;
    case GoLexer.CharLiteral:
    case GoLexer.StringLiteral:
        return allowInStrings;
    case GoLexer.WS:
    case GoLexer.NEWLINE:
        return true;
    default:
        return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
    }
}
@Override
public final void syntaxError(final Recognizer<?, ?> recognizer, final Object offendingSymbol,
                              final int line, final int charPositionInLine, final String msg,
                              final RecognitionException e) {
    String input;
    if (recognizer instanceof Lexer) {
        final CharStream cs = ((Lexer) recognizer).getInputStream();
        input = cs.getText(new Interval(0, cs.size()));
    } else if (recognizer instanceof Parser) {
        final TokenStream tokens = ((Parser) recognizer).getInputStream();
        input = tokens != null ? tokens.getText() : "<unknown input>";
    } else {
        input = "<unknown input>";
    }
    throw new AntlrParseException(input, line, charPositionInLine, msg);
}
/**
 * Execute the actions encapsulated by this executor within the context of a
 * particular {@link Lexer}.
 *
 * <p>This method calls {@link IntStream#seek} to set the position of the
 * {@code input} {@link CharStream} prior to calling
 * {@link LexerAction#execute} on a position-dependent action. Before the
 * method returns, the input position will be restored to the same position
 * it was in when the method was invoked.</p>
 *
 * @param lexer The lexer instance.
 * @param input The input stream which is the source for the current token.
 * When this method is called, the current {@link IntStream#index} for
 * {@code input} should be the start of the following token, i.e. 1
 * character past the end of the current token.
 * @param startIndex The token start index. This value may be passed to
 * {@link IntStream#seek} to set the {@code input} position to the beginning
 * of the token.
 */
public void execute(@NotNull Lexer lexer, CharStream input, int startIndex) {
    boolean requiresSeek = false;
    int stopIndex = input.index();
    try {
        for (LexerAction lexerAction : lexerActions) {
            if (lexerAction instanceof LexerIndexedCustomAction) {
                int offset = ((LexerIndexedCustomAction) lexerAction).getOffset();
                input.seek(startIndex + offset);
                lexerAction = ((LexerIndexedCustomAction) lexerAction).getAction();
                requiresSeek = (startIndex + offset) != stopIndex;
            } else if (lexerAction.isPositionDependent()) {
                input.seek(stopIndex);
                requiresSeek = false;
            }
            lexerAction.execute(lexer);
        }
    } finally {
        if (requiresSeek) {
            input.seek(stopIndex);
        }
    }
}
public Archetype parse(Reader reader) throws IOException {
    try {
        CharStream charStream = new ANTLRInputStream(reader);
        Lexer lexer = new adlLexer(charStream);
        adlParser parser = new adlParser(new BufferedTokenStream(lexer));
        AccumulatingErrorListener errorHandler = new AccumulatingErrorListener();
        parser.removeErrorListeners();
        parser.addErrorListener(errorHandler);
        adlParser.AdlContext context = parser.adl();
        if (!errorHandler.getErrors().isEmpty()) {
            throw new AdlParserException(Joiner.on("\n").join(errorHandler.getErrors()));
        }
        AdlTreeParser treeParser = new AdlTreeParser();
        return treeParser.parseAdl(context);
    } finally {
        reader.close();
    }
}
public CSSTokenRecovery(Lexer lexer, CharStream input, CSSLexerState ls, Logger log) {
    this.lexer = lexer;
    this.input = input;
    // this.state = state;
    this.ls = ls;
    this.log = log;
    this.expectedToken = new Stack<Integer>();
    this.eof = false;
    lexerTypeMapper = CSSToken.createDefaultTypeMapper(lexer.getClass());
    typeMapper = new CSSToken.TypeMapper(CSSTokenRecovery.class, lexer.getClass(),
            "APOS", "QUOT", "RPAREN", "RCURLY", "IMPORT", "CHARSET", "STRING", "INVALID_STRING");
}
public static <P extends Parser> P getParser(Class<? extends Lexer> lexerClass,
                                             Class<P> parserClass, String source) {
    Lexer lexer = getLexer(lexerClass, source);
    TokenStream tokens = new CommonTokenStream(lexer);
    P parser;
    try {
        parser = parserClass.getConstructor(TokenStream.class).newInstance(tokens);
    } catch (Exception e) {
        throw new IllegalArgumentException("couldn't invoke parser constructor", e);
    }
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    parser.removeErrorListeners(); // don't spit to stderr
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.addErrorListener(new AntlrFailureListener());
    return parser;
}
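// Usage sketch with hypothetical generated ExprLexer/ExprParser classes; the
// exact-ambiguity detection configured above makes this handy in grammar tests.
static void getParserExample() {
    ExprParser parser = getParser(ExprLexer.class, ExprParser.class, "1 + 2 * 3");
    ParseTree tree = parser.expr(); // 'expr' is the assumed start rule
    System.out.println(tree.toStringTree(parser));
}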
private <L extends Lexer> void warmupAndRun(Class<L> lexerClass, String source,
                                            Function<? super L, Tokens> lexerToIter, double multiplier) {
    System.out.printf("[%s]: %d tokens in %d chars%n",
            lexerClass.getSimpleName(), countTokens(lexerClass, source), source.length());
    for (int i = 0; i < WARMUP_REPS; ++i) {
        timedRuns(lexerClass, source, "warmup " + i,
                Math.round(WARMUP * multiplier * globalMultiplier), lexerToIter);
    }
    System.out.println();
    System.out.println("Starting main runs...");
    double time = timedRuns(lexerClass, source, "runs",
            Math.round(RUNS * multiplier * globalMultiplier), lexerToIter);
    System.out.println();
    System.out.println();
    fail(time + " ms per run"); // easy reporting.
}
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) {
    this.snapshot = snapshot;
    Document document = snapshot.getSource().getDocument(true);
    syntaxErrors = (List<SyntaxError>) document.getProperty("syntaxErrors");
    if (syntaxErrors == null) {
        syntaxErrors = new ArrayList<>();
        document.putProperty("syntaxErrors", syntaxErrors);
    }
    embeddedOffset = snapshot.getOriginalOffset(0);
    // Logger.Log("macparse: " + syntaxErrors.size() + " - " + embeddedOffset);
    if (embeddedOffset <= 0) {
        syntaxErrors.clear();
    }
    ANTLRInputStream input = new ANTLRInputStream(snapshot.getText().toString());
    Lexer lexer = new macLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    macParser = new macParser(tokens);
    macParser.removeErrorListeners();
    macParser.addErrorListener(new ErrorListener(syntaxErrors, embeddedOffset));
    try {
        ProgContext prog = macParser.prog();
    } catch (RecognitionException ex) {
        ex.printStackTrace();
    }
}
@Override
public void save(byte[] data) {
    ANTLRInputStream input = new ANTLRInputStream(new String(data, charset));
    Lexer lexer = new clsLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    clsParser clsParser = new clsParser(tokens);
    try {
        clsParser.ProgContext prog = clsParser.prog();
        ParseTreeWalker walker = new ParseTreeWalker();
        CLSParserListerer4Save listener = new CLSParserListerer4Save(clsParser, db, cls);
        walker.walk(listener, prog);
        cls._save();
        db.utilities().compileCacheClass(getName(), "cdfk-u");
    } catch (CacheException | RecognitionException ex) {
        System.out.println("SaveException: " + ex.getLocalizedMessage());
        // ex.printStackTrace();
    }
}
public ANTLRAssistBehavior(Class<? extends Parser> parserClass, Class<? extends Lexer> lexerClass,
                           String[] grammarFiles, String tokenFile, String ruleName) {
    this.lexerClass = lexerClass;
    this.parserClass = parserClass;
    codeAssist = new CodeAssist(lexerClass, grammarFiles, tokenFile) {

        @Override
        protected List<InputSuggestion> suggest(ParentedElement element, String matchWith) {
            return ANTLRAssistBehavior.this.suggest(element, matchWith);
        }

        @Override
        protected List<String> getHints(ParentedElement expectedElement, String matchWith) {
            return ANTLRAssistBehavior.this.getHints(expectedElement, matchWith);
        }

        @Override
        protected InputSuggestion wrapAsSuggestion(ParentedElement expectedElement,
                                                   String suggestedLiteral, boolean complete) {
            return ANTLRAssistBehavior.this.wrapAsSuggestion(expectedElement, suggestedLiteral, complete);
        }

        @Override
        protected int getEndOfMatch(ElementSpec spec, String content) {
            return ANTLRAssistBehavior.this.getEndOfMatch(spec, content);
        }

    };
    this.ruleName = ruleName;
}
private Constructor<? extends Lexer> getLexerCtor() {
    if (lexerCtor == null) {
        try {
            lexerCtor = lexerClass.getConstructor(CharStream.class);
        } catch (NoSuchMethodException | SecurityException e) {
            throw new RuntimeException(e);
        }
    }
    return lexerCtor;
}
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                        int line, int charPositionInLine, String msg, RecognitionException e) {
    String position = "line " + line + ", pos " + charPositionInLine;
    String charText = "";
    String hint = "";
    if (recognizer instanceof Lexer) {
        Lexer lexer = (Lexer) recognizer;
        String fullText = lexer.getInputStream().toString();
        // getCharIndex() can point one past the end when the error is at EOF
        int index = Math.min(lexer.getCharIndex(), fullText.length() - 1);
        charText = String.valueOf(fullText.charAt(index));
        hint = AntlrUtils.underlineError(fullText, charText, line, charPositionInLine);
    }
    throw new LexicalErrorException(position + " near " + charText + " : " + msg + "\n" + hint, e);
}
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
    Assert.notNull(snapshot);
    this.snapshot = snapshot;
    CharStream input = new ANTLRInputStream(snapshot.getText().toString());
    Lexer lexer = new ProtobufLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    ProtobufParser parser = new ProtobufParser(tokens);
    parser.removeErrorListeners();
    ParsingErrorListener listener = new ParsingErrorListener();
    parser.addErrorListener(listener);
    ParseTree tree = parser.proto();
    // TODO def and ref phases
    this.errors.clear();
    this.errors.addAll(listener.errors());
}
/**
 * @requires lexer != null
 * @modifies lexer
 * @effects Applies this to lexer
 */
public void apply(Lexer lexer) {
    Assert.notNull(lexer);
    lexer._mode = mode;
    lexer._modeStack.clear();
    if (modeStack != null) {
        lexer._modeStack.addAll(modeStack);
    }
}
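// The capture side of this mode snapshot is not shown above; a minimal sketch
// of what it presumably looks like, with the field names inferred from apply()
// and a hypothetical constructor name:
public LexerModeSnapshot(Lexer lexer) {
    this.mode = lexer._mode;
    // copy rather than alias, so later lexing can't mutate the snapshot
    this.modeStack = lexer._modeStack.isEmpty() ? null : new IntegerStack(lexer._modeStack);
}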
/** Compute a document difference metric 0-1.0 between two documents that
 *  are identical other than (likely) the whitespace and comments.
 *
 *  1.0 means the docs are maximally different and 0 means docs are identical.
 *
 *  The Levenshtein distance between the docs counts only
 *  whitespace diffs as the non-WS content is identical.
 *  Levenshtein distance is bounded by 0..max(len(doc1),len(doc2)) so
 *  we normalize the distance by dividing by max WS count.
 *
 *  TODO: can we simplify this to a simple walk with two
 *  cursors through the original vs formatted counting
 *  mismatched whitespace? real text acts like anchors.
 */
public static double docDiff(String original, String formatted,
                             Class<? extends Lexer> lexerClass) throws Exception {
    // Grammar must strip all but real tokens and whitespace (and put that on hidden channel)
    CodeBuffTokenStream original_tokens = Tool.tokenize(original, lexerClass);
    // String s = original_tokens.getText();
    CodeBuffTokenStream formatted_tokens = Tool.tokenize(formatted, lexerClass);
    // String t = formatted_tokens.getText();

    // walk token streams and examine whitespace in between tokens
    int i = 1; // LT() lookahead is 1-based; starting below 1 would terminate the loop immediately
    int ws_distance = 0;
    int original_ws = 0;
    int formatted_ws = 0;
    while ( true ) {
        Token ot = original_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ot == null || ot.getType() == Token.EOF ) break;
        List<Token> ows = original_tokens.getHiddenTokensToLeft(ot.getTokenIndex());
        original_ws += tokenText(ows).length();

        Token ft = formatted_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ft == null || ft.getType() == Token.EOF ) break;
        List<Token> fws = formatted_tokens.getHiddenTokensToLeft(ft.getTokenIndex());
        formatted_ws += tokenText(fws).length();

        ws_distance += whitespaceEditDistance(tokenText(ows), tokenText(fws));
        i++;
    }
    // it's probably ok to ignore ws diffs after last real token
    int max_ws = Math.max(original_ws, formatted_ws);
    double normalized_ws_distance = ((float) ws_distance) / max_ws;
    return normalized_ws_distance;
}
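// Usage sketch, assuming a hypothetical ANTLR-generated JavaLexer matching the
// documents' language; 0.0 means identical layout, and values approach 1.0 as
// the whitespace diverges.
static void docDiffExample(String original, String formatted) throws Exception {
    double d = docDiff(original, formatted, JavaLexer.class);
    System.out.printf("normalized whitespace distance: %.3f%n", d);
}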
public static CodeBuffTokenStream tokenize(String doc, Class<? extends Lexer> lexerClass) throws Exception {
    ANTLRInputStream input = new ANTLRInputStream(doc);
    Lexer lexer = getLexer(lexerClass, input);
    CodeBuffTokenStream tokens = new CodeBuffTokenStream(lexer);
    tokens.fill();
    return tokens;
}
public static List<Token> getRealTokens(CommonTokenStream tokens) {
    List<Token> real = new ArrayList<>();
    for (int i = 0; i < tokens.size(); i++) {
        Token t = tokens.get(i);
        if ( t.getType() != Token.EOF && t.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL ) {
            real.add(t);
        }
    }
    return real;
}
public List<Token> getRealTokens(int from, int to) {
    List<Token> real = new ArrayList<Token>();
    for (int i = from; i <= to; i++) {
        Token t = tokens.get(i);
        if ( t.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL ) {
            real.add(t);
        }
    }
    if ( real.isEmpty() ) {
        return null;
    }
    return real;
}
public GUIController(List<TokenPositionAnalysis> analysisPerToken, InputDocument testDoc,
                     String formattedText, Class<? extends Lexer> lexerClass) {
    this.analysisPerToken = analysisPerToken;
    this.formattedText = formattedText;
    this.lexerClass = lexerClass;
    this.testDoc = testDoc;
    this.scope = new BuffScope();
}
/** Given a token type, get a meaningful name for it such as the ID
 *  or string literal. If this is a lexer and the ttype is in the
 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
 */
public String getTokenDisplayName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() && ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE ) {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if ( ttype == Token.EOF ) {
        return "EOF";
    }
    if ( ttype == Token.INVALID_TYPE ) {
        return INVALID_TOKEN_NAME;
    }
    if (ttype >= 0 && ttype < typeToStringLiteralList.size() && typeToStringLiteralList.get(ttype) != null) {
        return typeToStringLiteralList.get(ttype);
    }
    if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
        return typeToTokenList.get(ttype);
    }
    return String.valueOf(ttype);
}
/** What is the max char value possible for this grammar's target? Use
 *  unicode max if no target defined.
 */
public int getMaxCharValue() {
    return org.antlr.v4.runtime.Lexer.MAX_CHAR_VALUE;
//  if ( generator != null ) {
//      return generator.target.getMaxCharValue(generator);
//  }
//  else {
//      return Label.MAX_CHAR_VALUE;
//  }
}
/**
 * Test with arithmetic expressions grammar
 * @throws java.io.IOException
 */
@Test
public void testArithExpGram() throws IOException {
    Lexer lexer = new bnfLexer(new ANTLRInputStream(getClass().getResourceAsStream("/arithexp.bnf")));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    bnfParser grammarparser = new bnfParser(tokens);
    ParserRuleContext tree = grammarparser.rulelist();
    GeneratorVisitor extractor = new GeneratorVisitor();
    extractor.visit(tree);
    List<String> generatedTests = extractor.getTests();
    Assert.assertEquals(100, generatedTests.size());
}
/**
 * Test with course codes grammar
 * @throws java.io.IOException
 */
@Test
public void testCourseCodeGram() throws IOException {
    Lexer lexer = new bnfLexer(new ANTLRInputStream(getClass().getResourceAsStream("/coursecodes.bnf")));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    bnfParser grammarparser = new bnfParser(tokens);
    ParserRuleContext tree = grammarparser.rulelist();
    GeneratorVisitor extractor = new GeneratorVisitor();
    extractor.visit(tree);
    List<String> generatedTests = extractor.getTests();
    Assert.assertEquals(10, generatedTests.size());
}