/**
 * Entry point: reads the source file named by the first argument, parses it with the
 * ANTLR-generated grammar, and prints the translated program to stdout.
 *
 * @param args the command line arguments
 * @throws java.io.IOException
 * @throws java.net.URISyntaxException
 */
public static void main(String[] args) throws IOException, URISyntaxException {
    if (args.length != 2) {
        System.err.println("Supply two parameters in the following order:\n- file name of the main function\n- name of the main function\n\nFor example: hello.ct main:argc:argv");
        return;
    }
    final URL res = Paths.get(args[0]).toUri().toURL();
    final String entryPoint = args[1];

    // Standard ANTLR pipeline: char stream -> lexer -> token stream -> parser.
    final CharStream inp = CharStreams.fromStream(res.openStream());
    final GrammarLexer lex = new GrammarLexer(inp);
    final TokenStream toks = new CommonTokenStream(lex);
    final GrammarParser parser = new GrammarParser(toks);

    System.out.println(new Translator(res.toURI()).generate(parser.program(), entryPoint));
}
/**
 * Parses the given SQL query into a select-statement parse tree, raising instead of
 * recovering on syntax errors.
 *
 * @throws InvalidQueryException if the given query contains invalid characters or is incomplete
 */
public SqlGrammarParser.SelectStatementContext parse(String query) {
    log.trace("About to parse [{}]", query);
    final long startMillis = currentTimeMillis();

    final CharStream input = CharStreams.fromString(query);
    final SqlGrammarLexer lexer = new SqlGrammarLexer(input);
    final TokenStream tokenStream = new CommonTokenStream(lexer);
    final SqlGrammarParser parser = new SqlGrammarParser(tokenStream);

    // Replace the default console listener with one that raises on syntax errors.
    parser.removeErrorListeners();
    parser.addErrorListener(new RaiseExceptionErrorListener());

    final SqlGrammarParser.SelectStatementContext result = parser.selectStatement();
    log.trace("Successfully parsed [{}] into [{}] in [{}ms]", query, result.toStringTree(parser), currentTimeMillis() - startMillis);
    return result;
}
@Override protected void reportNoViableAlternative(Parser recognizer, NoViableAltException e) { // change error message from default implementation TokenStream tokens = recognizer.getInputStream(); String input; if (tokens != null) { if (e.getStartToken().getType() == Token.EOF) { input = "the end"; } else { input = escapeWSAndQuote(tokens.getText(e.getStartToken(), e.getOffendingToken())); } } else { input = escapeWSAndQuote("<unknown input>"); } String msg = "inadmissible input at " + input; recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e); }
/**
 * "If an operator has whitespace on the right side only, it is treated as a postfix unary
 * operator. As an example, the ++ operator in a++ b is treated as a postfix unary operator."
 *
 * "If an operator has no whitespace on the left but is followed immediately by a dot (.),
 * it is treated as a postfix unary operator. As an example, the ++ operator in a++.b is
 * treated as a postfix unary operator (a++ .b rather than a ++ .b)."
 */
public static boolean isPostfixOp(TokenStream tokens) {
    // Index of the last token belonging to the operator, or -1 if none found.
    int stop = getLastOpTokenIndex(tokens);
    if ( stop==-1 ) return false;

    int start = tokens.index();
    Token prevToken = tokens.get(start-1); // includes hidden-channel tokens
    Token nextToken = tokens.get(stop+1);
    boolean prevIsWS = isLeftOperatorWS(prevToken);
    boolean nextIsWS = isRightOperatorWS(nextToken);
    // Postfix iff: no whitespace on the left AND (whitespace on the right OR next is '.').
    // && binds tighter than ||, so this reads: (!prevIsWS && nextIsWS) || (!prevIsWS && DOT).
    boolean result = !prevIsWS && nextIsWS || !prevIsWS && nextToken.getType()==SwiftParser.DOT;
    // Kept for the debug print below; otherwise unused.
    String text = tokens.getText(Interval.of(start, stop));
    //System.out.println("isPostfixOp: '"+prevToken+"','"+text+"','"+nextToken+"' is "+result);
    return result;
}
/**
 * Parses a type-expression string into an EObject via the TypeExpressionBuilder.
 * Syntax errors from both lexer and parser are collected and appended to the
 * scope's resource errors rather than printed to the console.
 */
public EObject parse(final String typeExpression, final Scope scope) {
    final CharStream charStream = CharStreams.fromString(typeExpression);
    final TypeExpressionLexer lexer = new TypeExpressionLexer(charStream);
    final TokenStream tokenStream = new CommonTokenStream(lexer);
    final TypeExpressionParser typeExpressionParser = new TypeExpressionParser(tokenStream);

    final ParserErrorCollector errorCollector = new ParserErrorCollector();
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorCollector);
    typeExpressionParser.removeErrorListeners();
    typeExpressionParser.addErrorListener(errorCollector);

    final TypeExpressionParser.Type_exprContext typeExpr = typeExpressionParser.type_expr();
    final EObject anyType = new TypeExpressionBuilder(scope, ARRAY_TYPE).visit(typeExpr);
    scope.getResource().getErrors().addAll(errorCollector.getErrors());
    return anyType;
}
@Override
protected void doLoad(final InputStream inputStream, final Map<?, ?> options) throws IOException {
    // Buffer the stream so the root constructor can be determined up front.
    final BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
    final Optional<AbstractConstructor> optionalRootConstructor = getRootConstructor(bufferedInputStream);
    if (optionalRootConstructor.isPresent()) {
        final AbstractConstructor rootConstructor = optionalRootConstructor.get();
        // NOTE(review): the lexer reads from the resource 'uri', not from the passed
        // stream — the buffered stream is only used to pick the constructor. Confirm.
        final RAMLCustomLexer lexer = new RAMLCustomLexer(uri, getURIConverter());
        final TokenStream tokenStream = new CommonTokenStream(lexer);
        final RAMLParser parser = new RAMLParser(tokenStream);
        parser.removeErrorListeners();
        final ParserErrorCollector errorCollector = new ParserErrorCollector();
        parser.addErrorListener(errorCollector);
        try {
            rootConstructor.construct(parser, resourceScope);
            validate();
        } catch (final Exception e) {
            // NOTE(review): collected syntax errors are only transferred to the
            // resource when construction/validation throws; on a successful run any
            // collected errors are silently dropped — confirm this is intended.
            getErrors().addAll(errorCollector.getErrors());
            throw e;
        }
    }
    // NOTE(review): when no root constructor is found, loading silently produces an
    // empty resource — confirm this is intended.
}
@Ignore
@Test
public void api() {
    // Manual smoke test against a locally checked-out API definition; skipped
    // (assumeTrue) when the file is not present on this machine.
    final File apiFile = new File("/Users/mkoester/Development/commercetools-api-reference/api.raml");
    assumeTrue(apiFile.exists());

    final URI apiUri = URI.createURI(apiFile.toURI().toString());
    final URIConverter uriConverter = new RamlResourceSet().getURIConverter();
    final RAMLCustomLexer lexer = new RAMLCustomLexer(apiUri, uriConverter);
    final TokenStream tokenStream = new CommonTokenStream(lexer);
    final RAMLParser parser = new RAMLParser(tokenStream);

    final Resource resource = new RamlResourceSet().createResource(apiUri);
    final Scope scope = Scope.of(resource);
    final TypeDeclarationResolver resolver = new TypeDeclarationResolver();
    resolver.resolve(parser.api(), scope);

    assertThat(resource.getErrors()).isEmpty();
}
/**
 * Heuristically decides whether the upcoming tokens look like a class name: walks a
 * dotted chain (IDENT . IDENT . ...) and checks whether the final segment begins
 * with an upper-case code point.
 */
public static boolean isClassName(TokenStream _input) {
    try {
        int i=1;
        Token token = _input.LT(i);
        // Advance two tokens at a time over 'IDENT .' pairs until the last segment.
        // NOTE(review): this mixes 1-based LT() lookahead indices with size() as a
        // bound — verify the bound is correct for the stream implementation in use.
        while (token!=null && i < _input.size() && _input.LT(i+1).getType() == GroovyParser.DOT) {
            i = i + 2;
            token = _input.LT(i);
        }
        if(token==null) return false; // TODO here
        return Character.isUpperCase(Character.codePointAt(token.getText(), 0));
    } catch(Exception e) {
        // Best-effort heuristic: any failure is logged and treated as "not a class name".
        e.printStackTrace();
    }
    return false;
}
/**
 * Builds a parser over {@code input} using the supplied lexer/parser factories.
 *
 * @param lexerFactory         creates the lexer from a char stream
 * @param parserFactory        creates the parser from a token stream
 * @param input                the text to parse
 * @param useBailErrorStrategy if true, abort parsing on the first syntax error
 * @param removeErrorListeners if true, silence the default console error listeners
 * @return the configured parser
 */
public static <L extends Lexer, P extends Parser> P newParser(
        Function<CharStream, L> lexerFactory,
        Function<TokenStream, P> parserFactory,
        String input,
        boolean useBailErrorStrategy,
        boolean removeErrorListeners) {
    // CharStreams.fromString replaces the deprecated ANTLRInputStream (ANTLR >= 4.7).
    CharStream charStream = CharStreams.fromString(input);
    L lexer = lexerFactory.apply(charStream);
    if (removeErrorListeners) {
        lexer.removeErrorListeners();
    }
    TokenStream tokenStream = new CommonTokenStream(lexer);
    P parser = parserFactory.apply(tokenStream);
    if (useBailErrorStrategy) {
        parser.setErrorHandler(new BailErrorStrategy());
    }
    if (removeErrorListeners) {
        parser.removeErrorListeners();
    }
    return parser;
}
/**
 * Parse an interval, for example <tt>[1,-]</tt> or <tt>-</tt> (a wildcard) or <tt>[1,4]</tt>.
 * Only fixed values are allowed, no variables.
 *
 * @param intervalAsString the string to be parsed.
 * @return a LowerBoundedInterval as the runtime representation of interval strings.
 * @throws ParseException in case the string doesn't fit the given fixed-interval grammar.
 */
public static LowerBoundedInterval parse(String intervalAsString) throws ParseException {
    CellExpressionLexer lexer = new CellExpressionLexer(new ANTLRInputStream(intervalAsString));
    CellExpressionParser parser = new CellExpressionParser(new CommonTokenStream(lexer));

    // Fail loudly on syntax errors instead of printing recoverable errors to the console.
    parser.removeErrorListeners();
    parser.addErrorListener(new ThrowingErrorListener());

    try {
        CellExpressionParser.Fixed_intervalContext ctx = parser.fixed_interval();
        if (ctx == null) {
            throw new ParseException(0, 0, "Expected fixed interval");
        }
        return INSTANCE.visit(ctx);
    } catch (ParseRuntimeException runtimeException) {
        // Unwrap the checked exception carried by the listener's runtime wrapper.
        throw runtimeException.getParseException();
    }
}
/**
 * Creates an AstBuilder for the given compilation unit whose parser routes ANTLR
 * recognition errors into the builder's diagnosis reporter instead of the console.
 */
public static AstBuilder createAstBuilder(CompilationUnit source,TokenStream tokens){
    KalangParser p = new KalangParser(tokens);
    AstBuilder sp = new AstBuilder(source, p);
    p.setErrorHandler(new DefaultErrorStrategy() {
        @Override
        public void reportError(Parser recognizer, RecognitionException e) {
            String msg = AntlrErrorString.exceptionString(recognizer, e);
            Token end = e.getOffendingToken();
            Token start;
            RuleContext ctx = e.getCtx();
            // Prefer the offending rule's start token for the reported range;
            // fall back to the offending token itself when no parser rule
            // context is available.
            if(ctx instanceof ParserRuleContext){
                start = ((ParserRuleContext) ctx).getStart();
            }else{
                start = end;
            }
            sp.getDiagnosisReporter().report(Diagnosis.Kind.ERROR, msg,start,end);
            // NOTE(review): super.reportError is not invoked, so the default
            // strategy's duplicate-report guard (inErrorRecoveryMode) does not
            // apply — confirm repeated reports for one recovery are acceptable.
        }
    });
    return sp;
}
/**
 * Parses each Go source file and feeds its parse tree to the tree listener, which
 * populates {@code srcModel}. Failures in individual files are logged and skipped.
 */
private void compileFiles(List<RawFile> files, OOPSourceCodeModel srcModel, List<String> projectFileTypes) {
    for (RawFile file : files) {
        try {
            CharStream charStream = new ANTLRInputStream(file.content());
            GolangLexer lexer = new GolangLexer(charStream);
            TokenStream tokens = new CommonTokenStream(lexer);
            GolangParser parser = new GolangParser(tokens);
            // BUG FIX: configure the parser BEFORE invoking sourceFile(). Previously the
            // bail strategy and SLL prediction mode were set after parsing had already
            // completed, so they had no effect at all.
            parser.setErrorHandler(new BailErrorStrategy());
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            SourceFileContext sourceFileContext = parser.sourceFile();

            ParseTreeWalker walker = new ParseTreeWalker();
            GolangBaseListener listener = new GoLangTreeListener(srcModel, projectFileTypes, file);
            walker.walk(listener, sourceFileContext);
        } catch (Exception e) {
            // Best effort: one bad file must not abort the whole batch.
            e.printStackTrace();
        }
    }
}
/**
 * Parses the given iFlow resource and returns the listener holding any syntax errors,
 * or null when the resource cannot be opened or parsed at all.
 */
private SyntaxErrorListener getSyntaxTreeErrors(String iFlowResource) {
    // BUG FIX: the FileInputStream was previously opened but never closed (resource
    // leak); try-with-resources guarantees closure on every path. Exceptions thrown
    // while opening the resource are caught by the attached catch clause.
    try (InputStream inputStream = new FileInputStream(
            new File(getClass().getResource(iFlowResource).getFile()))) {
        CharStream cs = new ANTLRInputStream(inputStream);
        TokenStream tokenStream = new CommonTokenStream(new WUMLLexer(cs));
        WUMLParser parser = new WUMLParser(tokenStream);
        SyntaxErrorListener errorListener = new SyntaxErrorListener();
        parser.addErrorListener(errorListener);
        // The returned tree is unused; parsing is invoked for its error side effects.
        ParseTree tree = parser.sourceFile();
        return errorListener;
    } catch (Exception e) {
        // Preserve original contract: null signals the resource could not be parsed.
        return null;
    }
}
/**
 * Parses RankPL source into a Program, or returns null when the source contains a
 * syntax error.
 */
public static Program parse(String source) {
    // First pass bails on the first syntax error (fast, but produces no diagnostics).
    RankPLLexer lexer = new RankPLLexer(new ANTLRInputStream(source));
    TokenStream tokens = new CommonTokenStream(lexer);
    RankPLParser parser = new RankPLParser(tokens);
    parser.setErrorHandler(new BailErrorStrategy());
    ConcreteParser classVisitor = new ConcreteParser();

    // Parse
    Program program = null;
    try {
        program = (Program) classVisitor.visit(parser.program());
    } catch (ParseCancellationException e) {
        System.out.println("Syntax error");
        // Second pass with the default (non-bailing) error strategy — presumably so
        // ANTLR's console listener prints detailed error messages (TODO confirm).
        // Its result is intentionally discarded: null is returned regardless.
        lexer = new RankPLLexer(new ANTLRInputStream(source));
        tokens = new CommonTokenStream(lexer);
        parser = new RankPLParser(tokens);
        classVisitor = new ConcreteParser();
        try {
            program = (Program) classVisitor.visit(parser.program());
        } catch (Exception ex) {
            // Ignore
        }
        return null;
    }
    return program;
}
/**
 * Serializes an ANTLR parse tree into a DOM document rooted at an {@code <ast>} element.
 *
 * @param ruleNames   parser rule names, indexed by rule number
 * @param invTokenMap maps token types to their symbolic names
 * @param tokenStream token stream backing the parse tree
 */
public ParseTreeDOMSerializer(List<String> ruleNames, Map<Integer, String> invTokenMap, TokenStream tokenStream) {
    this.tokenStream = tokenStream;
    this.ruleNames = ruleNames;
    this.invTokenMap = invTokenMap;
    nodeStack = new Stack<Element>();
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    try {
        DocumentBuilder builder = factory.newDocumentBuilder();
        Document document = builder.newDocument();
        // FIX: createElement already returns an Element — the previous cast was redundant.
        Element root = document.createElement("ast");
        document.appendChild(root);
        nodeStack.push(root);
        domDoc = document;
    } catch (Exception pce) {
        // Parser configuration failure: domDoc stays null (callers must tolerate this).
        pce.printStackTrace();
    }
}
/**
 * Parses the PHP file at {@code filePath} and serializes its parse tree into a DOM document.
 */
public static Document processFile(String filePath) {
    Pair<Parser, Lexer> pl = parsePHP(filePath);
    PHPParser parser = (PHPParser) pl.a;
    parser.setBuildParseTree(true);
    /*
     * htmlDocument is the start rule (the top-level rule)
     * for the PHP grammar
     */
    ParserRuleContext tree = parser.htmlDocument();

    ParseTreeDOMSerializer ptSerializer = new ParseTreeDOMSerializer(
            Arrays.asList(parser.getRuleNames()),
            getInvTokenMap(parser),
            parser.getTokenStream());
    ParseTreeWalker.DEFAULT.walk(ptSerializer, tree);
    return ptSerializer.getDOMDocument();
}
/**
 * Reports a no-viable-alternative error, describing the offending input span in the
 * error message passed to the recognizer's listeners.
 */
protected void reportNoViableAlternative(Parser recognizer, NoViableAltException e) {
    TokenStream tokens = recognizer.getInputStream();
    String input;
    if (tokens != null) {
        // FIX: use the symbolic Token.EOF constant instead of the magic literal -1.
        if (e.getStartToken().getType() == Token.EOF) {
            input = "<EOF>";
        } else {
            input = tokens.getText(e.getStartToken(), e.getOffendingToken());
        }
    } else {
        input = "<unknown input>";
    }
    String msg = "no viable alternative at input " + this.escapeWSAndQuote(input);
    recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
}
/**
 * Computes the auto-completion context at character {@code index} within {@code text},
 * or null when no completion applies at that position.
 */
public static @Nullable AutoCompletionContext getContext(int index, String text) {
    MplLexer lexer = new MplLexer(new ANTLRInputStream(text));
    lexer.removeErrorListeners();
    MplParser parser = new MplParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();

    FileContext ctx = parser.file();
    AutoCompletionListener listener = new AutoCompletionListener(index);
    try {
        new ParseTreeWalker().walk(listener, ctx);
    } catch (ResultException earlyExit) {
        // The listener aborts the walk via exception as soon as the context is found.
        return earlyExit.getResult();
    }
    return null;
}
/**
 * Parses the supplied input using the OTLDListener and returns it after walking it
 * @param reader input to parse
 * @return walked OTLDListener
 * @throws IOException
 */
public static OTLDListener parseFile(InputStream reader) throws IOException {
    OTLDErrorListener errorListener = new OTLDErrorListener();

    // Collect syntax errors from both lexer and parser into one listener.
    Lexer lexer = new otldLexer(new ANTLRInputStream(reader));
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);

    otldParser parser = new otldParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);

    ParseTree tree = parser.program();
    OTLDListener railroad = new OTLDListener();
    if (errorListener.getErrors().isEmpty()) {
        // Only walk syntactically valid input.
        new ParseTreeWalker().walk(railroad, tree);
    } else {
        railroad.errors.addAll(errorListener.getErrors());
    }
    return railroad;
}
/**
 * {@inheritDoc}
 */
@Override
public void reportNoViableAlternative(@NotNull Parser recognizer, @NotNull NoViableAltException e) {
    TokenStream tokens = recognizer.getInputStream();
    String input;
    // FIX: 'tokens instanceof TokenStream' was a confusing way to spell a null check —
    // the static type is already TokenStream, so instanceof could only be false for null.
    if (tokens != null) {
        if (e.getStartToken().getType() == Token.EOF) {
            input = "<EOF>";
        } else {
            input = getText(tokens, e.getStartToken(), e.getOffendingToken());
        }
    } else {
        input = "<unknown input>";
    }
    String msg = "no viable alternative at input " + escapeWSAndQuote(input);
    recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
}
/**
 * Joins the text of the tokens covered by {@code interval}, separated by single spaces.
 * Negative bounds yield an empty string, the upper bound is clamped to the stream size,
 * and an EOF token terminates the output early.
 */
@NotNull
private String getText(TokenStream tokens, Interval interval) {
    if (interval.a < 0 || interval.b < 0) {
        return "";
    }
    int last = Math.min(interval.b, tokens.size() - 1);

    StringBuilder text = new StringBuilder();
    for (int i = interval.a; i <= last; i++) {
        Token token = tokens.get(i);
        if (token.getType() == Token.EOF) {
            break;
        }
        text.append(token.getText());
        if (i != last) {
            text.append(" ");
        }
    }
    return text.toString();
}
@Test
public void testParseWorkingExamples() throws IOException {
    FileVisitor<Path> workingFilesVisitior = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            System.out.println("Testing parser input from file \"" + file.toString() + "\"");
            // Every working example must parse cleanly; BailErrorStrategy turns any
            // syntax error into an exception that fails this test.
            MiniJLexer lexer = new MiniJLexer(new ANTLRFileStream(file.toString()));
            MiniJParser parser = new MiniJParser(new CommonTokenStream(lexer));
            parser.setErrorHandler(new BailErrorStrategy());
            parser.prog();
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(EXAMPLE_PROGRAM_PATH_WORKING, workingFilesVisitior);
}
@Test
public void testParseFailingExamples() throws IOException {
    FileVisitor<Path> workingFilesVisitior = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            System.out.println("Testing parser input from file \"" + file.toString() + "\"");
            MiniJLexer lexer = new MiniJLexer(new ANTLRFileStream(file.toString()));
            MiniJParser parser = new MiniJParser(new CommonTokenStream(lexer));
            parser.setErrorHandler(new BailErrorStrategy());
            /*
             * Catch all exceptions first, to ensure that every single
             * compilation unit exits with an Exception. Otherwise, this
             * method will return after the first piece of code.
             */
            try {
                parser.prog();
                fail("The example " + file.toString() + " should have failed, but was accepted by the parser.");
            } catch (ParseCancellationException ignored) {
                // Expected: this example is intentionally malformed.
            }
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(EXAMPLE_PROGRAM_PATH_FAILING, workingFilesVisitior);
}
@Test
public void testVisitTypeErrorExamples() throws Exception {
    FileVisitor<Path> failingFilesVisitior = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            // Known-broken example that the type checker cannot handle; skip it.
            if (file.toString().endsWith("LinkedListBUG.java")) {
                return super.visitFile(file, attrs);
            }
            System.out.println("Testing type checker with file \""+file.toString()+"\"");
            ANTLRFileStream reader = new ANTLRFileStream(file.toString());
            // FIX: ANTLRFileStream already implements CharStream — the cast was redundant.
            MiniJLexer lexer = new MiniJLexer(reader);
            TokenStream tokens = new CommonTokenStream(lexer);
            MiniJParser parser = new MiniJParser(tokens);
            ParseTree parseTree = parser.prog();
            ASTVisitor astVisitor = new ASTVisitor();
            Program ast = (Program) astVisitor.visit(parseTree);
            // Run type inference first so inferred types are available to the checker.
            TypeInferenceVisitor typeInferenceVisitor = new TypeInferenceVisitor();
            ast.accept(typeInferenceVisitor);
            TypeCheckVisitor visitor = new TypeCheckVisitor();
            boolean typesCorrect = ast.accept(visitor);
            assertFalse("\"" + file.toString() + "\" passed type check but it shouldn't", typesCorrect);
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(EXAMPLE_PROGRAM_PATH_FAILING, failingFilesVisitior);
}
/**
 * Parses and evaluates the given Stellar expression, {@code rule}.
 * @param rule The Stellar expression to parse and evaluate.
 * @return The Expression, which can be reevaluated without reparsing in different Contexts and Resolvers.
 */
public static StellarCompiler.Expression compile(final String rule) {
    if (rule == null || isEmpty(rule.trim())) {
        return null;
    }

    StellarLexer lexer = new StellarLexer(new ANTLRInputStream(rule));
    lexer.removeErrorListeners();
    lexer.addErrorListener(new ErrorListener());

    StellarParser parser = new StellarParser(new CommonTokenStream(lexer));
    StellarCompiler treeBuilder = new StellarCompiler(
        ArithmeticEvaluator.INSTANCE,
        NumberLiteralEvaluator.INSTANCE,
        ComparisonExpressionWithOperatorEvaluator.INSTANCE
    );
    // The compiler builds the expression incrementally as a parse listener.
    parser.addParseListener(treeBuilder);
    parser.removeErrorListeners();
    parser.addErrorListener(new ErrorListener());
    parser.transformation();

    return treeBuilder.getExpression();
}
@Override
public String visitIncludeFile(QPLParser.IncludeFileContext ctx) {
    // Strip the surrounding quotes from the include-path literal.
    String rawPath = ctx.path.getText().trim();
    String filePath = rawPath.substring(1, rawPath.length() - 1);
    // Resolve relative includes against the including file's directory.
    if (filePath.charAt(0) != '/') {
        filePath = Paths.get(__FILE_PATH__, filePath).toString();
    }
    try {
        ANTLRInputStream stream = new ANTLRFileStream(filePath);
        QPLLexer lexer = new QPLLexer(stream);
        TokenStream tokenStream = new CommonTokenStream(lexer);
        QPLParser parser = new QPLParser(tokenStream);
        ParseTree tree = parser.prog();
        // Recursively interpret the included file with its own visitor.
        return new ImpVisitor(filePath).visit(tree);
    } catch (IOException ex) {
        System.err.println(filePath + " cannot be found! Ignoring");
        return "";
    }
}
/**
 * Special-cases the qualified-identifier decision with explicit fixed lookahead rather
 * than full adaptive prediction; every other decision delegates to the ATN simulator.
 */
@Override
public int adaptivePredict(TokenStream input, int decision, ParserRuleContext outerContext) {
    // Guard: only the QID decision with IDENTIFIER at LA(1) is handled here.
    if (decision != QID_DECISION || QID_DECISION < 0 || input.LA(1) != GoParser.IDENTIFIER) {
        return super.adaptivePredict(input, decision, outerContext);
    }
    if (input.LA(2) != GoParser.Dot) {
        assert input.LA(2) != CaretToken.CARET_TOKEN_TYPE;
        return 2;
    }
    if (input.LA(3) != GoParser.IDENTIFIER) {
        assert input.LA(3) != CaretToken.CARET_TOKEN_TYPE;
        return 2;
    }
    // IDENT '.' IDENT: let the semantic predicate choose the alternative.
    return qidPredicate.eval(parser, outerContext) ? 1 : 2;
}
/**
 * Creates and configures a parser for code completion: no parse tree is built,
 * default error recovery is used, prediction runs in full-context LL mode with
 * local-context preference, and package-name checking is disabled.
 */
@NonNull
public CodeCompletionGoParser getParser(@NonNull TokenStream input) {
    final CodeCompletionGoParser parser = createParser(input);

    parser.removeErrorListeners();
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());

    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    parser.getInterpreter().force_global_context = false;
    parser.getInterpreter().always_try_local_context = true;

    parser.setCheckPackageNames(false);
    parser.setPackageNames(Collections.<String>emptyList());
    return parser;
}
/**
 * Overrides prediction for the qualified-identifier decision using fixed lookahead.
 * Note the asymmetry with a plain else-branch: when the lookahead token is the caret
 * (code-completion) token, control falls through to the default adaptive prediction
 * instead of returning alternative 2.
 */
@Override
public int adaptivePredict(TokenStream input, int decision, ParserRuleContext outerContext) {
    if (decision == QID_DECISION && QID_DECISION >= 0) {
        if (input.LA(1) == GoParser.IDENTIFIER) {
            if (input.LA(2) == GoParser.Dot) {
                if (input.LA(3) == GoParser.IDENTIFIER) {
                    // IDENT '.' IDENT: let the semantic predicate pick the alternative.
                    return qidPredicate.eval(parser, outerContext) ? 1 : 2;
                } else if (input.LA(3) != CaretToken.CARET_TOKEN_TYPE) {
                    return 2;
                }
            } else if (input.LA(2) != CaretToken.CARET_TOKEN_TYPE) {
                return 2;
            }
        }
    }
    // Non-QID decisions, non-identifier starts, and caret tokens use normal prediction.
    return super.adaptivePredict(input, decision, outerContext);
}
/**
 * Borrows a parser from the pool, or creates a fresh one when the pool is empty or
 * every pooled reference has been cleared by the garbage collector.
 */
public T getParser(TokenStream input) {
    T parser = null;
    synchronized (parsers) {
        // Drain cleared references until a live parser is found or the pool empties.
        while (parser == null && !parsers.isEmpty()) {
            parser = parsers.poll().get();
        }
    }
    if (parser == null) {
        parser = createParser(input);
    } else {
        parser.setInputStream(input);
    }
    return parser;
}
/**
 * Converts any lexer/parser syntax error into an AntlrParseException carrying the
 * offending input text and position.
 */
@Override
public final void syntaxError(final Recognizer<?, ?> recognizer, final Object offendingSymbol,
        final int line, final int charPositionInLine, final String msg, final RecognitionException e) {
    String input;
    if (recognizer instanceof Lexer) {
        final CharStream cs = ((Lexer) recognizer).getInputStream();
        // FIX: Interval bounds are inclusive, so the last valid index is size()-1.
        // The previous (0, size()) asked for one character past the end; ANTLR's
        // clamping masked the off-by-one, but the contract is inclusive bounds.
        input = cs.getText(new Interval(0, cs.size() - 1));
    } else if (recognizer instanceof Parser) {
        final TokenStream tokens = ((Parser) recognizer).getInputStream();
        if (tokens != null) {
            input = tokens.getText();
        } else {
            input = "<unknown input>";
        }
    } else {
        input = "<unknown input>";
    }
    throw new AntlrParseException(input, line, charPositionInLine, msg);
}
@Override public TransformExpressionTree compileToExpressionTree(String expression) { CharStream charStream = new ANTLRInputStream(expression); PQL2Lexer lexer = new PQL2Lexer(charStream); lexer.setTokenFactory(new CommonTokenFactory(true)); TokenStream tokenStream = new UnbufferedTokenStream<CommonToken>(lexer); PQL2Parser parser = new PQL2Parser(tokenStream); parser.setErrorHandler(new BailErrorStrategy()); // Parse ParseTree parseTree = parser.expression(); ParseTreeWalker walker = new ParseTreeWalker(); Pql2AstListener listener = new Pql2AstListener(expression, _splitInClause); walker.walk(listener, parseTree); final AstNode rootNode = listener.getRootNode(); return TransformExpressionTree.buildTree(rootNode); }
public static <P extends Parser> P getParser(Class<? extends Lexer> lexerClass, Class<P> parserClass, String source) { Lexer lexer = getLexer(lexerClass, source); TokenStream tokens = new CommonTokenStream(lexer); P parser; try { parser = parserClass.getConstructor(TokenStream.class).newInstance(tokens); } catch (Exception e) { throw new IllegalArgumentException("couldn't invoke parser constructor", e); } parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION); parser.removeErrorListeners(); // don't spit to stderr parser.addErrorListener(new DiagnosticErrorListener()); parser.addErrorListener(new AntlrFailureListener()); return parser; }
/**
 * Highlights the token span covered by a decision event and attaches the event info
 * to the created range highlighter so it can be recovered from the editor markup.
 *
 * @return the first token of the highlighted span
 */
public Token addDecisionEventHighlighter(PreviewState previewState, MarkupModel markupModel,
        DecisionEventInfo info, Color errorStripeColor, EffectType effectType) {
    TokenStream tokenStream = previewState.parsingResult.parser.getInputStream();
    Token startToken = tokenStream.get(info.startIndex);
    Token stopToken = tokenStream.get(info.stopIndex);

    TextAttributes attrs =
        new TextAttributes(JBColor.BLACK, JBColor.WHITE, errorStripeColor, effectType, Font.PLAIN);
    attrs.setErrorStripeColor(errorStripeColor);

    // Stop offsets are inclusive, hence +1 for the range's exclusive end.
    final RangeHighlighter highlighter = markupModel.addRangeHighlighter(
        startToken.getStartIndex(), stopToken.getStopIndex() + 1,
        HighlighterLayer.ADDITIONAL_SYNTAX, attrs,
        HighlighterTargetArea.EXACT_RANGE);
    highlighter.putUserData(DECISION_EVENT_INFO_KEY, info);
    highlighter.setErrorStripeMarkColor(errorStripeColor);
    return startToken;
}
/**
 * Parse.
 * @param text Text to parse
 * @return Map of tags
 * @throws BibSyntaxException If fails
 */
private static Map<String, String> parse(final String text) throws BibSyntaxException {
    final BibLexer lexer = new BibLexer(new ANTLRInputStream(text));
    final BibParser parser = new BibParser(new CommonTokenStream(lexer));

    // Accumulate syntax errors from both stages instead of printing them.
    final Errors errors = new Errors();
    lexer.addErrorListener(errors);
    parser.addErrorListener(errors);

    final Map<String, String> map;
    try {
        map = parser.tags().map;
    } catch (final RecognitionException ex) {
        throw new BibSyntaxException(ex);
    }
    // Any recovered-from errors still make the whole parse fail.
    if (!Iterables.isEmpty(errors)) {
        throw new BibSyntaxException(Joiner.on("; ").join(errors));
    }
    return map;
}