/**
 * Parses a user agent string into an ANTLR parse tree.
 *
 * <p>The raw string is first normalized by
 * {@code EvilManualUseragentStringHacks.fixIt(...)} before lexing. The
 * {@code userAgent} object itself is registered as an error listener on both
 * the lexer and the parser; when not verbose, the default console listeners
 * are removed first so only the user agent receives syntax errors.
 *
 * @param userAgent the user agent whose string is parsed; also collects syntax errors
 * @return the root {@code userAgent} rule context of the resulting parse tree
 */
private UserAgentContext parseUserAgent(UserAgent userAgent) {
    final String fixedUserAgentString =
        EvilManualUseragentStringHacks.fixIt(userAgent.getUserAgentString());

    final CodePointCharStream charStream = CharStreams.fromString(fixedUserAgentString);
    final UserAgentLexer uaLexer = new UserAgentLexer(charStream);
    final CommonTokenStream tokenStream = new CommonTokenStream(uaLexer);
    final UserAgentParser uaParser = new UserAgentParser(tokenStream);

    if (!verbose) {
        // Silence the default console error listeners.
        uaLexer.removeErrorListeners();
        uaParser.removeErrorListeners();
    }
    uaLexer.addErrorListener(userAgent);
    uaParser.addErrorListener(userAgent);

    return uaParser.userAgent();
}
private static void run(String expr) throws Exception { //构建流CodePointCharStream(4.7版本ANTLRInputStream标记为deprecated) CodePointCharStream in = CharStreams.fromString(expr); //词法分析 LionLexer lexer = new LionLexer(in); //token流 CommonTokenStream tokens = new CommonTokenStream(lexer); //语法分析器 LionParser parser = new LionParser(tokens); //验证 parser.prog(); }
/**
 * Build and return {@link Antlr4OboParser} for a given <code>text</code>.
 *
 * @param text String with the text to parse.
 * @param mode Name of the lexer mode to use.
 * @return {@link Antlr4OboParser}, readily setup for parsing the OBO file.
 */
protected Antlr4OboParser buildParser(String text, String mode) {
    final CodePointCharStream inputStream = CharStreams.fromString(text);
    final OboLexer l = new OboLexer(inputStream);

    // Switch the lexer into the requested mode, if it exists. Hoist the mode
    // name array out of the loop (the original re-fetched it every iteration)
    // and stop scanning once the mode has been found.
    final String[] modeNames = l.getModeNames();
    for (int i = 0; i < modeNames.length; ++i) {
        if (mode.equals(modeNames[i])) {
            l.mode(i);
            break;
        }
    }

    Antlr4OboParser p = new Antlr4OboParser(new CommonTokenStream(l));

    // Fail fast on the first syntax error instead of attempting recovery.
    p.addErrorListener(new BaseErrorListener() {
        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                int charPositionInLine, String msg, RecognitionException e) {
            throw new IllegalStateException("failed to parse at line " + line + " due to " + msg, e);
        }
    });
    p.addErrorListener(new DiagnosticErrorListener());
    p.addParseListener(outerListener);
    return p;
}
/**
 * Compiles the given query expression into a normalized {@link Matcher}.
 *
 * @param query the expression source text to compile
 * @return the normalized matcher built by visiting the parsed expression tree
 */
public static Matcher compile(String query) {
    // CharStreams.fromString avoids the checked IOException that
    // fromReader(new StringReader(...)) forced us to wrap in a try/catch:
    // reading from an in-memory string cannot fail with an I/O error.
    CodePointCharStream input = CharStreams.fromString(query);
    ExprLexer lexer = new ExprLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    ExprParser parser = new ExprParser(tokens);
    ParseTree tree = parser.expr();
    Visitor visitor = new Visitor();
    visitor.visit(tree);
    return visitor.expression().normalize();
}
/** Smoke-tests lexing of all nucleotide HGVS strings in {@code CHANGE_BRANCH} mode. */
@Test
public void testLexingOnNTStrings() {
    for (final String ntString : NT_STRINGS) {
        final HGVSLexer lexer = new HGVSLexer(CharStreams.fromString(ntString));
        lexer.mode(HGVSLexer.CHANGE_BRANCH);
        lexer.getAllTokens();
    }
}
/** Smoke-tests lexing of all legacy HGVS strings in {@code CHANGE_BRANCH} mode. */
@Test
public void testLexingOnLegacyStrings() {
    for (final String legacyString : LEGACY_STRINGS) {
        final HGVSLexer lexer = new HGVSLexer(CharStreams.fromString(legacyString));
        lexer.mode(HGVSLexer.CHANGE_BRANCH);
        lexer.getAllTokens();
    }
}
/** Smoke-tests lexing of all protein HGVS strings in {@code AMINO_ACID_CHANGE} mode. */
@Test
public void testLexingOnProteinStrings() {
    for (final String proteinString : PROTEIN_STRINGS) {
        final HGVSLexer lexer = new HGVSLexer(CharStreams.fromString(proteinString));
        lexer.mode(HGVSLexer.AMINO_ACID_CHANGE);
        lexer.getAllTokens();
    }
}
/** Smoke-tests that the OBO lexer tokenizes the minimal example file without error. */
@Test
public void testLexingMinimalFile() {
    final Antlr4OboLexer lexer = new Antlr4OboLexer(CharStreams.fromString(MINIMAL_FILE));
    lexer.getAllTokens();
}
/** Smoke-tests that the OBO lexer tokenizes the head of the HPO file without error. */
@Test
public void testLexingHeadOfHPO() {
    final Antlr4OboLexer lexer = new Antlr4OboLexer(CharStreams.fromString(HEAD_HPO));
    lexer.getAllTokens();
}
/**
 * Initializes this instance for the given tree-walker match expression.
 *
 * <p>Parses {@code newMatchExpression} with the UserAgentTreeWalker grammar,
 * strips the quotes left around fixed values, builds a
 * {@link TreeExpressionEvaluator} from the resulting tree, and derives the
 * matching state ({@code mustHaveMatches}, {@code matches}, fixed value).
 *
 * @param newMatchExpression the walker expression to parse
 * @param newMatcher the owning matcher; supplies verbosity and is passed to the evaluator
 * @throws InvalidParserConfigurationException if the expression yields no pattern
 */
void init(String newMatchExpression, Matcher newMatcher) {
    this.matcher = newMatcher;
    this.matchExpression = newMatchExpression;
    setVerbose(newMatcher.getVerbose());

    // Parse the expression; InitErrorListener receives any syntax errors
    // from both the lexer and the parser.
    InitErrorListener errorListener = new InitErrorListener();
    CodePointCharStream input = CharStreams.fromString(this.matchExpression);
    UserAgentTreeWalkerLexer lexer = new UserAgentTreeWalkerLexer(input);
    lexer.addErrorListener(errorListener);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    UserAgentTreeWalkerParser parser = new UserAgentTreeWalkerParser(tokens);
    parser.addErrorListener(errorListener);
    // parser.setTrace(true);
    ParserRuleContext requiredPattern = parseWalkerExpression(parser);

    if (requiredPattern == null) {
        throw new InvalidParserConfigurationException("NO pattern ?!?!?");
    }

    // We couldn't ditch the double quotes around the fixed values in the parsing phase.
    // So we ditch them here. We simply walk the tree and modify some of the tokens.
    new UnQuoteValues().visit(requiredPattern);

    // Now we create an evaluator instance
    evaluator = new TreeExpressionEvaluator(requiredPattern, matcher, verbose);

    // Is a fixed value (i.e. no events will ever be fired)?
    String fixedValue = evaluator.getFixedValue();
    if (fixedValue != null) {
        setFixedValue(fixedValue);
        mustHaveMatches = false;
        matches = new MatchesList(0);
        return; // Not interested in any patterns
    }

    // An expression that uses IsNull can legitimately match nothing.
    mustHaveMatches = !evaluator.usesIsNull();

    int informs = calculateInformPath("agent", requiredPattern);

    // If this is based on a variable we do not need any matches from the hashmap.
    if (mustHaveMatches && informs == 0) {
        mustHaveMatches = false;
    }

    // Only reserve room for matches when at least one inform was registered.
    int listSize = 0;
    if (informs > 0) {
        listSize = 1;
    }
    this.matches = new MatchesList(listSize);
}
public static DateTimeFormatter convert(String strfformat, ZoneId defaultZone) { CodePointCharStream input = CharStreams.fromString(strfformat); StrfTimeLexer lexer = new StrfTimeLexer(input); CommonTokenStream tokens = new CommonTokenStream(lexer); StrfTimeParser parser = new StrfTimeParser(tokens); lexer.removeErrorListeners(); parser.removeErrorListeners(); ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker StrfTimeToDateTimeFormatter converter = new StrfTimeToDateTimeFormatter(strfformat, defaultZone); lexer.addErrorListener(converter); parser.addErrorListener(converter); StrfTimeParser.PatternContext pattern = parser.pattern(); walker.walk(converter, pattern); // initiate walk of tree with listener if (converter.hasSyntaxError()) { return null; } return converter.build(); }