// Rewrites the token types of the tokens covered by [start..stop] to the type
// the rewriter derives for the given grammar element.
@Override protected void announce(Token start, Token stop, AbstractElement element) {
    if (start != null && start != Token.EOF_TOKEN) {
        if (start == stop) {
            // Single-token region: delegate to the one-token overload.
            announce(start, element);
        } else {
            CommonToken castedStart = (CommonToken) start;
            if (stop == null) { // possible error condition
                // NOTE(review): skip if this start token coincides with the last
                // reported error index — presumably avoids double-processing after
                // error recovery; confirm against the ANTLR recovery flow.
                if (start.getTokenIndex() == state.lastErrorIndex) {
                    return;
                }
            }
            CommonToken castedEnd = (CommonToken) stop;
            Integer newType = rewriter.rewrite(castedStart, element);
            // Only retype the region when the rewriter produced a type and the
            // region has a real (non-EOF) end token.
            if (newType != null && castedEnd != null && castedEnd != Token.EOF_TOKEN) {
                LazyTokenStream castedInput = (LazyTokenStream) this.input;
                // Retype every visible token strictly between start and stop;
                // hidden-channel tokens (whitespace, comments) keep their type.
                for (int i = castedStart.getTokenIndex() + 1; i < castedEnd.getTokenIndex(); i++) {
                    Token token = castedInput.get(i);
                    if (token.getChannel() != Token.HIDDEN_CHANNEL)
                        token.setType(newType);
                }
                // The stop token is retyped as well; start was handled by the rewriter.
                castedEnd.setType(newType);
            }
        }
    }
}
public void test_getTrimmedNodeRange() { String source = " hello! "; // 012345678 CommonToken token = new CommonToken(Css3Lexer.IDENT); token.setText(source); token.setStartIndex(0); token.setStopIndex(7); //len - 1 -> points to last char not the end! Node node = new TokenNode(source, token); assertEquals(" hello! ", node.image().toString()); int[] result = NodeUtil.getTrimmedNodeRange(node); assertEquals(1, result[0]); assertEquals(7, result[1]); }
/**
 * Creates a qualified name tree by extending an existing one
 * with an additional fragment in front.
 * @param init token holding the additional level text
 * @param subTree the existing tree; may be {@code null}, in which case
 * the result is computed using {@link #toQualName(Token,Token)} with a
 * {@code null} first argument
 */
CommonTree toQualName(Token init, CtrlTree subTree) {
    if (subTree == null || this.namespace.hasErrors()) {
        // Nothing to extend (or errors present): fall back to the token-based variant.
        return toQualName(null, init);
    }
    // Nest the new fragment in front of the existing qualified name.
    QualName extended = subTree.getQualName().nest(getText(init));
    Token subTop = subTree.getToken();
    CommonToken newTop = new CommonToken(subTop.getType(), extended.toString());
    newTop.setLine(init.getLine());
    newTop.setTokenIndex(init.getTokenIndex());
    CtrlTree qualTree = new CtrlTree(newTop);
    qualTree.setQualName(extended);
    qualTree.addChild(subTree.getChild(0));
    return qualTree;
}
/**
 * Tests if the rule name is qualified;
 * if not, first tries to look it up in the import map, and if that fails,
 * prefixes it with the package name.
 */
CtrlTree qualify(CtrlTree ruleNameToken) {
    QualName qualName = ruleNameToken.getQualName();
    if (this.namespace.hasErrors() || !qualName.parent().isTop()) {
        // Already qualified (or errors present): return the tree unchanged.
        return ruleNameToken;
    }
    String simpleName = qualName.last();
    Map<String,QualName> importMap = getNamespace().getImportMap();
    if (importMap.containsKey(simpleName)) {
        // The simple name is imported: use the imported qualified name.
        qualName = importMap.get(simpleName);
    } else if (!isAnyOther(simpleName)) {
        // Otherwise qualify it with the current package name.
        qualName = this.packageName.extend(simpleName);
    }
    CommonToken token = new CommonToken(ruleNameToken.getType(), qualName.toString());
    token.setLine(ruleNameToken.getLine());
    token.setTokenIndex(ruleNameToken.getToken().getTokenIndex());
    CtrlTree result = new CtrlTree(token);
    result.setQualName(qualName);
    result.addChild(ruleNameToken.getChild(0));
    return result;
}
/** for testing */ public CompiledST defineTemplate(String templateName, String template) { if ( templateName.charAt(0) !='/' ) templateName = "/"+templateName; try { CompiledST impl = defineTemplate(templateName, new CommonToken(GroupParser.ID, templateName), null, template, null); return impl; } catch (STException se) { // we have reported the error; the exception just blasts us // out of parsing this template } return null; }
/** for testing */ public CompiledST defineTemplate(String templateName, String template) { if ( templateName.charAt(0)!='/' ) templateName = "/"+templateName; try { CompiledST impl = defineTemplate(templateName, new CommonToken(GroupParser.ID, templateName), null, template, null); return impl; } catch (STException se) { // we have reported the error; the exception just blasts us // out of parsing this template } return null; }
@Test public void testTokenRegionContainsRegion() throws Exception { String modelAsString = "a1 / /* comment */ b2"; List<CommonToken> tokens = getTokens(modelAsString); for(int length=0; length < modelAsString.length(); ++length) { for(int offset = 0; offset + length < modelAsString.length(); ++offset) { ITextRegion tokenRegion = tokenRegionProvider.getTokenRegion(modelAsString, new TextRegion(offset, length)); // System.out.println(offset + ":" + length + " -> " + tokenRegion); CommonToken firstToken = findTokenStartingAt(tokenRegion.getOffset(), tokens); assertTrue(firstToken.getStartIndex() <= offset); if(tokenRegion.getLength() != 0) { CommonToken lastToken = findTokenStopingAt(tokenRegion.getOffset() + tokenRegion.getLength()-1, tokens); assertTrue(lastToken.getStopIndex() >= offset + length -1); } } } }
/**
 * Create a new token from the given prototype. Any argument besides the prototype is optional and
 * will be ignored if its value is <code>null</code>.
 */
protected CommonToken createToken(CommonToken prototype, String text, Integer charPosInLine,
        Integer channel, Integer start, Integer stop, Integer type) {
    if (prototype == null) {
        throw new IllegalArgumentException("Prototype may not be null.");
    }
    CommonToken token = new CommonToken(prototype);
    // Overwrite only the attributes for which a value was actually supplied.
    if (text != null) {
        token.setText(text);
    }
    if (charPosInLine != null) {
        token.setCharPositionInLine(charPosInLine);
    }
    if (channel != null) {
        token.setChannel(channel);
    }
    if (start != null) {
        token.setStartIndex(start);
    }
    if (stop != null) {
        token.setStopIndex(stop);
    }
    if (type != null) {
        token.setType(type);
    }
    return token;
}
private void splitIntoBeginToken(Token token, int indentation, ITokenAcceptor result) {
    result.accept(token);
    // The synthetic BEGIN token starts right after the accepted token.
    int beginOffset = ((CommonToken) token).getStopIndex() + 1;
    if (!shouldEmitPendingEndTokens()) {
        indentationStack.push(indentation);
        currentIndentation = indentation;
        result.accept(createBeginToken(beginOffset));
        return;
    }
    Token nextToken = getDelegate().nextToken();
    if (shouldSplitToken(nextToken)) {
        nextOffset = beginOffset;
        doSplitToken(nextToken, result);
    } else {
        indentationStack.push(indentation);
        currentIndentation = indentation;
        result.accept(createBeginToken(beginOffset));
        result.accept(nextToken);
    }
}
// Delegates to the superclass (which fills the token queue as a side effect),
// then serves the queued tokens; synthesizes an EOF token once the queue is empty.
public Token nextToken() {
    super.nextToken();
    return tokens.isEmpty() ? new CommonToken(Token.EOF) : tokens.remove(0);
}
/**
 * The second pass over the given input. If the input ends with an ASI, the semicolon is removed and it is parsed
 * again since the production may have skipped the ASI if more input was present. Same applies for the opposite
 * direction, e.g if it does not end with an ASI but the prev token suggests that there may have been a semicolon,
 * it is inserted.
 */
private void adjustASIAndCollectFollowElements(CustomInternalN4JSParser previousParser, boolean strict,
        Set<FollowElement> result) {
    ObservableXtextTokenStream tokens = (ObservableXtextTokenStream) previousParser.getTokenStream();
    int lastTokenIndex = tokens.size() - 1;
    if (lastTokenIndex >= 0) { // stream is not empty
        CommonToken lastToken = (CommonToken) tokens.LT(-1);
        @SuppressWarnings("unchecked")
        List<Token> tokenList = tokens.getTokens();
        if (lastToken == null) {
            return; // TODO ask Sebastian why and how this can happen... it happens!
        }
        if (shouldSkipASI(lastToken)) {
            // Some statements may not span multiple lines, e.g between the return keyword
            // and the returned expression, there may not be an ASI. Filter these locations.
            if (maySkipASI(lastToken, tokens)) {
                // Remove the trailing automatically-inserted semicolon and re-run
                // the follow-element computation on the shortened stream.
                tokenList.remove(lastTokenIndex);
                result.addAll(resetAndGetFollowElements(tokens, strict));
                // If a postfix operator sneaked into the result, remove it since
                // we'd have produce an ASI before that
                removePostfixOperator(result);
            }
        } else if (shouldAddSyntheticSemicolon(previousParser, lastTokenIndex, lastToken)) {
            // Opposite direction: append a synthetic semicolon and re-run the
            // follow-element computation on the extended stream.
            CommonToken token = new CommonToken(semi);
            tokenList.add(token);
            result.addAll(resetAndGetFollowElements(tokens, strict));
            // Same here, if we had added an ASI, the postfix operator would be rendered
            // invalid, remove it.
            removePostfixOperator(result);
        }
    }
}
/**
 * Prevent ASIs to be skipped at certain locations, e.g. after a return keyword.
 */
private boolean maySkipASI(CommonToken lastToken, ObservableXtextTokenStream tokens) {
    // Walk backwards to the closest visible (default-channel) token.
    for (int i = lastToken.getTokenIndex() - 1; i >= 0; i--) {
        Token candidate = tokens.get(i);
        if (candidate.getChannel() == Token.DEFAULT_CHANNEL) {
            // Skipping is allowed unless that token demands a mandatory ASI.
            return !mandatoryASI.get(candidate.getType());
        }
    }
    // No visible predecessor found: skipping is allowed.
    return true;
}
/**
 * Returns true if the ASI at the end should be skipped for a second pass of the CA parser. Otherwise false.
 */
private boolean shouldSkipASI(CommonToken lastToken) {
    int type = lastToken.getType();
    if (type == eol) {
        return true;
    }
    // A semicolon token whose text is not a literal ";" was inserted automatically.
    String text = lastToken.getText();
    return type == semi && text != null && !";".equals(text);
}
/**
 * Implementation of the {@link TokenSource} interface. Return new tokens as long as there are some, afterwards
 * return {@link Token#EOF_TOKEN}.
 */
@Override
public Token nextToken() {
    // Serve a token that was pushed back (by processHiddenToken) first.
    if (next != null) {
        Token result = next;
        next = null;
        return result;
    }
    if (!leafNodes.hasNext()) {
        return Token.EOF_TOKEN;
    }
    ILeafNode leaf = leafNodes.next();
    if (leaf.getTotalOffset() >= endOffset) {
        // Past the requested region: exhaust the iterator and signal EOF.
        leafNodes = Iterators.emptyIterator();
        return Token.EOF_TOKEN;
    }
    if (leaf.getTotalEndOffset() <= startOffset) {
        // Entirely before the requested region: skip this leaf.
        return nextToken();
    }
    if (leaf.getTotalEndOffset() > endOffset) {
        // Leaf straddles the region end: lex only the overlapping prefix.
        return toPrefixToken(leaf);
    }
    // Leaves carrying a "semicolon inserted" error become ASI tokens.
    SyntaxErrorMessage syntaxErrorMessage = leaf.getSyntaxErrorMessage();
    if (syntaxErrorMessage != null && SEMICOLON_INSERTED.equals(syntaxErrorMessage.getIssueCode())) {
        return toASIToken(leaf);
    }
    if (leaf.isHidden()) {
        return processHiddenToken(leaf);
    }
    int tokenType = tokenTypeMapper.getInternalTokenType(leaf);
    return new CommonToken(tokenType, leaf.getText());
}
/**
 * Skips the given leaf as it's hidden. If it was the last token to be returned, a hidden token may be
 * synthesized if it would affect the semicolon insertion.
 */
private Token processHiddenToken(ILeafNode leaf) {
    Token following = nextToken();
    boolean atEof = following == Token.EOF_TOKEN;
    if (!atEof || Strings.countLineBreaks(leaf.getText()) == 0) {
        return following;
    }
    // Push the EOF back and synthesize a hidden token for the trailing line break,
    // so automatic semicolon insertion can still see it.
    next = following;
    CommonToken synthesized = new CommonToken(tokenTypeMapper.getInternalTokenType(leaf), leaf.getText());
    synthesized.setChannel(Token.HIDDEN_CHANNEL);
    return synthesized;
}
/** * Produce an Antlr token for the prefix of the given leaf that overlaps the requested region * * @see #endOffset */ private Token toPrefixToken(ILeafNode leaf) { Lexer lexer = new InternalN4JSLexer(); String text = leaf.getText(); String prefix = text.substring(0, endOffset - leaf.getTotalOffset()); ANTLRStringStream stream = new ANTLRStringStream(prefix); lexer.setCharStream(stream); Token nextToken = lexer.nextToken(); // copy to get rid of the reference to the stream again return new CommonToken(nextToken.getType(), nextToken.getText()); }
/**
 * JSDoc comments are identified by the lexer as normal ML comments to simplify the ASI code. If a comment
 * starts with the sequence {@code /**} (but not {@code /***}) it is remapped to a JS Doc token.
 */
@Override
protected TokenInfo createTokenInfo(CommonToken token) {
    if (token.getType() == InternalN4JSParser.RULE_ML_COMMENT) {
        String text = token.getText();
        // A JSDoc comment starts with exactly two asterisks after the slash.
        boolean isJsDoc = text.length() > 4 && text.startsWith("/**") && text.charAt(3) != '*';
        if (isJsDoc) {
            CommonToken remapped = new CommonToken(token);
            remapped.setType(JS_DOC_TOKEN);
            return super.createTokenInfo(remapped);
        }
    }
    return super.createTokenInfo(token);
}
/**
 * {@inheritDoc}
 * <p>
 * Returns the source text covered by the tokens at indices {@code start} through {@code stop}
 * (both inclusive), or {@code null} if either index is negative.
 */
@Override
public String toString(int start, int stop) {
    if (start < 0 || stop < 0) {
        return null;
    }
    CommonToken startToken = (CommonToken) tokens.get(start);
    CommonToken stopToken = (CommonToken) tokens.get(stop);
    CharStream charStream = ((Lexer) tokenSource).getCharStream();
    // BUG FIX: CommonToken#getStopIndex() is inclusive (it points at the token's
    // last character), but String#substring expects an exclusive end index, so we
    // must add 1 — otherwise the stop token's last character is truncated.
    return charStream.toString().substring(startToken.getStartIndex(), stopToken.getStopIndex() + 1);
}
// Builds a parse-tree node backed by a single lexer token; the node's character
// range and token id are derived from the token itself.
public TokenNode(CharSequence source, CommonToken token) {
    super(source);
    int[] offsets = CommonTokenUtil.getCommonTokenOffsetRange(token);
    this.from = offsets[0];
    this.to = offsets[1];
    this.tokenId = CssTokenId.forTokenTypeCode(token.getType());
}
@Override public void consumeToken(Token token) { if (backtracking > 0 || resync) { return; } if (debug_tokens) { CommonToken ct = (CommonToken) token; int[] ctr = CommonTokenUtil.getCommonTokenOffsetRange(ct); System.out.println(token + "(" + ctr[0] + "-" + ctr[1] + ")"); } //ignore the closing EOF token, we do not want it //it the parse tree if (token.getType() == Css3Lexer.EOF) { return; } //also ignore error tokens - they are added as children of ErrorNode-s in the recognitionException(...) method if (token.getType() == Token.INVALID_TOKEN_TYPE) { return; } lastConsumedToken = (CommonToken) token; RuleNode ruleNode = callStack.peek(); TokenNode elementNode = new TokenNode(source, (CommonToken) token); elementNode.hiddenTokens = this.hiddenTokens; hiddenTokens.clear(); ruleNode.addChild(elementNode); updateFirstTokens(ruleNode, lastConsumedToken); }
// Propagates the given token as the "first token" up the ancestor chain,
// stopping at the first ancestor whose start offset is already known.
private void updateFirstTokens(RuleNode ruleNode, CommonToken token) {
    while (ruleNode != null && ruleNode.from() == -1) {
        ruleNode.setFirstToken(token);
        ruleNode = (RuleNode) ruleNode.getParent();
    }
}
@Override
public void consumeHiddenToken(Token token) {
    if (backtracking > 0 || resync) {
        return;
    }
    CommonToken hidden = (CommonToken) token;
    if (debug_tokens) {
        int[] range = CommonTokenUtil.getCommonTokenOffsetRange(hidden);
        System.out.println(token + "(" + range[0] + "-" + range[1] + ")");
    }
    // Collect hidden tokens; they are attached to the next visible TokenNode.
    hiddenTokens.add(hidden);
}
// Debug helper: prints every token produced by the lexer (including EOF) to stdout.
public static void dumpTokens(Css3Lexer lexer) {
    System.out.println("Tokens:");
    while (true) {
        CommonToken token = (CommonToken) lexer.nextToken();
        if (token == null) {
            break;
        }
        String typeName = token.getType() == -1 ? "" : Css3Parser.tokenNames[token.getType()];
        System.out.println(token.getStartIndex() + "-" + token.getStopIndex()
                + ": " + token.getText() + "(" + typeName + ")");
        if (token.getType() == Css3Lexer.EOF) {
            break;
        }
    }
    System.out.println("-------------");
}
/**
 * Throws a {@link SyntaxError} if the parser reported errors or did not consume
 * the whole (right-trimmed) input text.
 */
private static void checkForSyntaxErrors(String text, String type, SQLParser parser, ParserRuleReturnScope r) {
    if (parser.getNumberOfSyntaxErrors() > 0)
        throw new SyntaxError("Illegal " + type, text, -1, -1);
    CommonToken stop = (CommonToken) r.stop;
    // Trailing unconsumed input means the parse stopped early.
    if (text != null && stop.getStopIndex() < StringUtil.trimRight(text).length() - 1) {
        // BUG FIX: the two messages were swapped. A stop index of 0 means the parser
        // barely got past the first character — the error is at the beginning;
        // otherwise the error lies after the last successfully parsed token.
        if (stop.getStopIndex() == 0)
            throw new SyntaxError("Syntax error at the beginning ", text);
        else
            throw new SyntaxError("Syntax error after " + stop.getText(), text);
    }
}
/** * Creates a new tree with a {@link CtrlParser#PACKAGE} token at the root, and * an empty text. */ CtrlTree emptyPackage() { CommonToken token = new CommonToken(CtrlParser.ID, ""); CtrlTree name = new CtrlTree(token); // the empty package does not get a qualified name, // since it stands for ModuleName.TOP name.addChild(new CtrlTree(token)); // construct the result tree CtrlTree result = new CtrlTree(CtrlParser.PACKAGE); result.addChild(name); result.addChild(new CtrlTree(CtrlParser.SEMI)); return result; }
// Smoke test: lexes a small query and prints every token to stdout.
public void testLexerOutput() throws IOException {
    String str = "~woof^2";
    CharStream input = new ANTLRInputStream(new ByteArrayInputStream(str.getBytes("UTF-8")));
    CommonTokenStream tokenStream = new CommonTokenStream(new FTSLexer(input));
    @SuppressWarnings("unchecked")
    List<CommonToken> tokens = (List<CommonToken>) tokenStream.getTokens();
    for (CommonToken token : tokens) {
        System.out.println(token.toString());
    }
}
// Captures the offending token's position and a formatted message for reporting.
SemanticException(IntStream input, Token token, String errorMessage, Object... messageArguments) {
    this.errorMessage = String.format(errorMessage, messageArguments);
    this.input = input;
    this.token = token;
    // Character index of the token's first character within the input.
    this.index = ((CommonToken) token).getStartIndex();
    this.line = token.getLine();
    this.charPositionInLine = token.getCharPositionInLine();
}
@Test
public void testSlComment() {
    InternalSimpleExpressionsTestLanguageLexer lexer = new InternalSimpleExpressionsTestLanguageLexer();
    lexer.setCharStream(new ANTLRStringStream("//sl comment\na"));
    CommonTokenStream stream = new CommonTokenStream(lexer);
    // The single-line comment must be the first token in the stream.
    Object firstToken = stream.getTokens().get(0);
    assertTrue(firstToken instanceof CommonToken);
    assertEquals(InternalSimpleExpressionsTestLanguageLexer.RULE_SL_COMMENT,
            ((CommonToken) firstToken).getType());
}
@Test
public void testLexerError_05() throws Exception {
    InternalSimpleExpressionsTestLanguageLexer lexer = new InternalSimpleExpressionsTestLanguageLexer();
    lexer.setCharStream(new ANTLRStringStream("a 'incomplete string \\'"));
    CommonTokenStream stream = new CommonTokenStream(lexer);
    @SuppressWarnings("unchecked")
    List<CommonToken> tokens = stream.getTokens();
    assertEquals(tokens.toString(), 3, tokens.size());
    assertEquals("a", tokens.get(0).getText());
    assertEquals(" ", tokens.get(1).getText());
    // The unterminated string is reported as a single token of type 0 (invalid).
    CommonToken errorToken = tokens.get(2);
    assertEquals("'incomplete string \\'", errorToken.getText());
    assertEquals(0, errorToken.getType());
}
@Override public CompiledST loadTemplateFile(String prefix, String unqualifiedFileName, CharStream templateStream) { String template = templateStream.substring(0, templateStream.size()- 1); String templateName = Misc.getFileNameNoSuffix(unqualifiedFileName); String fullyQualifiedTemplateName = prefix+templateName; CompiledST impl = new Compiler(this).compile(fullyQualifiedTemplateName, template); CommonToken nameT = new CommonToken(STLexer.SEMI); // Seems like a hack, best I could come up with. nameT.setInputStream(templateStream); rawDefineTemplate(fullyQualifiedTemplateName, impl, nameT); impl.defineImplicitlyDefinedTemplates(this); return impl; }
/** for testing */
public CompiledST defineTemplate(String name, String argsS, String template) {
    // Template names are always registered with a leading '/'.
    if (name.charAt(0) != '/') {
        name = "/" + name;
    }
    // Convert the comma-separated argument list into formal arguments.
    List<FormalArgument> formalArgs = new ArrayList<FormalArgument>();
    for (String arg : argsS.split(",")) {
        formalArgs.add(new FormalArgument(arg));
    }
    return defineTemplate(name, new CommonToken(GroupParser.ID, name), formalArgs, template, null);
}
/** for testing */
public CompiledST defineTemplate(String name, String argsS, String template) {
    // Normalize the name to the absolute form expected by the group.
    String templatePath = name.charAt(0) == '/' ? name : "/" + name;
    // Build the formal-argument list from the comma-separated names.
    String[] argNames = argsS.split(",");
    List<FormalArgument> formalArguments = new ArrayList<FormalArgument>(argNames.length);
    for (String argName : argNames) {
        formalArguments.add(new FormalArgument(argName));
    }
    return defineTemplate(templatePath, new CommonToken(GroupParser.ID, templatePath),
            formalArguments, template, null);
}