Java 类org.antlr.runtime.TokenSource 实例源码

项目:n4js    文件:CustomN4JSParser.java   
/**
 * First pass: run the content-assist parser over the tokens that have been computed
 * from the production parser's result and collect the follow elements from those.
 *
 * @param tokens
 *            the token source to parse
 * @param strict
 *            if true the parser will not use error recovery on the very last token
 * @param result
 *            receives the collected follow elements (modified in place)
 * @return the parser instance that was used, so callers can query it further
 */
private CustomInternalN4JSParser collectFollowElements(TokenSource tokens, boolean strict,
        Set<FollowElement> result) {
    CustomInternalN4JSParser parser = createParser();
    parser.setStrict(strict);
    try {
        ObservableXtextTokenStream tokenStream = new ObservableXtextTokenStream(tokens, parser);
        result.addAll(doGetFollowElements(parser, tokenStream));
    } catch (InfiniteRecursion infinite) {
        // Guards against erroneous infinite recovery loops in Antlr. Whether this
        // occurs is grammar dependent; the same guard is used in the base class and
        // is therefore applied here as well. Fall back to whatever follow elements
        // the parser collected before the recursion was detected.
        result.addAll(parser.getFollowElements());
    }
    return parser;
}
项目:n4js    文件:ParserBasedDocumentTokenSource.java   
/**
 * Wraps the tokens produced by the highlighting parser into a {@link TokenSource}
 * that replays them one by one and then yields EOF forever.
 */
@Override
protected TokenSource createTokenSource(String string) {
    final Iterator<Token> delegate = highlightingParser.getTokens(string).iterator();
    return new TokenSource() {

        @Override
        public Token nextToken() {
            // After the precomputed tokens are exhausted, keep answering EOF.
            return delegate.hasNext() ? delegate.next() : Token.EOF_TOKEN;
        }

        @Override
        public String getSourceName() {
            return "Text: " + string;
        }
    };
}
项目:xtext-core    文件:AbstractAntlrParser.java   
/**
 * Parses the given input with the production parser and builds the node model.
 *
 * @param ruleName
 *            name of the entry rule to invoke, or {@code null} for the default entry rule
 * @param in
 *            the character stream to parse
 * @param nodeModelBuilder
 *            builder that receives the node model while parsing
 * @param initialLookAhead
 *            the initial lookahead to configure on the token stream
 * @return the parse result produced by the internal parser
 * @throws ParseException
 *             wrapping any exception thrown by the internal parser (cause preserved)
 */
protected IParseResult doParse(String ruleName, CharStream in, NodeModelBuilder nodeModelBuilder, int initialLookAhead) {
    TokenSource tokenSource = createLexer(in);
    XtextTokenStream tokenStream = createTokenStream(tokenSource);
    tokenStream.initCurrentLookAhead(initialLookAhead);
    setInitialHiddenTokens(tokenStream);
    AbstractInternalAntlrParser parser = createParser(tokenStream);
    parser.setTokenTypeMap(getTokenDefProvider().getTokenDefMap());
    parser.setSyntaxErrorProvider(getSyntaxErrorProvider());
    parser.setNodeModelBuilder(nodeModelBuilder);
    parser.setSemanticModelBuilder(getElementFactory());
    // Parser and unordered-group helper must be wired to each other before parsing starts.
    IUnorderedGroupHelper helper = getUnorderedGroupHelper().get();
    parser.setUnorderedGroupHelper(helper);
    helper.initializeWith(parser);
    try {
        if(ruleName != null)
            return parser.parse(ruleName);
        return parser.parse();
    } catch (Exception re) {
        // Wrap any parser failure; the original exception is kept as the cause.
        throw new ParseException(re.getMessage(),re);
    }
}
项目:xtext-core    文件:BaseContentAssistParser.java   
/**
 * Computes the follow elements for content assist for the given input.
 *
 * @param input
 *            the text to parse
 * @param strict
 *            if true the parser will not use error recovery on the very last token
 * @return the follow elements collected by the content-assist parser
 */
public Collection<FE> getFollowElements(String input, boolean strict) {
    TokenSource source = createTokenSource(input);
    InternalParser contentAssistParser = createParser();
    contentAssistParser.setStrict(strict);
    ObservableXtextTokenStream stream = new ObservableXtextTokenStream(source, contentAssistParser);
    stream.setInitialHiddenTokens(getInitialHiddenTokens());
    contentAssistParser.setTokenStream(stream);
    IUnorderedGroupHelper groupHelper = createUnorderedGroupHelper();
    contentAssistParser.setUnorderedGroupHelper(groupHelper);
    groupHelper.initializeWith(contentAssistParser);
    stream.setListener(contentAssistParser);
    try {
        return Lists.newArrayList(getFollowElements(contentAssistParser));
    } catch (InfiniteRecursion infinite) {
        // Antlr recovery looped; fall back to what was collected so far.
        return Lists.newArrayList(contentAssistParser.getFollowElements());
    }
}
项目:bts    文件:AbstractContentAssistParser.java   
/**
 * Computes the follow elements for content assist for the given input.
 *
 * @param input
 *            the text to parse
 * @param strict
 *            if true the parser will not use error recovery on the very last token of the input
 * @return the follow elements collected by the content-assist parser
 */
public Collection<FollowElement> getFollowElements(String input, boolean strict) {
    CharStream stream = new ANTLRStringStream(input);
    TokenSource tokenSource = createLexer(stream);
    AbstractInternalContentAssistParser parser = createParser();
    parser.setStrict(strict);
    ObservableXtextTokenStream tokens = new ObservableXtextTokenStream(tokenSource, parser);
    tokens.setInitialHiddenTokens(getInitialHiddenTokens());
    parser.setTokenStream(tokens);
    IUnorderedGroupHelper helper = getUnorderedGroupHelper().get();
    parser.setUnorderedGroupHelper(helper);
    helper.initializeWith(parser);
    tokens.setListener(parser);
    try {
        return Lists.newArrayList(getFollowElements(parser));
    } catch(InfiniteRecursion infinite) {
        // Guards against erroneous infinite recovery loops in Antlr; fall back to
        // whatever the parser collected before the recursion was detected.
        return Lists.newArrayList(parser.getFollowElements());
    }
}
项目:bts    文件:BacktrackingLexerDocumentTokenSource.java   
/**
 * Finds the first token affected by the given document change by relexing the new
 * document content and comparing it token by token against the previously recorded
 * token infos.
 *
 * @param e
 *            the document event describing the change
 * @return offset, token-info index, first mismatching token, and the token source
 *         positioned right after that token
 * @since 2.4
 */
@Override
protected RepairEntryData getRepairEntryData(DocumentEvent e) throws Exception {
    int tokenStartsAt = 0;
    int tokenInfoIdx = 0;
    TokenSource source = createTokenSource(e.fDocument.get());
    CommonToken token = (CommonToken) source.nextToken();
    // find start idx
    while (true) {
        if (token == Token.EOF_TOKEN) {
            break;
        }
        if (tokenInfoIdx >= getInternalModifyableTokenInfos().size())
            break;
        TokenInfo tokenInfo = getInternalModifyableTokenInfos().get(tokenInfoIdx);
        // Stop at the first token whose type or length differs from the recorded info.
        if (tokenInfo.getAntlrTokenType() != token.getType()
                || token.getStopIndex() - token.getStartIndex() + 1 != tokenInfo.getLength())
            break;
        // Stop once we reach the token that spans the edit offset.
        if (tokenStartsAt + tokenInfo.getLength() > e.fOffset)
            break;
        tokenStartsAt += tokenInfo.getLength();
        tokenInfoIdx++;
        token = (CommonToken) source.nextToken();
    }
    return new RepairEntryData(tokenStartsAt, tokenInfoIdx, token, source);
}
项目:sadlos2    文件:NAMEValueConverter.java   
/**
 * Asserts that the escaped string lexes to a token with matching text, that the
 * token's rule matches this converter's rule (NAME and ID are treated as
 * interchangeable), and that the token re-parses to the original value.
 *
 * @param value
 *            the semantic value that was converted
 * @param tokenSource
 *            token source for the escaped string; may be null to skip the check
 * @param escapedString
 *            the escaped textual representation of the value
 */
protected void assertTokens(String value, TokenSource tokenSource, String escapedString) {
    if (tokenSource == null)
        return;
    Token token = tokenSource.nextToken();
    if (!escapedString.equals(token.getText())) {
        throw createTokenContentMismatchException(value, escapedString, token);
    }

    String rule1 = getRuleName().toUpperCase();
    String rule2 = getRuleName(token);
    // workaround: normalize NAME/ID to the same rule name before comparing,
    // since the grammar is ambiguous between the two rules.
    if ("NAME".equals(rule1) && "ID".equals(rule2)) {
        rule2 = "NAME";
    } else if ("ID".equals(rule1) && "NAME".equals(rule2)) {
        rule1 = "NAME";
    }
    // Ambiguity between NAME and ID rule
    if (!rule1.equals(rule2)) {
        throw createTokenTypeMismatchException(value, escapedString, token);
    }
    String reparsedValue = toValue(token.getText(), null);
    // Identity check first short-circuits the equals call for the common case.
    if (value != reparsedValue && !value.equals(reparsedValue)) {
        throw createTokenContentMismatchException(value, escapedString, token);
    }
}
项目:n4js    文件:CustomN4JSParser.java   
/**
 * Collects the follow elements for the given region of the model.
 *
 * @param node
 *            the root node of the model to parse
 * @param startOffset
 *            the start offset to consider
 * @param endOffset
 *            the exclusive end offset
 * @param strict
 *            if true the parser will not use error recovery on the very last token of the input.
 * @return a collection of follow elements.
 */
public Collection<FollowElement> getFollowElements(INode node, int startOffset, int endOffset, boolean strict) {
    Set<FollowElement> collected = Sets.newLinkedHashSet();

    TokenSource source = tokenSourceFactory.toTokenSource(node, startOffset, endOffset);
    CustomInternalN4JSParser usedParser = collectFollowElements(source, strict, collected);
    adjustASIAndCollectFollowElements(usedParser, strict, collected);

    // Hand back a list rather than the set: lists are easier to debug.
    return Lists.newArrayList(collected);
}
项目:xtext-extras    文件:TokenSequencePreservingPartialParsingHelper.java   
/**
 * Compares the token sequences of two token sources by token type.
 *
 * @param originalSource
 *            the source for the original input
 * @param newSource
 *            the source for the new input
 * @param expectedLength
 *            the expected total length of the new token sequence
 * @return true if both sources yield the same token types and the new sequence
 *         sums up to exactly the expected length
 */
protected boolean isSameTokenSequence(TokenSource originalSource, TokenSource newSource, int expectedLength) {
    Token token = originalSource.nextToken();
    int newLength = 0;
    while(Token.EOF != token.getType()) {
        Token newToken = newSource.nextToken();
        // A type mismatch (including a premature EOF from newSource) means the sequences differ.
        if (token.getType() != newToken.getType()) {
            return false;
        }
        newLength += TokenTool.getLength(newToken);
        token = originalSource.nextToken();
    }
    // The length check catches trailing extra content in the new source.
    return newLength == expectedLength;
}
项目:xtext-core    文件:XtextTokenStream.java   
public XtextTokenStream(TokenSource tokenSource, ITokenDefProvider tokenDefProvider) {
    super(tokenSource);
    tokens = new TokenList(500);
    rulenameToTokenType = new HashMap<String, Integer>(tokenDefProvider.getTokenDefMap().size());
    for(Map.Entry<Integer, String> entry: tokenDefProvider.getTokenDefMap().entrySet()) {
        rulenameToTokenType.put(entry.getValue(), entry.getKey());
    }
}
项目:xtext-core    文件:AbstractLexerBasedConverter.java   
/**
 * Asserts that the escaped string lexes to a token whose text matches, whose rule
 * matches this converter's rule name, and which re-parses to the original value.
 *
 * @param value
 *            the semantic value that was converted
 * @param tokenSource
 *            token source for the escaped string; may be null to skip the check
 * @param escapedString
 *            the escaped textual representation of the value
 */
protected void assertTokens(T value, TokenSource tokenSource, String escapedString) {
    if (tokenSource == null)
        return;
    Token token = tokenSource.nextToken();
    if (!escapedString.equals(token.getText())) {
        throw createTokenContentMismatchException(value, escapedString, token);
    }
    if (!getRuleName().toUpperCase().equals(getRuleName(token))) {
        throw createTokenTypeMismatchException(value, escapedString, token);
    }
    T reparsedValue = toValue(token.getText(), null);
    // Identity check first short-circuits the equals call for the common case.
    if (value != reparsedValue && !value.equals(reparsedValue)) {
        throw createTokenContentMismatchException(value, escapedString, token);
    }
}
项目:xtext-core    文件:AbstractLexerBasedConverter.java   
/**
 * Returns the configured lexer positioned on the given escaped value, or null
 * when no lexer is available for this converter.
 *
 * @param escapedValue
 *            the text the lexer should tokenize
 * @return the lexer ready to produce tokens, or null
 */
protected TokenSource getTokenSource(String escapedValue) {
    Lexer lexer = getLexer();
    if (lexer != null) {
        lexer.setCharStream(new ANTLRStringStream(escapedValue));
        return lexer;
    }
    return null;
}
项目:xtext-core    文件:AbstractAntlrParserBasedTokenSourceProvider.java   
/**
 * Creates a lexer for the given stream, delegating to the wrapped parser.
 *
 * @param stream
 *            the character stream to lex
 * @return the lexer created by the underlying {@link AbstractAntlrParser}
 * @throws IllegalStateException
 *             if the configured parser is not an AbstractAntlrParser
 */
@Override
public TokenSource createTokenSource(final CharStream stream) {
  if ((this.parser instanceof AbstractAntlrParser)) {
    return ((AbstractAntlrParser)this.parser).createLexer(stream);
  }
  // Same message the generated StringConcatenation produced, built directly.
  throw new IllegalStateException(
      this.parser.getClass().getName() + " should be a subclass of " + AbstractAntlrParser.class.getName());
}
项目:xtext-core    文件:AntlrProposalConflictHelper.java   
/**
 * Compares the token sequences of two sources by token text.
 *
 * @param first
 *            the first token source
 * @param second
 *            the second token source
 * @return true if every token of the first source has a counterpart with equal
 *         text in the second source (extra trailing tokens in the second source
 *         are not checked)
 */
protected boolean equalTokenSequence(TokenSource first, TokenSource second) {
    for (Token token = first.nextToken(); !token.equals(Token.EOF_TOKEN); token = first.nextToken()) {
        Token candidate = second.nextToken();
        // EOF check must come first: the second source may be exhausted early.
        if (candidate.equals(Token.EOF_TOKEN) || !token.getText().equals(candidate.getText())) {
            return false;
        }
    }
    return true;
}
项目:bts    文件:AntlrProposalConflictHelper.java   
/**
 * Compares the token sequences of two sources by token text.
 *
 * @param first
 *            the first token source
 * @param second
 *            the second token source
 * @return true if every token of the first source has a counterpart with equal
 *         text in the second source (extra trailing tokens in the second source
 *         are not checked)
 */
protected boolean equalTokenSequence(TokenSource first, TokenSource second) {
    Token token = null;
    while(!(token = first.nextToken()).equals(Token.EOF_TOKEN)) {
        Token otherToken = second.nextToken();
        // Second source ran out of tokens before the first one did.
        if (otherToken.equals(Token.EOF_TOKEN)) {
            return false;
        }
        if (!token.getText().equals(otherToken.getText())) {
            return false;
        }
    }
    return true;
}
项目:bts    文件:DocumentTokenSource.java   
/**
 * Captures the data needed to repair the token region after a document change.
 *
 * @param offset
 *            document offset where relexing started
 * @param index
 *            index of the first affected token info
 * @param newToken
 *            the first token produced by the new lexer run
 * @param lexer
 *            the token source positioned after {@code newToken}
 */
public RepairEntryData(int offset, int index, CommonToken newToken, TokenSource lexer) {
    this.offset = offset;
    this.index = index;
    this.newToken = newToken;
    this.tokenSource = lexer;
}
项目:bts    文件:DocumentTokenSource.java   
/**
 * Lexes the given string and records a {@link TokenInfo} for every token.
 *
 * @param string
 *            the text to tokenize
 * @return the token infos, in lexing order
 */
protected List<TokenInfo> createTokenInfos(String string) {
    // string.length() / 3 is a sizing heuristic for the expected token count.
    List<TokenInfo> infos = Lists.newArrayListWithExpectedSize(string.length() / 3);
    TokenSource lexer = createTokenSource(string);
    for (CommonToken token = (CommonToken) lexer.nextToken(); token != Token.EOF_TOKEN;
            token = (CommonToken) lexer.nextToken()) {
        infos.add(createTokenInfo(token));
    }
    return infos;
}
项目:bts    文件:DocumentTokenSource.java   
/**
 * Determines where relexing must start after a document change: skips the recorded
 * token infos up to the edit offset, then creates a token source over the remainder
 * of the document whose token offsets are shifted back into document coordinates.
 *
 * @param e
 *            the document event describing the change
 * @return offset, token-info index, first relexed token, and the shifting token source
 * @since 2.4
 */
protected RepairEntryData getRepairEntryData(DocumentEvent e) throws Exception {
    int tokenStartsAt = 0;
    int tokenInfoIdx = 0;
    // Advance past all tokens that end before the edit offset.
    for(tokenInfoIdx = 0; tokenInfoIdx< getInternalModifyableTokenInfos().size(); ++tokenInfoIdx) {
        TokenInfo oldToken = getInternalModifyableTokenInfos().get(tokenInfoIdx);
        if(tokenStartsAt <= e.getOffset() && tokenStartsAt + oldToken.getLength() >= e.getOffset())
            break;
        tokenStartsAt += oldToken.getLength();
    }
    // Lex only the tail of the document, starting at the affected token.
    final TokenSource delegate = createTokenSource(e.fDocument.get(tokenStartsAt, e.fDocument.getLength() - tokenStartsAt));
    final int offset = tokenStartsAt;
    // Wrap the delegate so token start/stop indexes become absolute document offsets.
    TokenSource source = new TokenSource() {
        public Token nextToken() {
            CommonToken commonToken = (CommonToken) delegate.nextToken();
            // NOTE(review): presumably pins the token's text before the indexes
            // below are shifted away from the lexed substring — confirm.
            commonToken.setText(commonToken.getText());
            commonToken.setStartIndex(commonToken.getStartIndex()+offset);
            commonToken.setStopIndex(commonToken.getStopIndex()+offset);
            return commonToken;
        }

        public String getSourceName() {
            return delegate.getSourceName();
        }
    };
    final CommonToken token = (CommonToken) source.nextToken();
    return new RepairEntryData(offset, tokenInfoIdx, token, source);
}
项目:templates4j    文件:gUnitBase.java   
/**
 * Lexes and parses the given input with reflectively loaded lexer/parser classes
 * and invokes the named parser rule.
 *
 * @param ruleName
 *            the parser rule method to invoke
 * @param input
 *            the text to parse
 * @param scriptLine
 *            the line number reported for the input (for error messages)
 * @return whatever the invoked rule method returns
 * @throws Exception
 *             on reflection failures or parse errors
 */
public Object execParser(
    String ruleName,
    String input,
    int scriptLine)
    throws Exception
{
    ANTLRStringStream is = new ANTLRStringStream(input);
    Class<? extends TokenSource> lexerClass = Class.forName(lexerClassName).asSubclass(TokenSource.class);
    Constructor<? extends TokenSource> lexConstructor = lexerClass.getConstructor(CharStream.class);
    TokenSource lexer = lexConstructor.newInstance(is);
    is.setLine(scriptLine);

    CommonTokenStream tokens = new CommonTokenStream(lexer);

    Class<? extends Parser> parserClass = Class.forName(parserClassName).asSubclass(Parser.class);
    Constructor<? extends Parser> parConstructor = parserClass.getConstructor(TokenStream.class);
    Parser parser = parConstructor.newInstance(tokens);

    // set up customized tree adaptor if necessary
    if ( adaptorClassName!=null ) {
        Method m = parserClass.getMethod("setTreeAdaptor", TreeAdaptor.class);
        Class<? extends TreeAdaptor> adaptorClass = Class.forName(adaptorClassName).asSubclass(TreeAdaptor.class);
        // getDeclaredConstructor().newInstance() replaces the deprecated
        // Class.newInstance(), which bypasses compile-time exception checking.
        m.invoke(parser, adaptorClass.getDeclaredConstructor().newInstance());
    }

    Method ruleMethod = parserClass.getMethod(ruleName);

    // INVOKE RULE
    return ruleMethod.invoke(parser);
}
项目:stack-trace-parser    文件:AntlrUtils.java   
/**
 * Debug helper: prints every token of the stream's source (including the final
 * EOF token) as "text [typeName]".
 *
 * @param tokens
 *            the stream whose underlying token source is drained
 * @param tokenToName
 *            maps String.valueOf(tokenType) to a human-readable name
 */
static void printTokenStream(CommonTokenStream tokens, Map tokenToName) {
    TokenSource source = tokens.getTokenSource();
    while (true) {
        Token curr = source.nextToken();
        System.out.println(curr.getText() + " [" + tokenToName.get(String.valueOf(curr.getType())) + "]");
        if (curr.getType() == -1) { // -1 is the ANTLR EOF token type
            break;
        }
    }
}
项目:n4js    文件:HighlightingParser.java   
/**
 * Create a new lexer for the given input.
 *
 * @param stream
 *            the character stream the lexer should tokenize
 * @return a freshly provided lexer bound to the stream
 */
protected TokenSource createLexer(CharStream stream) {
    Lexer result = lexerProvider.get();
    result.setCharStream(stream);
    return result;
}
项目:n4js    文件:LazyTokenStream.java   
/**
 * Create a new stream with the given source.
 *
 * @param tokenSource
 *            the token source to read tokens from
 * @param tokenDefProvider
 *            provides the token definitions for the grammar
 */
public LazyTokenStream(TokenSource tokenSource, ITokenDefProvider tokenDefProvider) {
    super(tokenSource, tokenDefProvider);
    // Replace the default token list with the JS-specific implementation.
    tokens = new JSTokenList();
}
项目:n4js    文件:N4JSSemicolonInjectingParser.java   
/**
 * Creates a custom {@link XtextTokenStream} which does not fill its buffer eager but pauses on occurrences of the
 * {@code '/'}.
 *
 * @param tokenSource
 *            the token source to wrap
 * @return a lazily filling token stream
 */
@Override
protected LazyTokenStream createTokenStream(TokenSource tokenSource) {
    return new LazyTokenStream(tokenSource, getTokenDefProvider());
}
项目:r8    文件:Smali.java   
/**
 * Compiles the given smali source texts into one dex file.
 *
 * @param smaliTexts
 *            the smali source fragments; each is parsed and emitted into the same dex builder
 * @param apiLevel
 *            the Android API level used for opcodes, parser and tree walker
 * @return the raw bytes of the resulting dex file
 * @throws RecognitionException
 *             if the ANTLR tree walker fails fatally
 * @throws IOException
 *             on failures while writing the dex data
 * @throws RuntimeException
 *             if lexing, parsing or code generation reports syntax errors
 */
public static byte[] compile(List<String> smaliTexts, int apiLevel)
    throws RecognitionException, IOException {
  DexBuilder dexBuilder = new DexBuilder(Opcodes.forApi(apiLevel));

  for (String smaliText : smaliTexts) {
    Reader reader = new StringReader(smaliText);

    // The flex-generated lexer is used as an ANTLR TokenSource, hence the cast.
    LexerErrorInterface lexer = new smaliFlexLexer(reader);
    CommonTokenStream tokens = new CommonTokenStream((TokenSource) lexer);

    smaliParser parser = new smaliParser(tokens);
    parser.setVerboseErrors(true);
    parser.setAllowOdex(false);
    parser.setApiLevel(apiLevel);

    smaliParser.smali_file_return result = parser.smali_file();

    if (parser.getNumberOfSyntaxErrors() > 0 || lexer.getNumberOfSyntaxErrors() > 0) {
      throw new RuntimeException(
          "Error occured while compiling text:\n" + StringUtils.join(smaliTexts, "\n"));
    }

    CommonTree t = result.getTree();

    // Walk the AST to emit dex code into the shared builder.
    CommonTreeNodeStream treeStream = new CommonTreeNodeStream(t);
    treeStream.setTokenStream(tokens);

    smaliTreeWalker dexGen = new smaliTreeWalker(treeStream);
    dexGen.setApiLevel(apiLevel);
    dexGen.setVerboseErrors(true);
    dexGen.setDexBuilder(dexBuilder);
    dexGen.smali_file();

    if (dexGen.getNumberOfSyntaxErrors() > 0) {
      throw new RuntimeException("Error occured while compiling text");
    }
  }

  MemoryDataStore dataStore = new MemoryDataStore();

  dexBuilder.writeTo(dataStore);

  // TODO(sgjesse): This returns the full backingstore from MemoryDataStore, which by default
  // is 1024k bytes. Our dex file reader does not complain though.
  return dataStore.getData();
}
项目:xtext-core    文件:AbstractAntlrParser.java   
/**
 * Creates a fresh lexer bound to the given character stream.
 *
 * @param stream
 *            the input to tokenize
 * @return the configured lexer
 */
protected TokenSource createLexer(CharStream stream) {
    final Lexer newLexer = lexerProvider.get();
    newLexer.setCharStream(stream);
    return newLexer;
}
项目:xtext-core    文件:AbstractAntlrParser.java   
/**
 * Creates the token stream that feeds the parser.
 *
 * @param tokenSource
 *            the lexer output to wrap
 * @return a new {@link XtextTokenStream} over the given source
 */
protected XtextTokenStream createTokenStream(TokenSource tokenSource) {
    return new XtextTokenStream(tokenSource, getTokenDefProvider());
}
项目:xtext-core    文件:XtextTokenStream.java   
/**
 * Creates a token stream that yields tokens from the given channel only.
 *
 * @param tokenSource
 *            the token source to read from
 * @param channel
 *            the token channel to expose
 */
public XtextTokenStream(TokenSource tokenSource, int channel) {
    super(tokenSource, channel);
    // Presized token buffer.
    tokens = new TokenList(500);
}
项目:xtext-core    文件:AbstractIndentationTokenSource.java   
/**
 * Creates a new indentation-aware token source wrapping the given delegate.
 *
 * @param delegate
 *            the underlying token source to read from
 */
protected AbstractIndentationTokenSource(TokenSource delegate) {
    setDelegate(delegate);
}
项目:xtext-core    文件:AbstractSplittingTokenSource.java   
/**
 * Sets the token source this splitting source reads from.
 *
 * @param delegate
 *            the underlying token source
 */
public void setDelegate(TokenSource delegate) {
    this.delegate = delegate;
}
项目:xtext-core    文件:AbstractSplittingTokenSource.java   
/**
 * @return the underlying token source this splitting source reads from
 */
public TokenSource getDelegate() {
    return delegate;
}
项目:xtext-core    文件:AbstractLexerBasedConverter.java   
/**
 * Asserts that the converted text lexes back to a token matching the value.
 *
 * @since 2.7
 */
protected void assertTokens(T value, String result) {
    assertTokens(value, getTokenSource(result), result);
}
项目:xtext-core    文件:AbstractTokenSourceProvider.java   
/**
 * Creates a token source for an arbitrary character sequence by going through a reader.
 *
 * @param text
 *            the input text
 * @return a token source over the text
 */
@Override
public TokenSource createTokenSource(final CharSequence text) {
  return createTokenSource(getReader(text));
}
项目:xtext-core    文件:AbstractTokenSourceProvider.java   
/**
 * Creates a token source for the reader by first converting it into a character stream.
 *
 * @param reader
 *            the reader providing the input text
 * @return a token source over the reader's content
 */
@Override
public TokenSource createTokenSource(final Reader reader) {
  return this.createTokenSource(this.getCharStream(reader));
}
项目:xtext-core    文件:IndentationAwareUiTestLanguageTokenSource.java   
/**
 * Creates a new indentation-aware token source for the UI test language.
 *
 * @param delegate
 *            the underlying lexer to wrap
 */
public IndentationAwareUiTestLanguageTokenSource(TokenSource delegate) {
    super(delegate);
}
项目:xtext-core    文件:IndentationAwareUiTestLanguageTokenSource.java   
/**
 * Creates a new indentation-aware token source for the UI test language.
 *
 * @param delegate
 *            the underlying lexer to wrap
 */
public IndentationAwareUiTestLanguageTokenSource(TokenSource delegate) {
    super(delegate);
}
项目:xtext-core    文件:IndentationAwareUiTestLanguageParser.java   
/**
 * Wraps the standard lexer in an indentation-aware token source.
 */
@Override
protected TokenSource createLexer(CharStream stream) {
    TokenSource plainLexer = super.createLexer(stream);
    return new IndentationAwareUiTestLanguageTokenSource(plainLexer);
}
项目:xtext-core    文件:IndentationAwareUiTestLanguageParser.java   
/**
 * Wraps the standard lexer in an indentation-aware token source.
 */
@Override
protected TokenSource createLexer(CharStream stream) {
    return new IndentationAwareUiTestLanguageTokenSource(super.createLexer(stream));
}
项目:xtext-core    文件:IndentationAwareTestLanguageParser.java   
/**
 * Wraps the standard lexer in an indentation-aware token source.
 */
@Override
protected TokenSource createLexer(CharStream stream) {
    return new IndentationAwareTestLanguageTokenSource(super.createLexer(stream));
}
项目:xtext-core    文件:IndentationAwareTestLanguageTokenSource.java   
/**
 * Creates a new indentation-aware token source for the test language.
 *
 * @param delegate
 *            the underlying lexer to wrap
 */
public IndentationAwareTestLanguageTokenSource(TokenSource delegate) {
    super(delegate);
}
项目:xtext-core    文件:CustomizedIndentationAwareTestLanguageParser.java   
/**
 * Wraps the standard lexer in an {@code IndentTokenSource} so indentation
 * tokens are synthesized on top of the regular token stream.
 */
@Override
protected TokenSource createLexer(CharStream stream) {
    IndentTokenSource indentSource = new IndentTokenSource();
    indentSource.setDelegate(super.createLexer(stream));
    return indentSource;
}