/**
 * Parses the input and returns a list of lexer tokens. Asserts that the produced tokens are equal to the tokens
 * that the production parser produced.
 *
 * @return the tokens for the highlighting.
 */
public List<Token> getTokens(CharSequence input) {
    List<Token> result;
    IParseResult parseResult = parser.parse(new StringReader(input.toString()));
    if (!parseResult.hasSyntaxErrors()) {
        result = throwingHighlightingParser.getTokens(input);
    } else {
        result = highlightingParser.getTokens(input);
    }
    // assert equal tokens
    Iterator<Token> iter = result.iterator();
    for (ILeafNode leaf : parseResult.getRootNode().getLeafNodes()) {
        Assert.assertTrue("hasNext at index " + leaf.getTotalOffset() + " for leaf '" + leaf.getText() + "'",
                iter.hasNext());
        Token token = iter.next();
        // TODO: assert token type
        Assert.assertEquals(leaf.getText(), token.getText());
    }
    return result;
}
@Override
protected void createContextsForLastCompleteNode(EObject previousModel, boolean strict) {
    String currentNodePrefix = getPrefix(currentNode);
    if (!Strings.isEmpty(currentNodePrefix) && !currentNode.getText().equals(currentNodePrefix)) {
        lexer.setCharStream(new ANTLRStringStream(currentNodePrefix));
        Token token = lexer.nextToken();
        if (token == Token.EOF_TOKEN) {
            // error case - nothing could be parsed
            return;
        }
        while (token != Token.EOF_TOKEN) {
            if (isErrorToken(token))
                return;
            token = lexer.nextToken();
        }
    }
    String prefix = "";
    Collection<FollowElement> followElements = getParser().getFollowElements(rootNode, 0, completionOffset, strict);
    doCreateContexts(lastCompleteNode, currentNode, prefix, previousModel, followElements);
}
@Override
protected TokenSource createTokenSource(String string) {
    List<Token> tokens = highlightingParser.getTokens(string);
    Iterator<Token> iter = tokens.iterator();
    return new TokenSource() {
        @Override
        public Token nextToken() {
            if (iter.hasNext()) {
                return iter.next();
            }
            return Token.EOF_TOKEN;
        }

        @Override
        public String getSourceName() {
            return "Text: " + string;
        }
    };
}
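/*
 * Hypothetical, self-contained sketch (not from the original sources): it shows how a
 * pre-computed token list, exposed through a list-backed TokenSource as above, can be
 * driven with the plain ANTLR 3 runtime. The token types 4 and 5 and the texts are
 * made-up placeholders, not generated parser constants.
 */
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import org.antlr.runtime.CommonToken;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenSource;

public class ListTokenSourceDemo {
    public static void main(String[] args) {
        List<Token> tokens = Arrays.<Token>asList(new CommonToken(4, "var"), new CommonToken(5, "x"));
        Iterator<Token> iter = tokens.iterator();
        TokenSource source = new TokenSource() {
            @Override
            public Token nextToken() {
                // drain the list, then report end of input
                return iter.hasNext() ? iter.next() : Token.EOF_TOKEN;
            }

            @Override
            public String getSourceName() {
                return "demo";
            }
        };
        CommonTokenStream stream = new CommonTokenStream(source);
        while (stream.LA(1) != Token.EOF) {
            System.out.println(stream.LT(1).getText()); // prints "var", then "x"
            stream.consume();
        }
    }
}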
@Override
protected void announce(Token start, Token stop, AbstractElement element) {
    if (start != null && start != Token.EOF_TOKEN) {
        if (start == stop) {
            announce(start, element);
        } else {
            CommonToken castedStart = (CommonToken) start;
            if (stop == null) { // possible error condition
                if (start.getTokenIndex() == state.lastErrorIndex) {
                    return;
                }
            }
            CommonToken castedEnd = (CommonToken) stop;
            Integer newType = rewriter.rewrite(castedStart, element);
            if (newType != null && castedEnd != null && castedEnd != Token.EOF_TOKEN) {
                LazyTokenStream castedInput = (LazyTokenStream) this.input;
                for (int i = castedStart.getTokenIndex() + 1; i < castedEnd.getTokenIndex(); i++) {
                    Token token = castedInput.get(i);
                    if (token.getChannel() != Token.HIDDEN_CHANNEL)
                        token.setType(newType);
                }
                castedEnd.setType(newType);
            }
        }
    }
}
/**
 * Returns {@code true} if the set of expected follow-states includes an implicit or explicit semicolon.
 */
private static boolean followedBySemicolon(RecognizerSharedState state, Callback.RecoverySets recoverySets,
        int currentIndex) {
    int top = state._fsp;
    if (currentIndex != state.lastErrorIndex) {
        long[] array = state.following[top].toPackedArray();
        if (array.length == 1 && array[0] == (1L << Token.EOR_TOKEN_TYPE)) {
            return false;
        }
    }
    for (int i = top; i >= 0; i--) {
        BitSet localFollowSet = state.following[i];
        if (recoverySets.matches(localFollowSet)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true if there was an unexpected EOL.
 */
public static boolean hasDisallowedEOL(Callback callback) {
    TokenStream input = callback.getInput();
    Token lt = input.LT(1);
    // Start on the position before the current token and scan backwards over off-channel tokens until the
    // previous on-channel token.
    for (int ix = lt.getTokenIndex() - 1; ix > 0; ix--) {
        lt = input.get(ix);
        if (lt.getChannel() == Token.DEFAULT_CHANNEL) {
            // On channel token found: stop scanning.
            break;
        } else if (isSemicolonEquivalent(lt)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true if the given token is considered to be semicolon equivalent.
 */
static boolean isSemicolonEquivalent(Token lt) {
    if (lt.getType() == InternalN4JSParser.RULE_EOL) {
        return true;
    }
    if (lt.getType() == InternalN4JSParser.RULE_ML_COMMENT) {
        String tokenText = lt.getText();
        for (int i = 2; i < tokenText.length() - 2; i++) {
            char c = tokenText.charAt(i);
            if (c == '\n' || c == '\r') {
                return true;
            }
        }
    }
    return false;
}
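/*
 * Hypothetical standalone sketch of the rule above (not the N4JS sources): an EOL token,
 * or a multi-line comment whose body spans a line break, is treated as semicolon
 * equivalent; a single-line block comment is not. The EOL / ML_COMMENT type constants are
 * placeholders, not the generated InternalN4JSParser values.
 */
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class SemicolonEquivalenceDemo {
    static final int EOL = 10;        // placeholder token type
    static final int ML_COMMENT = 11; // placeholder token type

    static boolean isSemicolonEquivalent(Token lt) {
        if (lt.getType() == EOL) {
            return true;
        }
        if (lt.getType() == ML_COMMENT) {
            String text = lt.getText();
            // skip the leading "/*" and trailing "*/" delimiters
            for (int i = 2; i < text.length() - 2; i++) {
                char c = text.charAt(i);
                if (c == '\n' || c == '\r') {
                    return true;
                }
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isSemicolonEquivalent(new CommonToken(EOL, "\n")));             // true
        System.out.println(isSemicolonEquivalent(new CommonToken(ML_COMMENT, "/*\n*/")));  // true
        System.out.println(isSemicolonEquivalent(new CommonToken(ML_COMMENT, "/* x */"))); // false
    }
}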
/** Consume tokens until one matches the given token set */
@Override
public void consumeUntil(IntStream i, BitSet set) {
    // System.out.println("consumeUntil(" + set.toString(getTokenNames()) + ")");
    Token ttype;
    List<Token> skipped = new ArrayList<>();
    beginResync();
    try {
        while ((ttype = input.LT(1)) != null && ttype.getType() != Token.EOF && !set.member(ttype.getType())) {
            // System.out.println("consume during recover LA(1)=" + getTokenNames()[input.LA(1)]);
            input.consume();
            skipped.add(ttype);
        }
    } finally {
        endResync();
    }
    ((NbParseTreeBuilder) dbg).consumeSkippedTokens(skipped);
}
/**
 * Creates a qualified name tree by extending an existing one with an additional fragment in front.
 *
 * @param init token holding the additional level text
 * @param subTree the existing tree; may be {@code null}, in which case the result is calculated using
 *        {@link #toQualName(Token,Token)} with first argument {@code null}
 */
CommonTree toQualName(Token init, CtrlTree subTree) {
    CtrlTree result;
    if (subTree == null || this.namespace.hasErrors()) {
        result = toQualName(null, init);
    } else {
        Token subTop = subTree.getToken();
        QualName qualName = subTree.getQualName().nest(getText(init));
        CommonToken top = new CommonToken(subTop.getType(), qualName.toString());
        top.setLine(init.getLine());
        top.setTokenIndex(init.getTokenIndex());
        result = new CtrlTree(top);
        result.setQualName(qualName);
        result.addChild(subTree.getChild(0));
    }
    return result;
}
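/*
 * Minimal illustrative sketch (not from the original sources) of the tree-building idiom
 * above, using only the plain ANTLR 3 CommonToken/CommonTree API: a freshly created token
 * carries the synthesized qualified-name text while line and index information is copied
 * from the original token. Token type 7 and the texts are made-up placeholders.
 */
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.tree.CommonTree;

public class QualNameTreeDemo {
    public static void main(String[] args) {
        CommonToken init = new CommonToken(7, "outer");
        init.setLine(3);
        init.setTokenIndex(42);

        // Synthesize a token whose text is the nested qualified name.
        CommonToken top = new CommonToken(7, "outer.inner");
        top.setLine(init.getLine());
        top.setTokenIndex(init.getTokenIndex());

        CommonTree result = new CommonTree(top);
        result.addChild(new CommonTree(new CommonToken(7, "inner")));
        System.out.println(result.toStringTree()); // prints (outer.inner inner)
    }
}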
public CompiledST defineRegion(String enclosingTemplateName, Token regionT,
                               String template, Token templateToken) {
    String name = regionT.getText();
    template = Misc.trimOneStartingNewline(template);
    template = Misc.trimOneTrailingNewline(template);
    CompiledST code = compile(getFileName(), enclosingTemplateName, null, template, templateToken);
    String mangled = getMangledRegionName(enclosingTemplateName, name);

    if ( lookupTemplate(mangled)==null ) {
        errMgr.compileTimeError(ErrorType.NO_SUCH_REGION, templateToken, regionT,
                                enclosingTemplateName, name);
        return new CompiledST();
    }
    code.name = mangled;
    code.isRegion = true;
    code.regionDefType = ST.RegionType.EXPLICIT;
    code.templateDefStartToken = regionT;

    rawDefineTemplate(mangled, code, regionT);
    code.defineArgDefaultValueTemplates(this);
    code.defineImplicitlyDefinedTemplates(this);

    return code;
}
public CompiledST defineTemplate(String fullyQualifiedTemplateName,
                                 Token nameT,
                                 List<FormalArgument> args,
                                 String template,
                                 Token templateToken) {
    if ( verbose ) System.out.println("defineTemplate("+fullyQualifiedTemplateName+")");
    if ( fullyQualifiedTemplateName==null || fullyQualifiedTemplateName.length()==0 ) {
        throw new IllegalArgumentException("empty template name");
    }
    if ( fullyQualifiedTemplateName.indexOf('.')>=0 ) {
        throw new IllegalArgumentException("cannot have '.' in template names");
    }
    template = Misc.trimOneStartingNewline(template);
    template = Misc.trimOneTrailingNewline(template);

    // compile, passing in templateName as enclosing name for any embedded regions
    CompiledST code = compile(getFileName(), fullyQualifiedTemplateName, args, template, templateToken);
    code.name = fullyQualifiedTemplateName;
    rawDefineTemplate(fullyQualifiedTemplateName, code, nameT);
    code.defineArgDefaultValueTemplates(this);
    code.defineImplicitlyDefinedTemplates(this); // define any anonymous subtemplates

    return code;
}
@Override
protected void createContextsForLastCompleteNode(EObject previousModel, boolean strict) {
    String currentNodePrefix = getPrefix(currentNode);
    if (!Strings.isEmpty(currentNodePrefix) && !currentNode.getText().equals(currentNodePrefix)) {
        lexer.setCharStream(new ANTLRStringStream(currentNodePrefix));
        Token token = lexer.nextToken();
        if (token == Token.EOF_TOKEN) {
            return;
        }
        while (token != Token.EOF_TOKEN) {
            if (isErrorToken(token)) {
                return;
            }
            token = lexer.nextToken();
        }
    }
    String prefix = "";
    Collection<FollowElement> followElements = parseFollowElements(completionOffset, strict);
    doCreateContexts(lastCompleteNode, currentNode, prefix, previousModel, followElements);
}
Token COMMENT() {
    match('!');
    while ( !(c=='!' && input.LA(2)==delimiterStopChar) ) {
        if ( c==EOF ) {
            RecognitionException re = new MismatchedTokenException((int)'!', input);
            re.line = input.getLine();
            re.charPositionInLine = input.getCharPositionInLine();
            errMgr.lexerError(input.getSourceName(),
                              "Nonterminated comment starting at " + startLine + ":" + startCharPositionInLine +
                              ": '!" + delimiterStopChar + "' missing",
                              templateToken, re);
            break;
        }
        consume();
    }
    consume();
    consume(); // grab !>
    return newToken(COMMENT);
}
Token ESCAPE() {
    startCharIndex = input.index();
    startCharPositionInLine = input.getCharPositionInLine();
    consume(); // kill \\
    if ( c=='u') return UNICODE();
    String text;
    switch ( c ) {
        case '\\' : LINEBREAK(); return SKIP;
        case 'n' : text = "\n"; break;
        case 't' : text = "\t"; break;
        case ' ' : text = " "; break;
        default :
            NoViableAltException e = new NoViableAltException("",0,0,input);
            errMgr.lexerError(input.getSourceName(), "invalid escaped char: '"+str(c)+"'", templateToken, e);
            consume();
            match(delimiterStopChar);
            return SKIP;
    }
    consume();
    Token t = newToken(TEXT, text, input.getCharPositionInLine()-2);
    match(delimiterStopChar);
    return t;
}
/**
 * Appends a query snippet to the message to help the user to understand the problem.
 *
 * @param parser the parser used to parse the query
 * @param builder the <code>StringBuilder</code> used to build the error message
 */
private void appendQuerySnippet(Parser parser, StringBuilder builder) {
    TokenStream tokenStream = parser.getTokenStream();
    int index = tokenStream.index();
    int size = tokenStream.size();

    Token from = tokenStream.get(getSnippetFirstTokenIndex(index));
    Token to = tokenStream.get(getSnippetLastTokenIndex(index, size));
    Token offending = tokenStream.get(getOffendingTokenIndex(index, size));

    appendSnippet(builder, from, to, offending);
}
/**
 * Appends a query snippet to the message to help the user to understand the problem.
 *
 * @param from the first token to include within the snippet
 * @param to the last token to include within the snippet
 * @param offending the token which is responsible for the error
 */
final void appendSnippet(StringBuilder builder, Token from, Token to, Token offending) {
    if (!areTokensValid(from, to, offending))
        return;

    String[] lines = query.split("\n");

    boolean includeQueryStart = (from.getLine() == 1) && (from.getCharPositionInLine() == 0);
    boolean includeQueryEnd = (to.getLine() == lines.length)
            && (getLastCharPositionInLine(to) == lines[lines.length - 1].length());

    builder.append(" (");

    if (!includeQueryStart)
        builder.append("...");

    String toLine = lines[lineIndex(to)];
    int toEnd = getLastCharPositionInLine(to);
    lines[lineIndex(to)] = toEnd >= toLine.length() ? toLine : toLine.substring(0, toEnd);
    lines[lineIndex(offending)] = highlightToken(lines[lineIndex(offending)], offending);
    lines[lineIndex(from)] = lines[lineIndex(from)].substring(from.getCharPositionInLine());

    for (int i = lineIndex(from), m = lineIndex(to); i <= m; i++)
        builder.append(lines[i]);

    if (!includeQueryEnd)
        builder.append("...");

    builder.append(")");
}
/**
 * Checks if the specified tokens are valid.
 *
 * @param tokens the tokens to check
 * @return <code>true</code> if all the specified tokens are valid ones,
 * <code>false</code> otherwise.
 */
private static boolean areTokensValid(Token... tokens) {
    for (Token token : tokens) {
        if (!isTokenValid(token))
            return false;
    }
    return true;
}
public Token nextToken() {
    super.nextToken();
    if (tokens.size() == 0)
        return new CommonToken(Token.EOF);
    return tokens.remove(0);
}
public final String basic_unreserved_keyword() throws RecognitionException {
    String str = null;

    Token k=null;

    try {
        // Parser.g:1628:5: (k= ( K_KEYS | K_AS | K_CLUSTERING | K_COMPACT | K_STORAGE | K_TYPE | K_VALUES | K_MAP | K_LIST | K_FILTERING | K_PERMISSION | K_PERMISSIONS | K_KEYSPACES | K_ALL | K_USER | K_USERS | K_ROLE | K_ROLES | K_SUPERUSER | K_NOSUPERUSER | K_LOGIN | K_NOLOGIN | K_OPTIONS | K_PASSWORD | K_EXISTS | K_CUSTOM | K_TRIGGER | K_CONTAINS | K_STATIC | K_FROZEN | K_TUPLE | K_FUNCTION | K_FUNCTIONS | K_AGGREGATE | K_SFUNC | K_STYPE | K_FINALFUNC | K_INITCOND | K_RETURNS | K_LANGUAGE | K_CALLED | K_INPUT | K_LIKE | K_PER | K_PARTITION | K_GROUP ) )
        // Parser.g:1628:7: k= ( K_KEYS | K_AS | K_CLUSTERING | K_COMPACT | K_STORAGE | K_TYPE | K_VALUES | K_MAP | K_LIST | K_FILTERING | K_PERMISSION | K_PERMISSIONS | K_KEYSPACES | K_ALL | K_USER | K_USERS | K_ROLE | K_ROLES | K_SUPERUSER | K_NOSUPERUSER | K_LOGIN | K_NOLOGIN | K_OPTIONS | K_PASSWORD | K_EXISTS | K_CUSTOM | K_TRIGGER | K_CONTAINS | K_STATIC | K_FROZEN | K_TUPLE | K_FUNCTION | K_FUNCTIONS | K_AGGREGATE | K_SFUNC | K_STYPE | K_FINALFUNC | K_INITCOND | K_RETURNS | K_LANGUAGE | K_CALLED | K_INPUT | K_LIKE | K_PER | K_PARTITION | K_GROUP )
        {
            k=input.LT(1);
            if ( (input.LA(1) >= K_AGGREGATE && input.LA(1) <= K_ALL)||input.LA(1)==K_AS||input.LA(1)==K_CALLED||input.LA(1)==K_CLUSTERING||(input.LA(1) >= K_COMPACT && input.LA(1) <= K_CONTAINS)||input.LA(1)==K_CUSTOM||(input.LA(1) >= K_EXISTS && input.LA(1) <= K_FINALFUNC)||input.LA(1)==K_FROZEN||(input.LA(1) >= K_FUNCTION && input.LA(1) <= K_FUNCTIONS)||input.LA(1)==K_GROUP||(input.LA(1) >= K_INITCOND && input.LA(1) <= K_INPUT)||input.LA(1)==K_KEYS||(input.LA(1) >= K_KEYSPACES && input.LA(1) <= K_LIKE)||(input.LA(1) >= K_LIST && input.LA(1) <= K_MAP)||input.LA(1)==K_NOLOGIN||input.LA(1)==K_NOSUPERUSER||input.LA(1)==K_OPTIONS||(input.LA(1) >= K_PARTITION && input.LA(1) <= K_PERMISSIONS)||input.LA(1)==K_RETURNS||(input.LA(1) >= K_ROLE && input.LA(1) <= K_ROLES)||input.LA(1)==K_SFUNC||(input.LA(1) >= K_STATIC && input.LA(1) <= K_SUPERUSER)||input.LA(1)==K_TRIGGER||(input.LA(1) >= K_TUPLE && input.LA(1) <= K_TYPE)||(input.LA(1) >= K_USER && input.LA(1) <= K_USERS)||input.LA(1)==K_VALUES ) {
                input.consume();
                state.errorRecovery=false;
            }
            else {
                MismatchedSetException mse = new MismatchedSetException(null,input);
                throw mse;
            }

            str = (k!=null?k.getText():null);
        }
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }
    finally {
        // do for sure before leaving
    }
    return str;
}
/**
 * The second pass over the given input. If the input ends with an ASI, the semicolon is removed and it is parsed
 * again, since the production parser may have skipped the ASI if more input was present. The same applies in the
 * opposite direction: if the input does not end with an ASI but the previous token suggests that there may have
 * been a semicolon, it is inserted.
 */
private void adjustASIAndCollectFollowElements(CustomInternalN4JSParser previousParser, boolean strict,
        Set<FollowElement> result) {
    ObservableXtextTokenStream tokens = (ObservableXtextTokenStream) previousParser.getTokenStream();
    int lastTokenIndex = tokens.size() - 1;
    if (lastTokenIndex >= 0) {
        CommonToken lastToken = (CommonToken) tokens.LT(-1);
        @SuppressWarnings("unchecked")
        List<Token> tokenList = tokens.getTokens();
        if (lastToken == null) {
            return; // TODO ask Sebastian why and how this can happen... it happens!
        }
        if (shouldSkipASI(lastToken)) {
            // Some statements may not span multiple lines, e.g. between the return keyword
            // and the returned expression, there may not be an ASI. Filter these locations.
            if (maySkipASI(lastToken, tokens)) {
                tokenList.remove(lastTokenIndex);
                result.addAll(resetAndGetFollowElements(tokens, strict));
                // If a postfix operator sneaked into the result, remove it since
                // we'd have produced an ASI before it.
                removePostfixOperator(result);
            }
        } else if (shouldAddSyntheticSemicolon(previousParser, lastTokenIndex, lastToken)) {
            CommonToken token = new CommonToken(semi);
            tokenList.add(token);
            result.addAll(resetAndGetFollowElements(tokens, strict));
            // Same here: if we had added an ASI, the postfix operator would be rendered
            // invalid, so remove it.
            removePostfixOperator(result);
        }
    }
}
/**
 * Prevents ASIs from being skipped at certain locations, e.g. after a return keyword.
 */
private boolean maySkipASI(CommonToken lastToken, ObservableXtextTokenStream tokens) {
    int countDownFrom = lastToken.getTokenIndex();
    for (int i = countDownFrom - 1; i >= 0; i--) {
        Token prevToken = tokens.get(i);
        if (prevToken.getChannel() == Token.DEFAULT_CHANNEL) {
            if (mandatoryASI.get(prevToken.getType())) {
                return false;
            }
            return true;
        }
    }
    return true;
}
/**
 * Implementation of the {@link TokenSource} interface. Returns new tokens as long as there are some; afterwards
 * returns {@link Token#EOF_TOKEN}.
 */
@Override
public Token nextToken() {
    if (next != null) {
        Token result = next;
        next = null;
        return result;
    }
    if (!leafNodes.hasNext()) {
        return Token.EOF_TOKEN;
    }
    ILeafNode leaf = leafNodes.next();
    if (leaf.getTotalOffset() >= endOffset) {
        leafNodes = Iterators.emptyIterator();
        return Token.EOF_TOKEN;
    }
    if (leaf.getTotalEndOffset() <= startOffset) {
        return nextToken();
    }
    if (leaf.getTotalEndOffset() > endOffset) {
        return toPrefixToken(leaf);
    }
    SyntaxErrorMessage syntaxErrorMessage = leaf.getSyntaxErrorMessage();
    if (syntaxErrorMessage != null && SEMICOLON_INSERTED.equals(syntaxErrorMessage.getIssueCode())) {
        return toASIToken(leaf);
    }
    if (leaf.isHidden()) {
        return processHiddenToken(leaf);
    }
    int tokenType = tokenTypeMapper.getInternalTokenType(leaf);
    return new CommonToken(tokenType, leaf.getText());
}
/**
 * Skips the given leaf as it's hidden. If it was the last token to be returned, a hidden token may be synthesized
 * if it would affect the semicolon insertion.
 */
private Token processHiddenToken(ILeafNode leaf) {
    Token result = nextToken();
    if (result == Token.EOF_TOKEN && Strings.countLineBreaks(leaf.getText()) > 0) {
        next = result;
        CommonToken hidden = new CommonToken(tokenTypeMapper.getInternalTokenType(leaf), leaf.getText());
        hidden.setChannel(Token.HIDDEN_CHANNEL);
        return hidden;
    }
    return result;
}
/**
 * Produces an ANTLR token for the prefix of the given leaf that overlaps the requested region.
 *
 * @see #endOffset
 */
private Token toPrefixToken(ILeafNode leaf) {
    Lexer lexer = new InternalN4JSLexer();
    String text = leaf.getText();
    String prefix = text.substring(0, endOffset - leaf.getTotalOffset());
    ANTLRStringStream stream = new ANTLRStringStream(prefix);
    lexer.setCharStream(stream);
    Token nextToken = lexer.nextToken();
    // copy the token to get rid of the reference to the stream
    return new CommonToken(nextToken.getType(), nextToken.getText());
}
/**
 * Obtain the tokens from the given reader.
 */
public List<Token> getTokens(Reader reader) {
    try {
        return doParse(new AntlrStreamWithToString(reader));
    } catch (IOException e) {
        throw new WrappedException(e);
    }
}
private List<Token> doParse(CharStream in) {
    TokenSource tokenSource = createLexer(in);
    LazyTokenStream tokenStream = createTokenStream(tokenSource);
    setInitialHiddenTokens(tokenStream);
    InternalN4JSParser parser = createParser(tokenStream);
    IUnorderedGroupHelper helper = unorderedGroupHelper.get();
    if (!(helper instanceof IUnorderedGroupHelper.Null)) {
        throw new IllegalStateException("Unexpected usage of unordered groups.");
    }
    Stopwatch stopwatch = null;
    boolean debug = LOGGER.isDebugEnabled();
    // boolean debug = true;
    if (debug) {
        stopwatch = Stopwatch.createStarted();
    }
    try {
        parser.entryRuleScript();
        while (tokenStream.LT(1) != Token.EOF_TOKEN) {
            tokenStream.consume();
        }
        @SuppressWarnings("unchecked")
        List<Token> result = tokenStream.getTokens();
        return result;
    } catch (Exception re) {
        throw new ParseException(re.getMessage(), re);
    } finally {
        if (debug) {
            assert stopwatch != null;
            long elapsed = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
            if (elapsed > 5) {
                LOGGER.warn("Coloring parser took: " + elapsed);
            }
        }
    }
}
/**
 * Adds the token and adjusts its channel.
 */
@Override
public boolean add(Token tok) {
    super.add(tok);
    int type = tok.getType();
    if (type == InternalN4JSParser.EqualsSignGreaterThanSign) {
        // An arrow expression may not follow a semicolon, thus we promote semicolon-equivalent
        // hidden tokens to the default channel if they precede the arrow (=>) operator.
        for (int i = size() - 2; i >= 0; i--) {
            Token prev = get(i);
            if (prev.getChannel() == Token.HIDDEN_CHANNEL) {
                if (SemicolonInjectionHelper.isSemicolonEquivalent(prev)) {
                    prev.setChannel(Token.DEFAULT_CHANNEL);
                    break;
                }
            } else {
                break;
            }
        }
    } else if (type == InternalN4JSParser.RULE_EOL || type == InternalN4JSParser.RULE_ML_COMMENT
            || type == InternalN4JSParser.RULE_WS || type == InternalN4JSParser.RULE_SL_COMMENT) {
        tok.setChannel(Token.HIDDEN_CHANNEL);
    } else {
        tok.setChannel(Token.DEFAULT_CHANNEL);
    }
    return true;
}
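/*
 * Hypothetical, self-contained sketch of the channel assignment above (not the actual
 * LazyTokenStream): whitespace, EOL and comment tokens are parked on the hidden channel,
 * everything else stays on the default channel. The ID / WS / EOL / ML_COMMENT / SL_COMMENT
 * constants are placeholders for the generated InternalN4JSParser token types.
 */
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class ChannelAssignmentDemo {
    static final int ID = 4, WS = 5, EOL = 6, ML_COMMENT = 7, SL_COMMENT = 8; // placeholders

    static void assignChannel(Token tok) {
        int type = tok.getType();
        if (type == EOL || type == ML_COMMENT || type == WS || type == SL_COMMENT) {
            tok.setChannel(Token.HIDDEN_CHANNEL);
        } else {
            tok.setChannel(Token.DEFAULT_CHANNEL);
        }
    }

    public static void main(String[] args) {
        Token id = new CommonToken(ID, "foo");
        Token eol = new CommonToken(EOL, "\n");
        assignChannel(id);
        assignChannel(eol);
        System.out.println(id.getChannel());  // 0 (Token.DEFAULT_CHANNEL)
        System.out.println(eol.getChannel()); // 99 (Token.HIDDEN_CHANNEL)
    }
}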
private void exhaustTokenSource() {
    LazyTokenStream casted = (LazyTokenStream) this.input;
    int marked = casted.mark();
    try {
        while (casted.LT(1) != Token.EOF_TOKEN) {
            casted.consume();
        }
    } finally {
        casted.rewind(marked);
    }
}
/**
 * <p>
 * Promotes EOL which may lead to an automatically inserted semicolon. This is probably the most important method
 * for automatic semicolon insertion, as it is only possible to insert a semicolon in case of line breaks (even if
 * they are hidden in a multi-line comment!).
 * </p>
 */
public static void promoteEOL(Callback callback) {
    RecognizerSharedState state = callback.getState();
    TokenStream input = callback.getInput();
    // Don't promote EOL if there was a syntax error at EOF
    if (state.lastErrorIndex == input.size()) {
        return;
    }
    // Get current token and its type (the possibly offending token).
    Token prev = input.LT(-1);
    Token next = input.LT(1);
    int la = next.getType();
    // Promoting an EOL means switching it from off channel to on channel.
    // A ML_COMMENT gets promoted when it contains an EOL.
    for (int idx = prev == null ? 0 : prev.getTokenIndex() + 1,
            max = la == Token.EOF ? input.size() : next.getTokenIndex(); idx < max; idx++) {
        Token lt = input.get(idx);
        if (lt.getChannel() == Token.DEFAULT_CHANNEL) {
            // On channel token found: stop scanning (previously promoted)
            break;
        } else if (isSemicolonEquivalent(lt)) {
            // We found our EOL: promote the token to on channel, position the input on it and reset the rule
            // start.
            lt.setChannel(Token.DEFAULT_CHANNEL);
            input.seek(idx);
            break;
        }
    }
}
/**
 * A "," cannot be followed by an automatically inserted semicolon. This is in particular true in case of variable
 * statements in which the last declaration is ended with a comma, which might easily happen when copying the
 * initializer from a list or object literal (cf. IDEBUG-214).
 */
private static boolean findCommaBeforeEOL(TokenStream casted, int startIndex) {
    for (int ix = startIndex - 1; ix > 0; ix--) {
        Token lt = casted.get(ix);
        if (lt.getType() == InternalN4JSParser.Comma) {
            // System.out.println("Found Comma, EOL is not valid");
            return true;
        }
        if (lt.getChannel() == Token.DEFAULT_CHANNEL) {
            // any other real char ends this search
            break;
        }
    }
    return false;
}
private void clearAndResetTokenState() {
    this.state.token = null;
    this.state.channel = Token.DEFAULT_CHANNEL;
    this.state.tokenStartCharIndex = input.index();
    this.state.tokenStartCharPositionInLine = input.getCharPositionInLine();
    this.state.tokenStartLine = input.getLine();
    this.state.text = null;
}
public SwitchNode(Token t) {
    super(t);
    if (t != null) {
        setName(t.getText());
        setValue((OptionValue.SwitchOnly) OptionValue.createSwitch());
    }
}
public UnknownOption(Token t) {
    super(t);
    if (t != null) {
        setName(t.getText());
        setValue(new OptionValue.SwitchOnly(true));
    }
}
public ParametrizedNode(Token token, int splitIndex) {
    super(token);
    final String string = token.getText();
    if (string != null) {
        setName(string.substring(0, splitIndex));
        setValue(new OptionValue.SimpleString(string.substring(splitIndex)));
        delimiter = "";
        setValid(true);
    } else {
        setName("");
        setValid(false);
    }
}
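/*
 * Illustrative sketch only (the surrounding Option* node classes are not reproduced here):
 * the constructor above splits the raw token text at splitIndex into an option name and its
 * value. The sample text "--encoding=UTF-8", the token type 4 and the chosen split position
 * are made up for illustration.
 */
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class SplitIndexDemo {
    public static void main(String[] args) {
        Token token = new CommonToken(4, "--encoding=UTF-8"); // placeholder token type
        int splitIndex = "--encoding=".length();
        String text = token.getText();
        String name = text.substring(0, splitIndex);  // "--encoding="
        String value = text.substring(splitIndex);    // "UTF-8"
        System.out.println(name + "|" + value);
    }
}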
public ParametrizedNode(Token name, String delimiter, String parameter, boolean isValid) {
    super(name);
    setName(name.getText());
    this.delimiter = delimiter;
    if (parameter != null) {
        setValue(new OptionValue.SimpleString(parameter));
    }
    setValid(isValid);
}
public UnrecognizedOption(TokenStream input, Token start, Token stop, RecognitionException e) {
    super(start);
    this.input = input;
    this.start = start;
    this.stop = stop;
    this.e = e;
    if (start != null) {
        setName(start.getText());
    }
    setValue(new OptionValue.SwitchOnly(true));
}