public void parse(String startRule, DebugEventListener actions, List visitedStates) throws RecognitionException { //System.out.println("parse("+startRule+")"); // Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet if ( grammar.getRuleStartState(startRule)==null ) { grammar.createNFAs(); } if ( !grammar.allDecisionDFAHaveBeenCreated() ) { // Create the DFA predictors for each decision grammar.createLookaheadDFAs(); } // do the parse Stack ruleInvocationStack = new Stack(); NFAState start = grammar.getRuleStartState(startRule); NFAState stop = grammar.getRuleStopState(startRule); parseEngine(startRule, start, stop, input, ruleInvocationStack, actions, visitedStates); }
public void parse(String startRule, DebugEventListener actions, List visitedStates) throws RecognitionException { //System.out.println("parse("+startRule+")"); // Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet if ( grammar.getRuleStartState(startRule)==null ) { grammar.buildNFA(); } if ( !grammar.allDecisionDFAHaveBeenCreated() ) { // Create the DFA predictors for each decision grammar.createLookaheadDFAs(); } // do the parse Stack ruleInvocationStack = new Stack(); NFAState start = grammar.getRuleStartState(startRule); NFAState stop = grammar.getRuleStopState(startRule); parseEngine(startRule, start, stop, input, ruleInvocationStack, actions, visitedStates); }
/** For a given input char stream, try to match against the NFA * starting at startRule. This is a deterministic parse even though * it is using an NFA because it uses DFAs at each decision point to * predict which alternative will succeed. This is exactly what the * generated parser will do. * * This only does lexer grammars. * * Return the token type associated with the final rule end state. */ public void scan(String startRule, DebugEventListener actions, List visitedStates) throws RecognitionException { if ( grammar.type!=Grammar.LEXER ) { return; } CharStream in = (CharStream)this.input; //System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')"); // Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet if ( grammar.getRuleStartState(startRule)==null ) { grammar.createNFAs(); } if ( !grammar.allDecisionDFAHaveBeenCreated() ) { // Create the DFA predictors for each decision grammar.createLookaheadDFAs(); } // do the parse Stack ruleInvocationStack = new Stack(); NFAState start = grammar.getRuleStartState(startRule); NFAState stop = grammar.getRuleStopState(startRule); parseEngine(startRule, start, stop, in, ruleInvocationStack, actions, visitedStates); }
/** For a given input char stream, try to match against the NFA * starting at startRule. This is a deterministic parse even though * it is using an NFA because it uses DFAs at each decision point to * predict which alternative will succeed. This is exactly what the * generated parser will do. * * This only does lexer grammars. * * Return the token type associated with the final rule end state. */ public void scan(String startRule, DebugEventListener actions, List visitedStates) throws RecognitionException { if ( grammar.type!=Grammar.LEXER ) { return; } CharStream in = (CharStream)this.input; //System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')"); // Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet if ( grammar.getRuleStartState(startRule)==null ) { grammar.buildNFA(); } if ( !grammar.allDecisionDFAHaveBeenCreated() ) { // Create the DFA predictors for each decision grammar.createLookaheadDFAs(); } // do the parse Stack ruleInvocationStack = new Stack(); NFAState start = grammar.getRuleStartState(startRule); NFAState stop = grammar.getRuleStopState(startRule); parseEngine(startRule, start, stop, in, ruleInvocationStack, actions, visitedStates); }
/** Create a debug parser with no recognizer state, installing {@code dbg}
 *  as the debug event listener.
 *
 *  Fix: the original body called {@code setDebugListener(dbg)} again after
 *  delegating to the three-arg constructor, which already performs that
 *  call — the duplicate has been removed. (Assumes setDebugListener is a
 *  plain setter, as in standard ANTLR debug parsers — TODO confirm it does
 *  not register the listener additively.)
 *
 *  @param input token stream to parse
 *  @param dbg   debug event listener to install
 */
public DebugAbstractXQueryParser(TokenStream input, DebugEventListener dbg) {
    this(input, dbg, null);
}
/** Create a debug parser with an explicit shared recognizer state and
 *  install {@code dbg} as the debug event listener.
 *
 *  @param input token stream to parse
 *  @param dbg   debug event listener to install
 *  @param state shared recognizer state (may be null per the two-arg ctor)
 */
public DebugAbstractXQueryParser(TokenStream input, DebugEventListener dbg, RecognizerSharedState state) { this(input, state); setDebugListener(dbg); }