Java class org.antlr.runtime.ANTLRFileStream — usage examples (example source code)

项目:codebuff    文件:Tool.java   
/**
 * Parses the grammar contained in the given file and returns its root AST.
 * A relative path is resolved against {@code inputDirectory}. When the file
 * cannot be opened, the error is reported through {@code errMgr} and
 * {@code null} is returned instead of throwing.
 */
public GrammarRootAST parseGrammar(String fileName) {
    File grammarFile = new File(fileName);
    if (!grammarFile.isAbsolute()) {
        grammarFile = new File(inputDirectory, fileName);
    }
    try {
        ANTLRFileStream in = new ANTLRFileStream(grammarFile.getAbsolutePath(), grammarEncoding);
        return parse(fileName, in);
    }
    catch (IOException ioe) {
        errMgr.toolError(ErrorType.CANNOT_OPEN_FILE, ioe, fileName);
        return null;
    }
}
项目:spicy    文件:AbstractParseMappingTask.java   
/**
 * Loads and parses a TGD mapping-task file and returns the populated
 * {@code MappingTask}.
 *
 * @param mappingTaskFile path of the task file; must end with {@code PARSER_EXTENSION}
 * @return the mapping task built by the parser
 * @throws ParserException when the file cannot be read or parsed
 */
public MappingTask generateMappingTask(String mappingTaskFile) throws Exception {
    try {
        if (!mappingTaskFile.endsWith(PARSER_EXTENSION)) {
            throw new IllegalArgumentException("TGD file must end with " + PARSER_EXTENSION);
        }
        this.mappingTaskFilePath = mappingTaskFile;
        TGDMappingTaskLexer lex = new TGDMappingTaskLexer(new ANTLRFileStream(mappingTaskFile));
        CommonTokenStream tokens = new CommonTokenStream(lex);
        TGDMappingTaskParser g = new TGDMappingTaskParser(tokens);
        try {
            g.setGenerator(this);
            g.prog();
        } catch (RecognitionException ex) {
            logger.error("Unable to load mapping task: " + ex.getMessage());
            throw new ParserException(ex);
        }
        mappingTask.setFileName(mappingTaskFile);
        return this.mappingTask;
    } catch (Exception e) {
        // Report through the logger only; the previous printStackTrace()
        // duplicated the same trace on stderr. The cause is preserved in
        // the wrapping ParserException.
        logger.error(e);
        throw new ParserException(e);
    }
}
项目:lua-parser    文件:Main.java   
/**
 * Entry point: parses each Lua source file named on the command line,
 * prints its AST, then runs the generated tree walker over that AST.
 */
public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        throw new IllegalArgumentException("no Lua file(s) provided as a command line parameter");
    }

    for (String fileName : args) {
        System.out.printf("\nParsing `%s`...\n\n", fileName);

        // Lex and parse the source file.
        Lua52Lexer lexer = new Lua52Lexer(new ANTLRFileStream(fileName));
        Lua52Parser parser = new Lua52Parser(new CommonTokenStream(lexer));

        // Manually traverse the AST and print it.
        CommonTree ast = parser.parse().getTree();
        StringBuilder sb = new StringBuilder();
        walk(ast, sb);
        System.out.println(sb);

        // Let the generated tree walker traverse the AST and report on
        // the `assignment` rule.
        new Lua52Walker(new CommonTreeNodeStream(ast)).walk();
    }
}
项目:db-schema-definition-translator    文件:DbsdReader.java   
/**
 * Retrieves the dbs represented by the given input file.
 *
 * @param inputFileName the path of the schema-definition file to compile.
 * @return the parsed database schema.
 * @throws IOException when the input file cannot be read.
 */
public DataBaseSchema compile(final String inputFileName) throws IOException
{
    final ANTLRFileStream antlrFileStream = new ANTLRFileStream(inputFileName);

    try {
        return retrieveDataBaseSchema(antlrFileStream);
    } catch (RecognitionException e) {
        // Chain the cause so the supposedly-impossible failure is
        // diagnosable if it ever does occur (previously it was dropped).
        throw new IllegalStateException("Recognition exception is never thrown, only declared.", e);
    }
}
项目:traci    文件:ParserRunner.java   
/**
 * Prepares the character stream to parse: inline code when it was supplied,
 * otherwise the configured input file. Returns {@code Result.IO_ERROR} when
 * the input file cannot be opened, {@code Result.SUCCESS} otherwise.
 */
private Result getInput()
{
    final String inputFilename = settings.getInputFilename();

    if (code != null)
    {
        input = new ANTLRStringStream(code);
        return Result.SUCCESS;
    }

    try
    {
        input = new ANTLRFileStream(inputFilename);
    }
    catch (final IOException e)
    {
        Log.ERROR("Unable to open input file: '" + inputFilename + "':");
        Log.ERROR(e.getMessage());
        return Result.IO_ERROR;
    }

    return Result.SUCCESS;
}
项目:KernelGenius    文件:DeviceParser.java   
/**
 * Parses the device configuration file and stores the resulting compute
 * device in {@code device}.
 */
public void parse() {
  try {
    lex = new DeviceConfigLexer(new ANTLRFileStream(inputFile.getPath()));
  } catch (IOException e1) {
    // Previously the exception was only printed and parsing continued with
    // a null lexer, which guaranteed an NPE in CommonTokenStream below.
    // Fail fast with the original cause instead.
    throw new IllegalStateException("Cannot open device config file: " + inputFile.getPath(), e1);
  }

  tokens = new CommonTokenStream(lex);
  parser = new DeviceConfigParser(tokens);
  parser.setCompilerError(compilerError);

  try {
    parser.module();
  } catch (RecognitionException e)  {
    e.printStackTrace();
  }

  device=parser.getComputeDevice();
}
项目:iConA    文件:ConsoleMain.java   
/**
 * Parses every Objective-C file in the given collection, registering each
 * file's package (derived from its path below {@code src}) in the symbol
 * table before running the parser on it.
 *
 * @param files collection of FileModel instances to parse.
 * @param src   source root used to derive dotted package names.
 */
public static void parseFile(Collection files, File src)
                              throws Exception {
        // Materialize once: the original called files.toArray() on every
        // iteration (twice per element), which is accidentally O(n^2).
        Object[] models = files.toArray();
        for (int i = 0; i < models.length; i++) {

                File file = ((FileModel) models[i]).getFile();

                // Path below the source root, non-word chars collapsed to dots.
                String tempName = (file.getParent().replace(src.getAbsolutePath(), "").trim().replaceFirst("\\W", "").replaceAll( "\\W", ".")).trim();
                System.out.println("src:" + src.getAbsolutePath() +
                        " parent:"+file.getParent() + " tempName:" +tempName +";"
                        );

                PackageDef package_ = (PackageDef) SymbolTable.getInstance().getObject(tempName, IdentifierType.PACKAGE);

                if (package_ != null) {
                    SymbolTable.getInstance().setCurrentPackage(package_);
                }
                else {
                    // First file seen for this package: create and register it.
                    package_ = new PackageDef(null, IdentifierType.PACKAGE, -1);
                    package_.setFile(file);
                    package_.addPackageName(tempName);
                    SymbolTable.getInstance().addObject(tempName, package_);
                    SymbolTable.getInstance().setCurrentPackage(package_);
                }

                SymbolTable.getInstance().setCurrentFile(file);

                CharStream input = new ANTLRFileStream(file.getAbsolutePath());
                ObjectiveCLexer lex = new ObjectiveCLexer(input);
                CommonTokenStream tokens = new CommonTokenStream(lex);
                ObjectiveCParser parser = new ObjectiveCParser(tokens);

                parser.translation_unit();
      }
}
项目:codebuff    文件:Tool.java   
/**
 * Try current dir then dir of g then lib dir.
 * @param g the importing grammar.
 * @param nameNode The node associated with the imported grammar name.
 */
public Grammar loadImportedGrammar(Grammar g, GrammarAST nameNode) throws IOException {
    String name = nameNode.getText();
    Grammar imported = importedGrammars.get(name);
    if (imported != null) {
        // Already loaded once; reuse the cached grammar.
        return imported;
    }

    g.tool.log("grammar", "load " + name + " from " + g.fileName);

    // Probe each known grammar extension until a file is found.
    File importedFile = null;
    for (String extension : ALL_GRAMMAR_EXTENSIONS) {
        File candidate = getImportedGrammarFile(g, name + extension);
        if (candidate != null) {
            importedFile = candidate;
            break;
        }
    }

    if (importedFile == null) {
        errMgr.grammarError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, g.fileName, nameNode.getToken(), name);
        return null;
    }

    String absolutePath = importedFile.getAbsolutePath();
    GrammarRootAST root = parse(g.fileName, new ANTLRFileStream(absolutePath, grammarEncoding));
    if (root == null) {
        return null;
    }

    imported = createGrammar(root);
    imported.fileName = absolutePath;
    importedGrammars.put(root.getGrammarName(), imported);
    return imported;
}
项目:demidovii    文件:ParserDriver.java   
/**
 * Builds a Galago type specification by parsing the given definition file.
 *
 * @param fileName path of the type-definition file.
 * @return the parsed type specification.
 * @throws IOException          when the file cannot be read.
 * @throws RecognitionException when the file does not parse.
 */
public static TypeSpecification getTypeSpecification(String fileName) throws IOException, RecognitionException {
  GalagoTypeBuilderLexer lexer = new GalagoTypeBuilderLexer(new ANTLRFileStream(fileName));
  GalagoTypeBuilderParser parser = new GalagoTypeBuilderParser(new CommonTokenStream(lexer));
  return parser.type_def();
}
项目:mclab-core    文件:Interp.java   
/**
 * Interprets an input file directly against a combined grammar, without
 * generating any code. Usage:
 * {@code java Interp file.g tokens-to-ignore start-rule input-file}
 */
public static void main(String[] args) throws Exception {
    if ( args.length!=4 ) {
        System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
        return;
    }
    String grammarFileName = args[0];
    String ignoreTokens = args[1];   // space-separated token names to hide from the parser
    String startRule = args[2];
    String inputFileName = args[3];

    // Load the combined grammar from disk.
    Grammar parser =
        new Grammar(null,
                    grammarFileName,
                    new BufferedReader(new FileReader(grammarFileName)));

    // Derive the lexer grammar from the combined grammar and share the
    // parser's token vocabulary with it.
    String lexerGrammarText = parser.getLexerGrammar();
    Grammar lexer = new Grammar();
    lexer.importTokenVocabulary(parser);
    lexer.setGrammarContent(lexerGrammarText);
    CharStream input =
        new ANTLRFileStream(inputFileName);
    Interpreter lexEngine = new Interpreter(lexer, input);
    CommonTokenStream tokens = new CommonTokenStream(lexEngine);
    // Route each "ignore" token onto channel 99 so the parser skips it.
    StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
    while ( tk.hasMoreTokens() ) {
        String tokenName = tk.nextToken();
        tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
    }

    if ( parser.getRule(startRule)==null ) {
        System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
        return;
    }
    // Interpret the token stream from the start rule and dump the parse tree.
    Interpreter parseEngine = new Interpreter(parser, tokens);
    ParseTree t = parseEngine.parse(startRule);
    System.out.println(t.toStringTree());
}
项目:galago-git    文件:ParserDriver.java   
/**
 * Parses the given Galago type-definition file into a type specification.
 *
 * @param fileName path of the type-definition file.
 * @return the parsed type specification.
 * @throws IOException          when the file cannot be read.
 * @throws RecognitionException when the file does not parse.
 */
public static TypeSpecification getTypeSpecification(String fileName) throws IOException, RecognitionException {
  ANTLRFileStream source = new ANTLRFileStream(fileName);
  CommonTokenStream tokenStream = new CommonTokenStream(new GalagoTypeBuilderLexer(source));
  return new GalagoTypeBuilderParser(tokenStream).type_def();
}
项目:galagosearch    文件:ParserDriver.java   
/**
 * Parses the given Galago type-definition file into a type specification.
 *
 * @param fileName path of the type-definition file.
 * @return the parsed type specification.
 * @throws IOException          when the file cannot be read.
 * @throws RecognitionException when the file does not parse.
 */
public static TypeSpecification getTypeSpecification(String fileName) throws IOException, RecognitionException {
    ANTLRFileStream source = new ANTLRFileStream(fileName);
    GalagoTypeBuilderLexer lexer = new GalagoTypeBuilderLexer(source);
    GalagoTypeBuilderParser parser = new GalagoTypeBuilderParser(new CommonTokenStream(lexer));
    return parser.type_def();
}
项目:hapi-fhir    文件:Template.java   
/**
 * Builds a template by parsing the given file with the Liquid grammar.
 * The parse tree root is stored in {@code root}.
 *
 * @throws IOException      when the file cannot be read.
 * @throws RuntimeException when the file does not parse.
 */
private Template(File file, Map<String, Tag> tags, Map<String, Filter> filters, Flavor flavor) throws IOException {

        this.tags = tags;
        this.filters = filters;
        this.flavor = flavor;

        try {
            ANTLRFileStream source = new ANTLRFileStream(file.getAbsolutePath());
            CommonTokenStream tokenStream = new CommonTokenStream(new LiquidLexer(source));
            LiquidParser parser = new LiquidParser(flavor, tokenStream);
            root = (CommonTree) parser.parse().getTree();
        }
        catch (RecognitionException e) {
            throw new RuntimeException("could not parse input from " + file, e);
        }
    }
项目:junit-converter    文件:TestClassConverter.java   
/**
 * Parses the given Java source file and, when it is recognized as a test
 * suite or a test case, writes the converted source to the output file.
 * Files that are neither are left untouched.
 */
public void convert(File inputFile, File outputFile) throws IOException, RecognitionException
{
    ANTLRFileStream source = new ANTLRFileStream(inputFile.getAbsolutePath());
    CommonTokenStream tokens = new CommonTokenStream(new JavaLexer(source));
    JavaParser parser = new JavaParser(tokens);
    parser.compilationUnit();

    if (isTestSuite(parser, inputFile))
    {
        writeChanges(outputFile, runSuiteConversion(inputFile, parser));
    }
    else if (isTestCase(parser, inputFile))
    {
        writeChanges(outputFile, runConversion(inputFile, parser));
    }
}
项目:GalagoGUI    文件:ParserDriver.java   
/**
 * Parses the given Galago type-definition file into a type specification.
 *
 * @param fileName path of the type-definition file.
 * @return the parsed type specification.
 * @throws IOException          when the file cannot be read.
 * @throws RecognitionException when the file does not parse.
 */
public static TypeSpecification getTypeSpecification(String fileName) throws IOException, RecognitionException {
    CommonTokenStream tokens =
            new CommonTokenStream(new GalagoTypeBuilderLexer(new ANTLRFileStream(fileName)));
    return new GalagoTypeBuilderParser(tokens).type_def();
}
项目:asup    文件:CLParserWrapper.java   
/**
 * Parses the given file (read as UTF-8) into a CL object model.
 *
 * @throws CLScriptException when the file cannot be read.
 */
@Override
public CLObject parse(File f) throws CLScriptException {
    try {
        String path = f.getCanonicalPath();
        return invokeParser(new ANTLRFileStream(path, "UTF-8"));
    } catch (IOException ex) {
        throw new CLScriptException(ex);
    }
}
项目:asup    文件:CLCommandParserWrapper.java   
/**
 * Parses the given file (read as UTF-8) into a CL command.
 *
 * @throws CLScriptException when the file cannot be read.
 */
@Override
public CLCommand parse(File f) throws CLScriptException {
    try {
        String path = f.getCanonicalPath();
        return invokeParser(new ANTLRFileStream(path, "UTF-8"));
    } catch (IOException ex) {
        throw new CLScriptException(ex);
    }
}
项目:asup    文件:CLParameterParserWrapper.java   
/**
 * Parses the given file (read as UTF-8) into a CL parameter component.
 *
 * @throws CLScriptException when the file cannot be read.
 */
@Override
public CLParmAbstractComponent parse(File f) throws CLScriptException {
    try {
        String path = f.getCanonicalPath();
        return invokeParser(new ANTLRFileStream(path, "UTF-8"));
    } catch (IOException ex) {
        throw new CLScriptException(ex);
    }
}
项目:asup    文件:CLExpressionParserWrapper.java   
/**
 * Parses the given file (read as UTF-8) into a CL expression.
 *
 * @throws CLScriptException when the file cannot be read.
 */
@Override
public QExpression parse(File f) throws CLScriptException {
    try {
        String path = f.getCanonicalPath();
        return invokeParser(new ANTLRFileStream(path, "UTF-8"));
    } catch (IOException ex) {
        throw new CLScriptException(ex);
    }
}
项目:mclab    文件:Interp.java   
/**
 * Interprets an input file directly against a combined grammar, without
 * generating any code. Usage:
 * {@code java Interp file.g tokens-to-ignore start-rule input-file}
 */
public static void main(String[] args) throws Exception {
    if ( args.length!=4 ) {
        System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
        return;
    }
    String grammarFileName = args[0];
    String ignoreTokens = args[1];   // space-separated token names to hide from the parser
    String startRule = args[2];
    String inputFileName = args[3];

    // Load the combined grammar from disk.
    Grammar parser =
        new Grammar(null,
                    grammarFileName,
                    new BufferedReader(new FileReader(grammarFileName)));

    // Derive the lexer grammar from the combined grammar and share the
    // parser's token vocabulary with it.
    String lexerGrammarText = parser.getLexerGrammar();
    Grammar lexer = new Grammar();
    lexer.importTokenVocabulary(parser);
    lexer.setGrammarContent(lexerGrammarText);
    CharStream input =
        new ANTLRFileStream(inputFileName);
    Interpreter lexEngine = new Interpreter(lexer, input);
    CommonTokenStream tokens = new CommonTokenStream(lexEngine);
    // Route each "ignore" token onto channel 99 so the parser skips it.
    StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
    while ( tk.hasMoreTokens() ) {
        String tokenName = tk.nextToken();
        tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
    }

    if ( parser.getRule(startRule)==null ) {
        System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
        return;
    }
    // Interpret the token stream from the start rule and dump the parse tree.
    Interpreter parseEngine = new Interpreter(parser, tokens);
    ParseTree t = parseEngine.parse(startRule);
    System.out.println(t.toStringTree());
}
项目:Rapture    文件:FileApiReader.java   
/**
 * Opens the named API file, resolved against {@code rootDir}, as a UTF-8
 * character stream.
 */
@Override
public ANTLRStringStream read(String apiFileName) throws IOException {
    File apiFile = new File(rootDir, apiFileName);
    return new ANTLRFileStream(apiFile.getAbsolutePath(), StandardCharsets.UTF_8.name());
}
项目:jtcc    文件:TCCTokenizer.java   
/**
 * Create a new <code>TCCTokenizer</code> to tokenize the content inside the
 * specified text file, with the specified encoding.
 *
 * @param file     the text file to tokenize.
 * @param encoding the character encoding of the file.
 * @throws IOException when the file cannot be read.
 */
public TCCTokenizer(File file, String encoding) throws IOException {
    String path = file.getAbsolutePath();
    initialize(new ANTLRFileStream(path, encoding));
}
项目:jabref    文件:VM.java   
/**
 * Builds a VM from the given file: delegates to the stream-based
 * constructor and remembers the source file.
 */
public VM(File f) throws RecognitionException, IOException {
    this(new ANTLRFileStream(f.getPath()));
    this.file = f;
}
项目:jFuzzyLogic    文件:Interp.java   
/**
 * Interprets an input file directly against a combined grammar, without
 * generating any code. Usage:
 * {@code java Interp file.g tokens-to-ignore start-rule input-file}
 */
public static void main(String[] args) throws Exception {
    if ( args.length!=4 ) {
        System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
        return;
    }
    String grammarFileName = args[0];
    String ignoreTokens = args[1];   // space-separated token names to hide from the parser
    String startRule = args[2];
    String inputFileName = args[3];

    // TODO: using wrong constructor now
    // Build and analyze the combined grammar: assign token types, define
    // symbols, and construct the NFAs the interpreter walks.
    Tool tool = new Tool();
    CompositeGrammar composite = new CompositeGrammar();
    Grammar parser = new Grammar(tool, grammarFileName, composite);
    composite.setDelegationRoot(parser);
    FileReader fr = new FileReader(grammarFileName);
    BufferedReader br = new BufferedReader(fr);
    parser.parseAndBuildAST(br);
    br.close();

    parser.composite.assignTokenTypes();
    parser.composite.defineGrammarSymbols();
    parser.composite.createNFAs();

    // Left-recursive rules cannot be interpreted; bail out silently.
    List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
    if ( leftRecursiveRules.size()>0 ) {
        return;
    }

    if ( parser.getRule(startRule)==null ) {
        System.out.println("undefined start rule "+startRule);
        return;
    }

    // Derive the lexer grammar and share the parser's token vocabulary.
    // NOTE(review): when no lexer grammar is found only a message is
    // printed and execution continues -- confirm that is intended.
    String lexerGrammarText = parser.getLexerGrammar();
    Grammar lexer = new Grammar();
    lexer.importTokenVocabulary(parser);
    lexer.fileName = grammarFileName;
    lexer.setTool(tool);
    if ( lexerGrammarText!=null ) {
        lexer.setGrammarContent(lexerGrammarText);
    }
    else {
        System.err.println("no lexer grammar found in "+grammarFileName);
    }
    lexer.composite.createNFAs();

    CharStream input =
        new ANTLRFileStream(inputFileName);
    Interpreter lexEngine = new Interpreter(lexer, input);
    CommonTokenStream tokens = new CommonTokenStream(lexEngine);
    // Route each "ignore" token onto channel 99 so the parser skips it.
    StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
    while ( tk.hasMoreTokens() ) {
        String tokenName = tk.nextToken();
        tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
    }

    // NOTE(review): this start-rule existence check duplicates the one
    // above (which prints a different message) and can never trigger here.
    if ( parser.getRule(startRule)==null ) {
        System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
        return;
    }
    // Interpret the token stream from the start rule and dump the parse tree.
    Interpreter parseEngine = new Interpreter(parser, tokens);
    ParseTree t = parseEngine.parse(startRule);
    System.out.println(t.toStringTree());
}
项目:zz-old-scripting-engine    文件:MTScriptSandBox.java   
/**
 * Sandbox driver: parses the hard-coded script {@code /tmp/tst1.mts},
 * prints the AST, the roll expressions and labels collected in the symbol
 * table, evaluates the script, and reports the elapsed time.
 */
public static void mainOld(String[] args) throws Exception {

    long startTime = System.currentTimeMillis();
    // NOTE(review): input path is hard-coded; this is a throwaway sandbox.
    MTScriptLexer lexer = new MTScriptLexer(new ANTLRFileStream("/tmp/tst1.mts"));


    CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    ScriptContext scriptContext = new ScriptContextBuilder().toScriptContext();

    SymbolTable symbolTable =  scriptContext.getSymbolTable();

    // Parse the script; the parser records roll expressions and labels
    // into the shared symbol table as a side effect.
    MTScriptParser parser = new MTScriptParser(tokenStream);
    parser.setSymbolTable(symbolTable);

    CommonTree tree = (CommonTree) (parser.mtscript().getTree());
    System.out.println(tree.toStringTree());
    System.out.println("--- START ROLLS ---");
    for (RollExpression rxpr : symbolTable.getRollExpressions()) {
        System.out.print(rxpr.getRollString());
        if (rxpr.isVerbose()) {
            System.out.print(" (verbose)");
        }
        System.out.println("");
    }
    System.out.println("--- END ROLLS ---");

    // Walk the AST and evaluate the script against the context.
    CommonTreeNodeStream nodeStream = new CommonTreeNodeStream(tree);
    MTScriptTreeParser walker = new MTScriptTreeParser(nodeStream);

    walker.setSymbolTable(symbolTable);

    ScriptTreeNode scriptNode = walker.evaluator();

    DataValue results = scriptNode.evaluate(scriptContext);

    System.out.println("Result = " + results.asString());

    System.out.println("--- START LABELS ---");
    for (String s : symbolTable.getLabels()) {
        int i = 0;
        for (DataValue l : symbolTable.getLabels(s)) {
            System.out.println(s + "(" + i + ") = " + l.asString());
            i++;
        }
    }
    System.out.println("--- END LABELS ---");

    System.out.println("Time (ms) = " + (System.currentTimeMillis() - startTime));
}
项目:traci    文件:TraciTreeWalkerBase.java   
/**
 * Runs the tree walker over the contents of the given file.
 */
protected void runTreeWalkerFile(final String filename) throws RecognitionException, IOException
{
    final ANTLRFileStream stream = new ANTLRFileStream(filename);
    run(stream);
}
项目:traci    文件:TraciParserBase.java   
/**
 * Runs the parser over the contents of the given file.
 */
protected void runParserFile(final String filename) throws RecognitionException, IOException
{
    final ANTLRFileStream stream = new ANTLRFileStream(filename);
    run(stream);
}
项目:traci    文件:TraciLexerBase.java   
/**
 * Runs the lexer over the contents of the given file.
 */
protected void runLexerFile(final String filename) throws IOException
{
    final ANTLRFileStream stream = new ANTLRFileStream(filename);
    run(stream);
}
项目:traci    文件:InterpreterBase.java   
/**
 * Runs the interpreter over the contents of the given file.
 */
protected void runInterpreterFile(final String filename) throws RecognitionException, IOException,
        InterpreterRuntimeException
{
    final ANTLRFileStream stream = new ANTLRFileStream(filename);
    run(stream);
}
项目:sonar-xquery-plugin    文件:TestParser.java   
/**
 * Parses every XQuery file under {@code CODE_ROOT} and writes each file's
 * AST to a per-file text file under the system temp directory.
 */
public static void main(String[] args) throws IOException {
        XQueryTree tree = new XQueryTree();
        ParseTreeBuilder builder = new XQueryParseTreeBuilder("MainModule");
        File directory = new File(CODE_ROOT);

        System.out.println("Parsing files in " + CODE_ROOT);
        List<File> files = FileUtils.getFiles(directory, CODE_FILTER, "");

        // Output trees go under <tmp>/parse-trees, wiped on every run.
        String treeDirectory = System.getProperty("java.io.tmpdir") + "/parse-trees";
        FileUtils.mkdir(treeDirectory);
        FileUtils.cleanDirectory(treeDirectory);

        for (File file : files) {
            System.out.println("Analyzing " + file.getPath() + ":");
            try {
                ANTLRStringStream source = new ANTLRFileStream(file.getAbsolutePath());
                source.name = file.getPath();
                XQueryLexer lexer = new XQueryLexer(source);
                TokenStream tokenStream = new LazyTokenStream(lexer);
                XQueryParser parser = new XQueryParser(tokenStream);
                parser.setCharSource(source);
                parser.setTreeAdaptor(new XQueryTreeAdaptor());
                tree = (XQueryTree) parser.p_Module().getTree();

            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // NOTE(review): on a parse failure `tree` still holds the
                // previous file's AST, so a stale tree is written -- confirm
                // this is intended.
                String outputDirectory = StringUtils.substringAfter(file.getPath(), CODE_ROOT);
                FileUtils.mkdir(treeDirectory + outputDirectory);
                String treeName = treeDirectory + outputDirectory + "/" + StringUtils.substringBefore(file.getName(), ".") + "-AST.txt";
                // Fixed: the message previously printed the parse-tree path
                // while the data was written to the AST path.
                System.out.println("Writing AST to " + treeName);
                FileUtils.fileWrite(treeName, tree.toStringTree());
            }
        }

        System.out.println("File parsing complete");
    }
项目:jtcc    文件:TCCTokenizer.java   
/**
 * Create a new <code>TCCTokenizer</code> to tokenize the content inside the
 * specified text file.
 */
public TCCTokenizer(File file) throws IOException {
    initialize(new ANTLRFileStream(file.getAbsolutePath()));
}