Example source code for the Java class org.apache.lucene.analysis.bg.BulgarianAnalyzer

Project: langpi    File: BgSWEliminator.java
private static List<String> getStopList(String path) {
    List<String> stopwords = new ArrayList<String>();
    try {
        // Load the stop word file from the classpath, resolved relative to BulgarianAnalyzer.
        InputStream in = BulgarianAnalyzer.class.getResourceAsStream(path);
        BufferedReader input = new BufferedReader(new InputStreamReader(in));
        for (String line = input.readLine(); line != null; line = input.readLine()) {
            // Skip comment lines.
            if (line.startsWith("#")) continue;
            stopwords.add(line);
        }
        input.close();
        return stopwords;
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
        return null;
    }
}
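For context, a minimal usage sketch of the helper above. The resource path is a placeholder, and the sketch assumes a recent Lucene where CharArraySet lives in org.apache.lucene.analysis and has a (Collection, boolean) constructor; none of this is taken from the langpi project itself.

import java.util.List;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;

// Hypothetical call site for getStopList; "/stopwords_bg.txt" is a placeholder path.
List<String> stopwords = getStopList("/stopwords_bg.txt");
CharArraySet stopSet = new CharArraySet(stopwords, true); // true = ignore case
BulgarianAnalyzer analyzer = new BulgarianAnalyzer(stopSet);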
Project: elasticsearch_my    File: BulgarianAnalyzerProvider.java
public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new BulgarianAnalyzer(
        Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BulgarianAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
Project: Elasticsearch    File: BulgarianAnalyzerProvider.java
@Inject
public BulgarianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env, @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new BulgarianAnalyzer(Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
                                     Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
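Both providers do the same thing: read the configured stop words and stem-exclusion list from the index settings and build a BulgarianAnalyzer from them. Outside Elasticsearch, the equivalent plain-Lucene construction looks roughly like the sketch below; the stem-exclusion entry is a made-up placeholder, and the import locations assume a Lucene version in line with the version-less provider code above.

import java.util.Arrays;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;

// Default Bulgarian stop words plus a hypothetical stem-exclusion entry.
CharArraySet stopwords = BulgarianAnalyzer.getDefaultStopSet();
CharArraySet stemExclusion = new CharArraySet(Arrays.asList("софия"), true);
BulgarianAnalyzer analyzer = new BulgarianAnalyzer(stopwords, stemExclusion);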
Project: theSemProject    File: MyAnalyzer.java
/**
 * Returns the default stop word set for a language.
 *
 * @param language language code
 * @return stop word set
 */
public static CharArraySet getDefaultStopSet(String language) {
    try {
        if ("en".equalsIgnoreCase(language)) {
            return StandardAnalyzer.STOP_WORDS_SET;
        } else if ("es".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "spanish_stop.txt", StandardCharsets.UTF_8));
        } else if ("fr".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "french_stop.txt", StandardCharsets.UTF_8));
        } else if ("de".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "german_stop.txt", StandardCharsets.UTF_8));
        } else if ("pl".equalsIgnoreCase(language)) {
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(PolishAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("pt".equalsIgnoreCase(language) || "br".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "portuguese_stop.txt", StandardCharsets.UTF_8));
        } else if ("it".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "italian_stop.txt", StandardCharsets.UTF_8));
        } else if ("cz".equalsIgnoreCase(language) || "sk".equalsIgnoreCase(language)) {
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(CzechAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("tr".equalsIgnoreCase(language)) {
            return TurkishAnalyzer.loadStopwordSet(false, TurkishAnalyzer.class, "stopwords.txt", "#");
        } else if ("ru".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "russian_stop.txt", StandardCharsets.UTF_8));
        } else if ("ro".equalsIgnoreCase(language)) {
            return RomanianAnalyzer.loadStopwordSet(false, RomanianAnalyzer.class, "stopwords.txt", "#");
        } else if ("bg".equalsIgnoreCase(language)) {
            return BulgarianAnalyzer.loadStopwordSet(false, BulgarianAnalyzer.class, "stopwords.txt", "#");
        } else if ("nl".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "dutch_stop.txt", StandardCharsets.UTF_8));
        }
    } catch (Exception ex) {
        throw new RuntimeException("Unable to load default stopword set", ex);
    }
    return StandardAnalyzer.STOP_WORDS_SET;
}
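A short example of calling the helper above for Bulgarian and feeding the result into an analyzer. The class name MyAnalyzer is taken from the file header; the rest mirrors the Lucene API already used in the method.

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;

// "bg" resolves to BulgarianAnalyzer's bundled stopwords.txt in the method above.
CharArraySet bgStops = MyAnalyzer.getDefaultStopSet("bg");
BulgarianAnalyzer analyzer = new BulgarianAnalyzer(bgStops);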
Project: news-credibility    File: TokenTransform.java
public Tuple2<Double, Multiset<String>> transform(Row row) throws IOException {
    Double label = row.getDouble(1);
    StringReader document = new StringReader(row.getString(0).replaceAll("br2n", ""));
    List<String> wordsList = new ArrayList<>();

    try (BulgarianAnalyzer analyzer = new BulgarianAnalyzer(BULGARIAN_STOP_WORDS_SET)) {
        TokenStream stream = analyzer.tokenStream("words", document);

        TokenFilter lowerFilter = new LowerCaseFilter(stream);
        TokenFilter numbers = new NumberFilter(lowerFilter);
        TokenFilter length = new LengthFilter(numbers, 3, 1000);
        TokenFilter stemmer = new BulgarianStemFilter(length);
        TokenFilter ngrams = new ShingleFilter(stemmer, 2, 3);

        try (TokenFilter filter = ngrams) {
            // Use CharTermAttribute directly so the token text can be read
            // without relying on the generic Attribute's toString().
            CharTermAttribute termAtt = filter.addAttribute(CharTermAttribute.class);
            filter.reset();
            while (filter.incrementToken()) {
                // Escape commas and strip line breaks so the token is safe for CSV-style output.
                String word = termAtt.toString().replace(",", "(comma)").replaceAll("\n|\r", "");
                // Skip shingles that span a removed stop word (they contain the "_" filler token).
                if (word.contains("_")) {
                    continue;
                }
                wordsList.add(word);
            }
            filter.end();
        }
    }

    Multiset<String> words = ConcurrentHashMultiset.create(wordsList);

    return new Tuple2<>(label, words);
}
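The core of transform is the filter chain plus the CharTermAttribute loop. Below is a standalone sketch of that pipeline, dropping the project-specific NumberFilter and BULGARIAN_STOP_WORDS_SET in favour of Lucene's defaults; the import locations assume a Lucene 5/6-era layout consistent with the version-less constructors above, and the helper method can live in any class.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianStemFilter;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public static void printBulgarianShingles(String text) throws IOException {
    try (BulgarianAnalyzer analyzer = new BulgarianAnalyzer(BulgarianAnalyzer.getDefaultStopSet())) {
        TokenStream stream = analyzer.tokenStream("words", new StringReader(text));
        // Lowercase, drop very short/long tokens, stem, then build 2- and 3-gram shingles.
        TokenStream chain = new ShingleFilter(
                new BulgarianStemFilter(new LengthFilter(new LowerCaseFilter(stream), 3, 1000)), 2, 3);
        CharTermAttribute term = chain.addAttribute(CharTermAttribute.class);
        chain.reset();
        while (chain.incrementToken()) {
            // Shingles that span a removed stop word carry the default "_" filler token.
            System.out.println(term.toString());
        }
        chain.end();
        chain.close();
    }
}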
Project: elasticsearch_my    File: BulgarianAnalyzerProvider.java
@Override
public BulgarianAnalyzer get() {
    return this.analyzer;
}
Project: Elasticsearch    File: BulgarianAnalyzerProvider.java
@Override
public BulgarianAnalyzer get() {
    return this.analyzer;
}
Project: social-event-detection    File: Tokenizer.java
public Tokenizer() {

    Analyzer en_analyzer = new EnglishAnalyzer(Version.LUCENE_44);
    Analyzer es_analyzer = new SpanishAnalyzer(Version.LUCENE_44);
    Analyzer de_analyzer = new GermanAnalyzer(Version.LUCENE_44);
    Analyzer da_analyzer = new DanishAnalyzer(Version.LUCENE_44);
    Analyzer el_analyzer = new GreekAnalyzer(Version.LUCENE_44);
    Analyzer fr_analyzer = new FrenchAnalyzer(Version.LUCENE_44);
    Analyzer it_analyzer = new ItalianAnalyzer(Version.LUCENE_44);
    Analyzer pt_analyzer = new PortugueseAnalyzer(Version.LUCENE_44);
    Analyzer ru_analyzer = new RussianAnalyzer(Version.LUCENE_44);
    Analyzer fa_analyzer = new PersianAnalyzer(Version.LUCENE_44);  
    Analyzer ar_analyzer = new ArabicAnalyzer(Version.LUCENE_44);
    Analyzer id_analyzer = new IndonesianAnalyzer(Version.LUCENE_44);
    Analyzer pl_analyzer = new MorfologikAnalyzer(Version.LUCENE_44);
    Analyzer nl_analyzer = new DutchAnalyzer(Version.LUCENE_44);
    Analyzer no_analyzer = new NorwegianAnalyzer(Version.LUCENE_44);
    Analyzer ro_analyzer = new RomanianAnalyzer(Version.LUCENE_44);
    Analyzer sv_analyzer = new SwedishAnalyzer(Version.LUCENE_44);
    Analyzer fi_analyzer = new FinnishAnalyzer(Version.LUCENE_44);
    Analyzer tr_analyzer = new TurkishAnalyzer(Version.LUCENE_44);
    Analyzer hu_analyzer = new HungarianAnalyzer(Version.LUCENE_44);
    Analyzer bg_analyzer = new BulgarianAnalyzer(Version.LUCENE_44);

    analyzers.put("en", en_analyzer);
    analyzers.put("es", es_analyzer);
    analyzers.put("de", de_analyzer);
    analyzers.put("da", da_analyzer);
    analyzers.put("el", el_analyzer);
    analyzers.put("fr", fr_analyzer);
    analyzers.put("it", it_analyzer);
    analyzers.put("pt", pt_analyzer);
    analyzers.put("ru", ru_analyzer);
    analyzers.put("fa", fa_analyzer);
    analyzers.put("ar", ar_analyzer);
    analyzers.put("id", id_analyzer);
    analyzers.put("pl", pl_analyzer);
    analyzers.put("nl", nl_analyzer);
    analyzers.put("no", no_analyzer);
    analyzers.put("ro", ro_analyzer);
    analyzers.put("sv", sv_analyzer);
    analyzers.put("fi", fi_analyzer);
    analyzers.put("tr", tr_analyzer);
    analyzers.put("hu", hu_analyzer);
    analyzers.put("bg", bg_analyzer);
}
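To actually tokenize text with one of the registered analyzers, a caller would look the analyzer up by language code and drain a TokenStream with the Lucene 4.4 API, roughly as in the sketch below. The analyzers field's exact type is not shown in the original source, so a Map<String, Analyzer> is assumed here.

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Sketch only: "analyzers" is assumed to be the Map<String, Analyzer> populated above.
public static List<String> tokenize(Map<String, Analyzer> analyzers, String lang, String text)
        throws IOException {
    List<String> tokens = new ArrayList<String>();
    Analyzer analyzer = analyzers.get(lang);            // e.g. "bg" -> BulgarianAnalyzer
    TokenStream stream = analyzer.tokenStream("text", new StringReader(text));
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
        tokens.add(term.toString());
    }
    stream.end();
    stream.close();
    return tokens;
}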