Java 类 org.apache.lucene.index.IndexReaderContext 实例源码（Java source-code examples using org.apache.lucene.index.IndexReaderContext）

项目:elasticsearch_my    文件:BlendedTermQuery.java   
/**
 * Rewrites this blended query: delegates to the superclass first and, when no
 * rewrite happened, builds per-term contexts/doc freqs, blends their
 * statistics, and assembles the final top-level query.
 */
@Override
public Query rewrite(IndexReader reader) throws IOException {
    // If the superclass produced a different query, that result wins.
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    // Build a TermContext (and record its doc freq) for every blended term.
    final IndexReaderContext topContext = reader.getContext();
    final TermContext[] contexts = new TermContext[terms.length];
    final int[] docFreqs = new int[terms.length];
    for (int i = 0; i < contexts.length; i++) {
        contexts[i] = TermContext.build(topContext, terms[i]);
        docFreqs[i] = contexts[i].docFreq();
    }
    final int maxDoc = reader.maxDoc();
    // Blend the per-term statistics in place before building the query.
    blend(contexts, maxDoc, reader);
    return topLevelQuery(terms, contexts, docFreqs, maxDoc);
}
项目:elasticsearch_my    文件:BlendedTermQuery.java   
/**
 * Returns a copy of {@code termContext} whose aggregated total term frequency
 * is replaced by {@code sumTTF}, leaving the original untouched.
 *
 * The doc freq and the replacement ttf are registered once, on the first leaf
 * that has a term state; later leaves register zeros so the totals are not
 * double-counted when TermContext sums per-leaf registrations.
 *
 * @param readerContext top-level context the term context was built against
 * @param termContext   per-leaf term states to copy
 * @param sumTTF        replacement total term frequency (-1 = unknown)
 * @return a new TermContext carrying the adjusted statistics
 */
private TermContext adjustTTF(IndexReaderContext readerContext, TermContext termContext, long sumTTF) {
    assert termContext.wasBuiltFor(readerContext);
    // Nothing to adjust when both the replacement and the current ttf are unknown.
    if (sumTTF == -1 && termContext.totalTermFreq() == -1) {
        return termContext;
    }
    TermContext newTermContext = new TermContext(readerContext);
    List<LeafReaderContext> leaves = readerContext.leaves();
    final int len;
    if (leaves == null) {
        len = 1;
    } else {
        len = leaves.size();
    }
    int df = termContext.docFreq();
    long ttf = sumTTF;
    for (int i = 0; i < len; i++) {
        TermState termState = termContext.get(i);
        if (termState == null) {
            continue;
        }
        newTermContext.register(termState, i, df, ttf);
        // Only the first registered leaf carries the totals; zero out afterwards.
        df = 0;
        ttf = 0;
    }
    return newTermContext;
}
项目:lucene-custom-query    文件:SeqSpanWeight.java   
/**
 * Builds the per-term statistics and similarity weight for a SeqSpanQuery.
 * Requires the query to have been rewritten already: at least two positions,
 * with the first position at 0.
 *
 * @throws IllegalStateException if there are fewer than 2 positions or the
 *                               first position is not 0
 * @throws IOException           if term statistics cannot be read
 */
protected SeqSpanWeight(SeqSpanQuery query, IndexSearcher searcher) throws IOException {
  super(query);
  this.selfQuery = query;
  this.similarity = searcher.getSimilarity(needsScores);
  this.positions = selfQuery.getPositions();
  this.terms = selfQuery.getTerms();
  // All terms are assumed to target the same field as the first term.
  this.field = terms[0].field();
  if (positions.length < 2) {
    throw new IllegalStateException("PhraseWeight does not support less than 2 terms, call rewrite first");
  } else if (positions[0] != 0) {
    throw new IllegalStateException("PhraseWeight requires that the first position is 0, call rewrite first");
  }
  final IndexReaderContext context = searcher.getTopReaderContext();
  states = new TermContext[terms.length];
  TermStatistics termStats[] = new TermStatistics[terms.length];
  // Build a term context and fetch collection-level statistics per term.
  for (int i = 0; i < terms.length; i++) {
    final Term term = terms[i];
    states[i] = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, states[i]);
  }
  stats = similarity.computeWeight(searcher.collectionStatistics(terms[0].field()), termStats);
}
项目:lams    文件:TermQuery.java   
/**
 * Creates the weight for this term query, reusing the cached per-reader term
 * state when it was built for this searcher's top-level context.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  TermContext termState;
  // Reuse the pre-built state only when it matches this context; otherwise
  // rebuild so the TermQuery stays single-pass.
  if (perReaderTermState != null && perReaderTermState.topReaderContext == context) {
    termState = this.perReaderTermState;
  } else {
    termState = TermContext.build(context, term);
  }
  // An explicitly supplied docFreq overrides the computed one (a deliberate "lie").
  if (docFreq != -1) {
    termState.setDocFreq(docFreq);
  }
  return new TermWeight(searcher, termState);
}
项目:lams    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query against the searcher's top-level reader context. Similarity weight
 * statistics are only computed when the query targets a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  // Build a context per extracted term and collect its statistics.
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
项目:lams    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) across every term of every
 * position of the multi-phrase, building and caching a TermContext per term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();

  // Collect term statistics for idf computation, caching contexts in termContexts.
  ArrayList<TermStatistics> allTermStats = new ArrayList<>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
项目:lams    文件:FieldCacheSanityChecker.java   
/**
 * Checks if the seed is an IndexReader, and if so walks the hierarchy of
 * subReaders, collecting the objects returned by {@code getCoreCacheKey()}.
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  final List<Object> keys = new ArrayList<>(17); // grows while traversing
  keys.add(seed);
  // Breadth-first walk: the list itself doubles as the work queue.
  for (int idx = 0; idx < keys.size(); idx++) {
    final Object candidate = keys.get(idx);
    if (!(candidate instanceof IndexReader)) {
      continue;
    }
    // TODO: closed readers are not detected here (getContext throws
    // AlreadyClosedException); reflection might be an alternative.
    try {
      final List<IndexReaderContext> children =
          ((IndexReader) candidate).getContext().children();
      if (children != null) { // non-null only for composite readers
        for (final IndexReaderContext child : children) {
          keys.add(child.reader().getCoreCacheKey());
        }
      }
    } catch (AlreadyClosedException ignored) {
      // skip readers that were closed under us
    }
  }
  // Index 0 is the seed itself, not a descendant key.
  return keys.subList(1, keys.size());
}
项目:Elasticsearch    文件:BlendedTermQuery.java   
/**
 * Builds per-term contexts and doc freqs against the reader's top-level
 * context, blends the statistics, and returns the resulting top-level query
 * carrying this query's boost.
 * NOTE(review): unlike the other BlendedTermQuery variant above, this does
 * not delegate to super.rewrite(reader) first — confirm that is intentional.
 */
@Override
public Query rewrite(IndexReader reader) throws IOException {
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
    // Propagate this query's boost onto the rewritten query.
    query.setBoost(getBoost());
    return query;
}
项目:ir-generalized-translation-models    文件:AugmentedTermQuery.java   
/**
 * Creates the weight for the augmented term query. Term states are built only
 * when scores are required; otherwise they stay null.
 */
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    IndexReaderContext context = searcher.getTopReaderContext();
    TermContext mainTermState = null;
    TermContext[] similarStates = new TermContext[similarTerms.length];
    if (needsScores) {
        // Scoring needs term statistics: build a context for the main term
        // and one for every similar term.
        mainTermState = TermContext.build(context, mainTerm);
        for (int i = 0; i < similarStates.length; i++) {
            similarStates[i] = TermContext.build(context, similarTerms[i].term);
        }
    }
    // When scores are not needed the term states are skipped on purpose:
    // this saves term-dictionary seeks on segments that already have a
    // cache entry for this query.
    return new AugmentedTermWeight(searcher, needsScores, mainTermState, similarStates);
}
项目:DoSeR-Disambiguation    文件:TermQuery.java   
/**
 * Creates the weight for this term query, reusing the pre-built per-reader
 * term state when it matches this searcher's top-level context.
 */
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    TermContext termState;
    // Reuse the PRTS only when it targets this exact context; otherwise
    // rebuild so the query remains single-pass.
    if (perReaderTermS != null && perReaderTermS.topReaderContext == context) {
        termState = perReaderTermS;
    } else {
        termState = TermContext.build(context, term);
    }
    // Honor an explicitly supplied docFreq (a deliberate "lie").
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, termState);
}
项目:DoSeR-Disambiguation    文件:LearnToRankTermQuery.java   
/**
 * Creates the weight for this learn-to-rank term query; reuses the cached
 * per-reader state when possible, otherwise builds a fresh one.
 */
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext readerContext = searcher.getTopReaderContext();
    final TermContext state;
    final boolean reusable = perReaderTermS != null
            && perReaderTermS.topReaderContext == readerContext;
    if (reusable) {
        // PRTS was pre-built for this searcher: reuse it.
        state = perReaderTermS;
    } else {
        // No PRTS, or it targets a different context: rebuild.
        state = TermContext.build(readerContext, term);
    }
    // An externally supplied docFreq must not be ignored (deliberate "lie").
    if (docFreq != -1) {
        state.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, state);
}
项目:search    文件:TermQuery.java   
/**
 * Builds the weight for this term query. The cached per-reader term state is
 * reused only if it was constructed for this searcher's top-level context.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  TermContext state;
  final boolean cacheValid =
      perReaderTermState != null && perReaderTermState.topReaderContext == topContext;
  // Rebuild when there is no PRTS or it belongs to another context
  // (keeps TermQuery single-pass).
  state = cacheValid ? this.perReaderTermState : TermContext.build(topContext, term);
  // A caller-provided docFreq overrides the real one (deliberate "lie").
  if (docFreq != -1) {
    state.setDocFreq(docFreq);
  }
  return new TermWeight(searcher, state);
}
项目:search    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query. Weight statistics are only computed when the query has a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  // One context + statistics entry per extracted term.
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
项目:search    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) for every term of every position
 * of the multi-phrase, building and caching a TermContext per distinct term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();

  // Collect term statistics for idf computation, caching contexts in termContexts.
  ArrayList<TermStatistics> allTermStats = new ArrayList<>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
项目:search    文件:FieldCacheSanityChecker.java   
/**
 * If the seed is an IndexReader, walks its subReader hierarchy and returns
 * the {@code getCoreCacheKey()} objects of all descendant readers.
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  final List<Object> collected = new ArrayList<>(17); // doubles as work queue
  collected.add(seed);
  int cursor = 0;
  while (cursor < collected.size()) {
    final Object current = collected.get(cursor++);
    // TODO: closed readers are not handled specially here (getContext
    // throws AlreadyClosedException) — reflection might be an option.
    if (current instanceof IndexReader) {
      try {
        final List<IndexReaderContext> children =
            ((IndexReader) current).getContext().children();
        // children is non-null only for composite readers
        if (children != null) {
          for (final IndexReaderContext childCtx : children) {
            collected.add(childCtx.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ignored) {
        // skip readers that were closed under us
      }
    }
  }
  // Everything except index 0 (the seed itself) is a descendant key.
  return collected.subList(1, collected.size());
}
项目:DoSeR    文件:LearnToRankTermQuery.java   
/**
 * Creates the weight for this learn-to-rank term query, caching term lookups
 * when a fresh TermContext has to be built.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    TermContext termState;
    if (perReaderTermState != null
            && perReaderTermState.topReaderContext == context) {
        // PRTS was pre-built for this searcher: reuse it.
        termState = this.perReaderTermState;
    } else {
        // No PRTS, or it targets another context: rebuild and cache term
        // lookups (third argument = true), keeping the query single-pass.
        termState = TermContext.build(context, term, true);
    }
    // An externally supplied docFreq must not be ignored (deliberate "lie").
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, termState);
}
项目:DoSeR    文件:TermQuery.java   
/**
 * Creates the weight for this term query, reusing the pre-built per-reader
 * term state (PRTS) when it matches this searcher's top-level context.
 */
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term);
    } else {
        // PRTS was pre-build for this IS
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq - if set use the given value
    // (lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }

    return new TermWeight(searcher, termState);
}
项目:DoSeR    文件:LearnToRankTermQuery.java   
/**
 * Creates the weight for this learn-to-rank term query, reusing the
 * pre-built per-reader term state (PRTS) when it matches this searcher's
 * top-level context.
 */
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term); // cache term
                                                        // lookups!
    } else {
        // PRTS was pre-build for this IS
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq - if set use the given value
    // (lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, termState);
}
项目:NYBC    文件:TermQuery.java   
/**
 * Creates the weight for this term query; builds a fresh TermContext (with
 * cached term lookups) unless a matching per-reader state already exists.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  TermContext state;
  if (perReaderTermState != null && perReaderTermState.topReaderContext == topContext) {
    // PRTS was pre-built for this searcher: reuse it.
    state = this.perReaderTermState;
  } else {
    // Rebuild (true => cache term lookups) so the query stays single-pass.
    state = TermContext.build(topContext, term, true);
  }
  // A caller-supplied docFreq overrides the computed one (deliberate "lie").
  if (docFreq != -1) {
    state.setDocFreq(docFreq);
  }
  return new TermWeight(searcher, state);
}
项目:NYBC    文件:SpanWeight.java   
/**
 * Builds the term contexts (with cached term lookups) and statistics needed
 * to score the given span query. Weight statistics are computed only when
 * the query targets a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> terms = new TreeSet<Term>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  // One context + statistics entry per extracted term.
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term, true);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
项目:NYBC    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) across every term of every
 * position of the multi-phrase, building and caching a TermContext per term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext topContext = searcher.getTopReaderContext();

  // Gather statistics for every term of every position (for idf).
  final ArrayList<TermStatistics> statsList = new ArrayList<TermStatistics>();
  for (final Term[] positionTerms : termArrays) {
    for (final Term term : positionTerms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        // Not cached yet: build it (true => cache term lookups) and remember it.
        termContext = TermContext.build(topContext, term, true);
        termContexts.put(term, termContext);
      }
      statsList.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field),
      statsList.toArray(new TermStatistics[statsList.size()]));
}
项目:NYBC    文件:FieldCacheSanityChecker.java   
/**
 * Checks if the seed is an IndexReader, and if so walks the hierarchy of
 * subReaders building up a list of the objects returned by
 * {@code seed.getCoreCacheKey()}. The seed itself is excluded from the result.
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  List<Object> all = new ArrayList<Object>(17); // will grow as we iter
  all.add(seed);
  // Breadth-first walk; the list itself serves as the work queue.
  for (int i = 0; i < all.size(); i++) {
    final Object obj = all.get(i);
    // TODO: We don't check closed readers here (as getTopReaderContext
    // throws AlreadyClosedException), what should we do? Reflection?
    if (obj instanceof IndexReader) {
      try {
        final List<IndexReaderContext> childs =
          ((IndexReader) obj).getContext().children();
        if (childs != null) { // it is composite reader
          for (final IndexReaderContext ctx : childs) {
            all.add(ctx.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ace) {
        // ignore this reader
      }
    }
  }
  // need to skip the first, because it was the seed
  return all.subList(1, all.size());
}
项目:incubator-blur    文件:SecureIndexSearcher.java   
/**
 * Creates a searcher that enforces read/discover access controls on top of
 * the given reader context, and indexes the secured leaves by the core cache
 * key of their original (unsecured) readers.
 *
 * @param context                top-level reader context to search
 * @param executor               executor passed to the base IndexSearcher
 * @param accessControlFactory   factory producing the access-control reader
 * @param readAuthorizations     authorizations granting read access
 * @param discoverAuthorizations authorizations granting discovery access
 * @param discoverableFields     fields visible in discovery mode
 * @param defaultReadMaskMessage mask text used for unreadable values
 */
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  List<AtomicReaderContext> leaves = _secureIndexReader.leaves();
  // Map each original segment's core cache key to its secured leaf context.
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext atomicReaderContext : leaves) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    SecureAtomicReader secureAtomicReader = (SecureAtomicReader) atomicReader;
    AtomicReader originalReader = secureAtomicReader.getOriginalReader();
    Object coreCacheKey = originalReader.getCoreCacheKey();
    _leaveMap.put(coreCacheKey, atomicReaderContext);
  }
}
项目:read-open-source-code    文件:TermQuery.java   
/**
 * Creates the weight for this term query. A cached per-reader term state is
 * reused only when it was built for this searcher's top-level context.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext readerContext = searcher.getTopReaderContext();
  TermContext state;
  if (perReaderTermState != null && perReaderTermState.topReaderContext == readerContext) {
    // PRTS was pre-built for this searcher: reuse it.
    state = this.perReaderTermState;
  } else {
    // No PRTS, or it targets a different context: rebuild (single-pass).
    state = TermContext.build(readerContext, term);
  }
  // Never ignore a caller-supplied docFreq (a deliberate "lie").
  if (docFreq != -1) {
    state.setDocFreq(docFreq);
  }
  return new TermWeight(searcher, state);
}
项目:read-open-source-code    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query. Similarity weight statistics are computed only when the query
 * targets a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> extracted = new TreeSet<Term>();
  query.extractTerms(extracted);
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  final TermStatistics[] statistics = new TermStatistics[extracted.size()];
  int slot = 0;
  // One context + statistics entry per extracted term.
  for (Term t : extracted) {
    final TermContext state = TermContext.build(topContext, t);
    statistics[slot++] = searcher.termStatistics(t, state);
    termContexts.put(t, state);
  }
  // Weight statistics only make sense when the query has a field.
  if (query.getField() != null) {
    stats = similarity.computeWeight(query.getBoost(),
                                     searcher.collectionStatistics(query.getField()),
                                     statistics);
  }
}
项目:read-open-source-code    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) for every term of every position
 * of the multi-phrase, building and caching a TermContext per distinct term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();

  // Collect term statistics for idf computation, caching contexts in termContexts.
  ArrayList<TermStatistics> allTermStats = new ArrayList<TermStatistics>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
项目:read-open-source-code    文件:FieldCacheSanityChecker.java   
/**
 * When the seed is an IndexReader, walks its subReader hierarchy and returns
 * the {@code getCoreCacheKey()} objects of all descendants (seed excluded).
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  final List<Object> queue = new ArrayList<Object>(17); // grows while walking
  queue.add(seed);
  for (int pos = 0; pos < queue.size(); pos++) {
    final Object entry = queue.get(pos);
    if (!(entry instanceof IndexReader)) {
      continue; // plain cache keys have no children to expand
    }
    // TODO: closed readers are not detected up front (getContext throws
    // AlreadyClosedException); reflection could be an alternative.
    try {
      final List<IndexReaderContext> kids = ((IndexReader) entry).getContext().children();
      if (kids == null) {
        continue; // atomic reader: no children
      }
      for (final IndexReaderContext kid : kids) {
        queue.add(kid.reader().getCoreCacheKey());
      }
    } catch (AlreadyClosedException ignored) {
      // reader was closed under us; skip it
    }
  }
  return queue.subList(1, queue.size()); // index 0 is the seed, not a descendant
}
项目:read-open-source-code    文件:TermQuery.java   
/**
 * Creates the weight for this term query, reusing the pre-built per-reader
 * term state (PRTS) when it matches this searcher's top-level context.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermContext termState;
  if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
    // make TermQuery single-pass if we don't have a PRTS or if the context differs!
    termState = TermContext.build(context, term);
  } else {
   // PRTS was pre-build for this IS
   termState = this.perReaderTermState;
  }

  // we must not ignore the given docFreq - if set use the given value (lie)
  if (docFreq != -1)
    termState.setDocFreq(docFreq);

  return new TermWeight(searcher, termState);
}
项目:read-open-source-code    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query. Weight statistics are computed only when the query has a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> terms = new TreeSet<Term>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  // One context + statistics entry per extracted term.
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
项目:read-open-source-code    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) for every term of every position
 * of the multi-phrase, building and caching a TermContext per distinct term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext topContext = searcher.getTopReaderContext();

  // Gather per-term statistics (for idf), caching contexts in termContexts.
  final ArrayList<TermStatistics> gathered = new ArrayList<TermStatistics>();
  for (final Term[] positionTerms : termArrays) {
    for (final Term term : positionTerms) {
      TermContext cached = termContexts.get(term);
      if (cached == null) {
        // First time we see this term: build and remember its context.
        cached = TermContext.build(topContext, term);
        termContexts.put(term, cached);
      }
      gathered.add(searcher.termStatistics(term, cached));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field),
      gathered.toArray(new TermStatistics[gathered.size()]));
}
项目:read-open-source-code    文件:FieldCacheSanityChecker.java   
/**
 * Checks if the seed is an IndexReader, and if so walks the hierarchy of
 * subReaders building up a list of the objects returned by
 * {@code seed.getCoreCacheKey()}. The seed itself is excluded from the result.
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  List<Object> all = new ArrayList<Object>(17); // will grow as we iter
  all.add(seed);
  // Breadth-first walk; the list itself serves as the work queue.
  for (int i = 0; i < all.size(); i++) {
    final Object obj = all.get(i);
    // TODO: We don't check closed readers here (as getTopReaderContext
    // throws AlreadyClosedException), what should we do? Reflection?
    if (obj instanceof IndexReader) {
      try {
        final List<IndexReaderContext> childs =
          ((IndexReader) obj).getContext().children();
        if (childs != null) { // it is composite reader
          for (final IndexReaderContext ctx : childs) {
            all.add(ctx.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ace) {
        // ignore this reader
      }
    }
  }
  // need to skip the first, because it was the seed
  return all.subList(1, all.size());
}
项目:read-open-source-code    文件:TermQuery.java   
/**
 * Creates the weight for this term query, rebuilding the term state unless a
 * matching per-reader term state is already available.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  final boolean reuse =
      perReaderTermState != null && perReaderTermState.topReaderContext == topContext;
  // Reuse the PRTS only for the matching context; otherwise the query is
  // made single-pass by building a fresh state.
  final TermContext state = reuse
      ? this.perReaderTermState
      : TermContext.build(topContext, term);
  // A caller-supplied docFreq must take precedence (a deliberate "lie").
  if (docFreq != -1) {
    state.setDocFreq(docFreq);
  }
  return new TermWeight(searcher, state);
}
项目:read-open-source-code    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query. Weight statistics are computed only when the query has a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  // One context + statistics entry per extracted term.
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
项目:read-open-source-code    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) for every term of every position
 * of the multi-phrase, building and caching a TermContext per distinct term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext readerContext = searcher.getTopReaderContext();

  // Per-term statistics for idf; contexts are cached in termContexts.
  final ArrayList<TermStatistics> termStatistics = new ArrayList<>();
  for (final Term[] termsAtPosition : termArrays) {
    for (final Term term : termsAtPosition) {
      TermContext ctx = termContexts.get(term);
      if (ctx == null) {
        // First occurrence of this term: build and cache its context.
        ctx = TermContext.build(readerContext, term);
        termContexts.put(term, ctx);
      }
      termStatistics.add(searcher.termStatistics(term, ctx));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field),
      termStatistics.toArray(new TermStatistics[termStatistics.size()]));
}
项目:read-open-source-code    文件:FieldCacheSanityChecker.java   
/**
 * Checks if the seed is an IndexReader, and if so walks the hierarchy of
 * subReaders building up a list of the objects returned by
 * {@code seed.getCoreCacheKey()}. The seed itself is excluded from the result.
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  List<Object> all = new ArrayList<>(17); // will grow as we iter
  all.add(seed);
  // Breadth-first walk; the list itself serves as the work queue.
  for (int i = 0; i < all.size(); i++) {
    final Object obj = all.get(i);
    // TODO: We don't check closed readers here (as getTopReaderContext
    // throws AlreadyClosedException), what should we do? Reflection?
    if (obj instanceof IndexReader) {
      try {
        final List<IndexReaderContext> childs =
          ((IndexReader) obj).getContext().children();
        if (childs != null) { // it is composite reader
          for (final IndexReaderContext ctx : childs) {
            all.add(ctx.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ace) {
        // ignore this reader
      }
    }
  }
  // need to skip the first, because it was the seed
  return all.subList(1, all.size());
}
项目:Maskana-Gestor-de-Conocimiento    文件:TermQuery.java   
/**
 * Creates the weight for this term query, reusing the pre-built per-reader
 * term state (PRTS) when it matches this searcher's top-level context.
 */
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermContext termState;
  if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
    // make TermQuery single-pass if we don't have a PRTS or if the context differs!
    termState = TermContext.build(context, term);
  } else {
   // PRTS was pre-build for this IS
   termState = this.perReaderTermState;
  }

  // we must not ignore the given docFreq - if set use the given value (lie)
  if (docFreq != -1)
    termState.setDocFreq(docFreq);

  return new TermWeight(searcher, termState);
}
项目:Maskana-Gestor-de-Conocimiento    文件:SpanWeight.java   
/**
 * Builds the term contexts and statistics needed to score the given span
 * query. Weight statistics are computed only when the query has a field.
 */
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;

  termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> queryTerms = new TreeSet<Term>();
  query.extractTerms(queryTerms);
  final IndexReaderContext readerContext = searcher.getTopReaderContext();
  final TermStatistics[] perTermStats = new TermStatistics[queryTerms.size()];
  int index = 0;
  // Build one context + statistics entry per extracted term.
  for (Term queryTerm : queryTerms) {
    final TermContext built = TermContext.build(readerContext, queryTerm);
    perTermStats[index++] = searcher.termStatistics(queryTerm, built);
    termContexts.put(queryTerm, built);
  }
  // Weight statistics only make sense when the query has a field.
  if (query.getField() != null) {
    stats = similarity.computeWeight(query.getBoost(),
                                     searcher.collectionStatistics(query.getField()),
                                     perTermStats);
  }
}
项目:Maskana-Gestor-de-Conocimiento    文件:MultiPhraseQuery.java   
/**
 * Computes similarity statistics (idf etc.) for every term of every position
 * of the multi-phrase, building and caching a TermContext per distinct term.
 */
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();

  // Collect term statistics for idf computation, caching contexts in termContexts.
  ArrayList<TermStatistics> allTermStats = new ArrayList<TermStatistics>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
项目:Maskana-Gestor-de-Conocimiento    文件:FieldCacheSanityChecker.java   
/**
 * If the seed is an IndexReader, walks its subReader hierarchy and returns
 * the {@code getCoreCacheKey()} objects of all descendants (seed excluded).
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  final List<Object> walked = new ArrayList<Object>(17); // doubles as work queue
  walked.add(seed);
  int next = 0;
  while (next < walked.size()) {
    final Object item = walked.get(next++);
    // TODO: closed readers are not detected here (getContext throws
    // AlreadyClosedException); reflection might be an alternative.
    if (item instanceof IndexReader) {
      try {
        final List<IndexReaderContext> subContexts =
            ((IndexReader) item).getContext().children();
        // non-null children only for composite readers
        if (subContexts != null) {
          for (final IndexReaderContext sub : subContexts) {
            walked.add(sub.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ignored) {
        // reader was closed under us; skip it
      }
    }
  }
  // Index 0 is the seed itself, not a descendant key.
  return walked.subList(1, walked.size());
}
项目:elasticsearch_my    文件:BlendedTermQuery.java   
/**
 * Returns a copy of {@code ctx} whose doc freq is replaced by
 * {@code newDocFreq}, keeping the total term frequency consistent
 * (ttf is always &gt;= df, or -1 when unknown).
 *
 * The adjusted df/ttf are registered once, on the first leaf that has a
 * term state; later leaves register zeros so the aggregated totals are not
 * double-counted.
 *
 * @param readerContext top-level context the term context was built against
 * @param ctx           per-leaf term states to copy
 * @param newDocFreq    replacement document frequency
 * @return a new TermContext carrying the adjusted statistics
 */
private static TermContext adjustDF(IndexReaderContext readerContext, TermContext ctx, int newDocFreq) {
    assert ctx.wasBuiltFor(readerContext);
    // Use a value of ttf that is consistent with the doc freq (ie. gte)
    long newTTF;
    if (ctx.totalTermFreq() < 0) {
        newTTF = -1;
    } else {
        newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
    }
    List<LeafReaderContext> leaves = readerContext.leaves();
    final int len;
    if (leaves == null) {
        len = 1;
    } else {
        len = leaves.size();
    }
    TermContext newCtx = new TermContext(readerContext);
    for (int i = 0; i < len; ++i) {
        TermState termState = ctx.get(i);
        if (termState == null) {
            continue;
        }
        newCtx.register(termState, i, newDocFreq, newTTF);
        // Only the first registered leaf carries the totals; zero out afterwards.
        newDocFreq = 0;
        newTTF = 0;
    }
    return newCtx;
}