Java class org.apache.lucene.index.ReaderUtil: example source code
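The snippets below, gathered from several open-source projects, revolve around two ReaderUtil helpers: subIndex, which finds the segment (leaf) containing a top-level docID, and getTopLevelContext, which walks from a leaf reader context back up to the composite (top-level) context. The following is a minimal illustrative sketch of that pattern, assuming Lucene 5.x+ class names (LeafReaderContext); several snippets below use the equivalent 4.x AtomicReaderContext API instead, and the class and method names in the sketch are hypothetical rather than taken from any of these projects.

import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

class ReaderUtilSketch {

  /** Map a top-level docID to its segment index and the segment-local docID. */
  static void resolve(IndexReader reader, int topLevelDocId) {
    List<LeafReaderContext> leaves = reader.leaves();
    // Binary-search the leaves by docBase to find the segment that owns the docID.
    int leafIndex = ReaderUtil.subIndex(topLevelDocId, leaves);
    LeafReaderContext leaf = leaves.get(leafIndex);
    // Rebase the global docID into the segment's local doc space.
    int localDocId = topLevelDocId - leaf.docBase;
    System.out.println("leaf=" + leafIndex + ", localDoc=" + localDocId);
  }

  /** Climb from a leaf context back to the top-level (composite) context. */
  static IndexReaderContext topLevel(LeafReaderContext leaf) {
    return ReaderUtil.getTopLevelContext(leaf);
  }
}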

Project: Elasticsearch    File: ScoreDocRowFunction.java
@Nullable
@Override
public Row apply(@Nullable ScoreDoc input) {
    if (input == null) {
        return null;
    }
    FieldDoc fieldDoc = (FieldDoc) input;
    scorer.score(fieldDoc.score);
    for (OrderByCollectorExpression orderByCollectorExpression : orderByCollectorExpressions) {
        orderByCollectorExpression.setNextFieldDoc(fieldDoc);
    }
    List<LeafReaderContext> leaves = indexReader.leaves();
    int readerIndex = ReaderUtil.subIndex(fieldDoc.doc, leaves);
    LeafReaderContext subReaderContext = leaves.get(readerIndex);
    int subDoc = fieldDoc.doc - subReaderContext.docBase;
    for (LuceneCollectorExpression<?> expression : expressions) {
        expression.setNextReader(subReaderContext);
        expression.setNextDocId(subDoc);
    }
    return inputRow;
}
Project: search    File: ReverseOrdFieldSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc+off) - 1);
    }
  };
}
Project: search    File: QueryValueSource.java
public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, Map fcontext) throws IOException {
  super(vs);

  this.readerContext = readerContext;
  this.acceptDocs = readerContext.reader().getLiveDocs();
  this.defVal = vs.defVal;
  this.q = vs.q;
  this.fcontext = fcontext;

  Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
  if (w == null) {
    IndexSearcher weightSearcher;
    if(fcontext == null) {
      weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
    } else {
      weightSearcher = (IndexSearcher)fcontext.get("searcher");
      if (weightSearcher == null) {
        weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
      }
    }
    vs.createWeight(fcontext, weightSearcher);
    w = (Weight)fcontext.get(vs);
  }
  weight = w;
}
Project: search    File: ExpressionRescorer.java
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
  Explanation result = super.explain(searcher, firstPassExplanation, docID);

  List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();
  int subReader = ReaderUtil.subIndex(docID, leaves);
  AtomicReaderContext readerContext = leaves.get(subReader);
  int docIDInSegment = docID - readerContext.docBase;
  Map<String,Object> context = new HashMap<>();

  FakeScorer fakeScorer = new FakeScorer();
  fakeScorer.score = firstPassExplanation.getValue();
  fakeScorer.doc = docIDInSegment;

  context.put("scorer", fakeScorer);

  for(String variable : expression.variables) {
    result.addDetail(new Explanation((float) bindings.getValueSource(variable).getValues(context, readerContext).doubleVal(docIDInSegment),
                                     "variable \"" + variable + "\""));
  }

  return result;
}
Project: search    File: DocumentValueSourceDictionary.java
/** 
 * Returns the weight for the current <code>docId</code> as computed 
 * by the <code>weightsValueSource</code>
 * */
@Override
protected long getWeight(Document doc, int docId) {    
  if (currentWeightValues == null) {
    return 0;
  }
  int subIndex = ReaderUtil.subIndex(docId, starts);
  if (subIndex != currentLeafIndex) {
    currentLeafIndex = subIndex;
    try {
      currentWeightValues = weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  return currentWeightValues.longVal(docId - starts[subIndex]);
}
Project: search    File: ValueSourceAugmenter.java
@Override
public void transform(SolrDocument doc, int docid) {
  // This is only good for random-access functions

  try {

    // TODO: calculate this stuff just once across diff functions
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    FunctionValues values = docValuesArr[idx];
    if (values == null) {
      docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
    }

    int localId = docid - rcontext.docBase;
    Object val = values.objectVal(localId);
    if (val != null) {
      doc.setField( name, val );
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
Project: NYBC    File: ReverseOrdFieldSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = topReader instanceof CompositeReader 
      ? new SlowCompositeReaderWrapper((CompositeReader)topReader) 
      : (AtomicReader) topReader;
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc+off) - 1);
    }
  };
}
Project: NYBC    File: QueryValueSource.java
public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, Map fcontext) throws IOException {
  super(vs);

  this.readerContext = readerContext;
  this.acceptDocs = readerContext.reader().getLiveDocs();
  this.defVal = vs.defVal;
  this.q = vs.q;
  this.fcontext = fcontext;

  Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
  if (w == null) {
    IndexSearcher weightSearcher;
    if(fcontext == null) {
      weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
    } else {
      weightSearcher = (IndexSearcher)fcontext.get("searcher");
      if (weightSearcher == null) {
        weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
      }
    }
    vs.createWeight(fcontext, weightSearcher);
    w = (Weight)fcontext.get(vs);
  }
  weight = w;
}
Project: NYBC    File: ValueSourceAugmenter.java
@Override
public void transform(SolrDocument doc, int docid) {
  // This is only good for random-access functions

  try {

    // TODO: calculate this stuff just once across diff functions
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    FunctionValues values = docValuesArr[idx];
    if (values == null) {
      docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
    }

    int localId = docid - rcontext.docBase;
    Object val = values.objectVal(localId);
    if (val != null) {
      doc.setField( name, val );
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
Project: search-core    File: ValueSourceAugmenter.java
@Override
public void transform(SolrDocument doc, int docid) {
  // This is only good for random-access functions

  try {

    // TODO: calculate this stuff just once across diff functions
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    FunctionValues values = docValuesArr[idx];
    if (values == null) {
      docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
    }

    int localId = docid - rcontext.docBase;
    Object val = values.objectVal(localId);
    if (val != null) {
      doc.setField( name, val );
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
Project: read-open-source-code    File: ReverseOrdFieldSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc+off) - 1);
    }
  };
}
Project: read-open-source-code    File: QueryValueSource.java
public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, Map fcontext) throws IOException {
  super(vs);

  this.readerContext = readerContext;
  this.acceptDocs = readerContext.reader().getLiveDocs();
  this.defVal = vs.defVal;
  this.q = vs.q;
  this.fcontext = fcontext;

  Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
  if (w == null) {
    IndexSearcher weightSearcher;
    if(fcontext == null) {
      weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
    } else {
      weightSearcher = (IndexSearcher)fcontext.get("searcher");
      if (weightSearcher == null) {
        weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
      }
    }
    vs.createWeight(fcontext, weightSearcher);
    w = (Weight)fcontext.get(vs);
  }
  weight = w;
}
Project: read-open-source-code    File: DocumentValueSourceDictionary.java
/** 
 * Returns the weight for the current <code>docId</code> as computed 
 * by the <code>weightsValueSource</code>
 * */
@Override
protected long getWeight(Document doc, int docId) {    
  if (currentWeightValues == null) {
    return 0;
  }
  int subIndex = ReaderUtil.subIndex(docId, starts);
  if (subIndex != currentLeafIndex) {
    currentLeafIndex = subIndex;
    try {
      currentWeightValues = weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  return currentWeightValues.longVal(docId - starts[subIndex]);
}
Project: read-open-source-code    File: ValueSourceAugmenter.java
@Override
public void transform(SolrDocument doc, int docid) {
  // This is only good for random-access functions

  try {

    // TODO: calculate this stuff just once across diff functions
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    FunctionValues values = docValuesArr[idx];
    if (values == null) {
      docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
    }

    int localId = docid - rcontext.docBase;
    Object val = values.objectVal(localId);
    if (val != null) {
      doc.setField( name, val );
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
Project: read-open-source-code    File: ExpressionRescorer.java
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
  Explanation result = super.explain(searcher, firstPassExplanation, docID);

  List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();
  int subReader = ReaderUtil.subIndex(docID, leaves);
  AtomicReaderContext readerContext = leaves.get(subReader);
  int docIDInSegment = docID - readerContext.docBase;
  Map<String,Object> context = new HashMap<>();

  FakeScorer fakeScorer = new FakeScorer();
  fakeScorer.score = firstPassExplanation.getValue();
  fakeScorer.doc = docIDInSegment;

  context.put("scorer", fakeScorer);

  for(String variable : expression.variables) {
    result.addDetail(new Explanation((float) bindings.getValueSource(variable).getValues(context, readerContext).doubleVal(docIDInSegment),
                                     "variable \"" + variable + "\""));
  }

  return result;
}
Project: SolrTextTagger    File: TaggerRequestHandler.java
Object objectVal(int topDocId) throws IOException {
  // lookup segment level stuff:
  int segIdx = ReaderUtil.subIndex(topDocId, readerContexts);
  LeafReaderContext rcontext = readerContexts.get(segIdx);
  int segDocId = topDocId - rcontext.docBase;
  // unfortunately Lucene 7.0 requires forward only traversal (with no reset method).
  //   So we need to track our last docId (per segment) and re-fetch the FunctionValues. :-(
  FunctionValues functionValues = functionValuesPerSeg[segIdx];
  if (functionValues == null || segDocId < functionValuesDocIdPerSeg[segIdx]) {
    functionValues = functionValuesPerSeg[segIdx] = valueSource.getValues(fContext, rcontext);
  }
  functionValuesDocIdPerSeg[segIdx] = segDocId;

  // get value:
  return functionValues.objectVal(segDocId);
}
Project: Maskana-Gestor-de-Conocimiento    File: ReverseOrdFieldSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc+off) - 1);
    }
  };
}
Project: Maskana-Gestor-de-Conocimiento    File: QueryValueSource.java
public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, Map fcontext) throws IOException {
  super(vs);

  this.readerContext = readerContext;
  this.acceptDocs = readerContext.reader().getLiveDocs();
  this.defVal = vs.defVal;
  this.q = vs.q;
  this.fcontext = fcontext;

  Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
  if (w == null) {
    IndexSearcher weightSearcher;
    if(fcontext == null) {
      weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
    } else {
      weightSearcher = (IndexSearcher)fcontext.get("searcher");
      if (weightSearcher == null) {
        weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
      }
    }
    vs.createWeight(fcontext, weightSearcher);
    w = (Weight)fcontext.get(vs);
  }
  weight = w;
}
Project: elasticsearch_my    File: NestedAggregator.java
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);

    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent
            // doc), so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }

            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }

            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
Project: elasticsearch_my    File: IndexFieldData.java
/**
 * Get a {@link DocIdSet} that matches the inner documents.
 */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
    final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
    Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
    Scorer s = weight.scorer(ctx);
    return s == null ? null : s.iterator();
}
Project: elasticsearch_my    File: BitsetFilterCache.java
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && indexSettings.getIndex().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index " + shardId.getIndex()
                + " with cache of index " + indexSettings.getIndex());
    }
    Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> {
        context.reader().addCoreClosedListener(BitsetFilterCache.this);
        return CacheBuilder.<Query, Value>builder().build();
    });

    return filterToFbs.computeIfAbsent(query, key -> {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight weight = searcher.createNormalizedWeight(query, false);
        Scorer s = weight.scorer(context);
        final BitSet bitSet;
        if (s == null) {
            bitSet = null;
        } else {
            bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
        }

        Value value = new Value(bitSet, shardId);
        listener.onCache(shardId, value.bitset);
        return value;
    }).bitset;
}
Project: Elasticsearch    File: FetchCollector.java
public void collect(IntContainer docIds, StreamBucket.Builder builder) throws IOException {
    for (IntCursor cursor : docIds) {
        final int docId = cursor.value;
        int readerIndex = ReaderUtil.subIndex(docId, readerContexts);
        LeafReaderContext subReaderContext = readerContexts.get(readerIndex);
        setNextReader(subReaderContext);
        setNextDocId(docId - subReaderContext.docBase);
        builder.add(row);
    }
}
Project: Elasticsearch    File: BitsetFilterCache.java
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query,new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}
Project: search    File: JoinDocFreqValueSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException
{
  final BinaryDocValues terms = cache.getTerms(readerContext.reader(), field, false, PackedInts.FAST);
  final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
  Terms t = MultiFields.getTerms(top, qfield);
  final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator(null);

  return new IntDocValues(this) {

    @Override
    public int intVal(int doc) 
    {
      try {
        final BytesRef term = terms.get(doc);
        if (termsEnum.seekExact(term)) {
          return termsEnum.docFreq();
        } else {
          return 0;
        }
      } 
      catch (IOException e) {
        throw new RuntimeException("caught exception in function "+description()+" : doc="+doc, e);
      }
    }
  };
}
Project: search    File: ScaleFloatFunction.java
private ScaleInfo createScaleInfo(Map context, AtomicReaderContext readerContext) throws IOException {
  final List<AtomicReaderContext> leaves = ReaderUtil.getTopLevelContext(readerContext).leaves();

  float minVal = Float.POSITIVE_INFINITY;
  float maxVal = Float.NEGATIVE_INFINITY;

  for (AtomicReaderContext leaf : leaves) {
    int maxDoc = leaf.reader().maxDoc();
    FunctionValues vals =  source.getValues(context, leaf);
    for (int i=0; i<maxDoc; i++) {

      float val = vals.floatVal(i);
      if ((Float.floatToRawIntBits(val) & (0xff<<23)) == 0xff<<23) {
        // if the exponent in the float is all ones, then this is +Inf, -Inf or NaN
        // which don't make sense to factor into the scale function
        continue;
      }
      if (val < minVal) {
        minVal = val;
      }
      if (val > maxVal) {
        maxVal = val;
      }
    }
  }

  if (minVal == Float.POSITIVE_INFINITY) {
    // must have been an empty index
    minVal = maxVal = 0;
  }

  ScaleInfo scaleInfo = new ScaleInfo();
  scaleInfo.minVal = minVal;
  scaleInfo.maxVal = maxVal;
  context.put(ScaleFloatFunction.this, scaleInfo);
  return scaleInfo;
}
Project: search    File: TestGrouping.java
private void verifyShards(int[] docStarts, TopGroups<BytesRef> topGroups) {
  for(GroupDocs<?> group : topGroups.groups) {
    for(int hitIDX=0;hitIDX<group.scoreDocs.length;hitIDX++) {
      final ScoreDoc sd = group.scoreDocs[hitIDX];
      assertEquals("doc=" + sd.doc + " wrong shard",
                   ReaderUtil.subIndex(sd.doc, docStarts),
                   sd.shardIndex);
    }
  }
}
Project: search    File: RandomSortField.java
/** 
 * Given a field name and an IndexReader, get a random hash seed.
 * Using dynamic fields, you can force the random order to change 
 */
private static int getSeed(String fieldName, AtomicReaderContext context) {
  final DirectoryReader top = (DirectoryReader) ReaderUtil.getTopLevelContext(context).reader();
  // calling getVersion() on a segment will currently give you a null pointer exception, so
  // we use the top-level reader.
  return fieldName.hashCode() + context.docBase + (int)top.getVersion();
}
Project: search    File: TestIndexSearcher.java
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
  SchemaField sf = sqr.getSchema().getField(field);
  ValueSource vs = sf.getType().getValueSource(sf, null);
  Map context = ValueSource.newContext(sqr.getSearcher());
  vs.createWeight(context, sqr.getSearcher());
  IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int idx = ReaderUtil.subIndex(doc, leaves);
  AtomicReaderContext leaf = leaves.get(idx);
  FunctionValues vals = vs.getValues(context, leaf);
  return vals.strVal(doc-leaf.docBase);
}
Project: neo4j-lucene5-index    File: DocValuesCollector.java
@Override
protected ScoreDoc fetchNextOrNull()
{
    if ( !iterator.hasNext() )
    {
        return null;
    }
    currentDoc = iterator.next();
    int subIndex = ReaderUtil.subIndex( currentDoc.doc, docStarts );
    LeafReaderContext context = contexts[subIndex];
    onNextDoc( currentDoc.doc - context.docBase, context );
    return currentDoc;
}
Project: NYBC    File: PostingsHighlighter.java
private Map<Integer,String> highlightField(String field, String contents[], BreakIterator bi, Term terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages) throws IOException {  
  Map<Integer,String> highlights = new HashMap<Integer,String>();

  // reuse in the real sense... for docs in same segment we just advance our old enum
  DocsAndPositionsEnum postings[] = null;
  TermsEnum termsEnum = null;
  int lastLeaf = -1;

  for (int i = 0; i < docids.length; i++) {
    String content = contents[i];
    if (content.length() == 0) {
      continue; // nothing to do
    }
    bi.setText(content);
    int doc = docids[i];
    int leaf = ReaderUtil.subIndex(doc, leaves);
    AtomicReaderContext subContext = leaves.get(leaf);
    AtomicReader r = subContext.reader();
    Terms t = r.terms(field);
    if (t == null) {
      continue; // nothing to do
    }
    if (leaf != lastLeaf) {
      termsEnum = t.iterator(null);
      postings = new DocsAndPositionsEnum[terms.length];
    }
    Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
    if (passages.length > 0) {
      // otherwise a null snippet
      highlights.put(doc, formatter.format(passages, content));
    }
    lastLeaf = leaf;
  }

  return highlights;
}
Project: NYBC    File: JoinDocFreqValueSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException
{
  final BinaryDocValues terms = cache.getTerms(readerContext.reader(), field, PackedInts.FAST);
  final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
  Terms t = MultiFields.getTerms(top, qfield);
  final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator(null);

  return new IntDocValues(this) {
    final BytesRef ref = new BytesRef();

    @Override
    public int intVal(int doc) 
    {
      try {
        terms.get(doc, ref);
        if (termsEnum.seekExact(ref, true)) {
          return termsEnum.docFreq();
        } else {
          return 0;
        }
      } 
      catch (IOException e) {
        throw new RuntimeException("caught exception in function "+description()+" : doc="+doc, e);
      }
    }
  };
}
Project: NYBC    File: ScaleFloatFunction.java
private ScaleInfo createScaleInfo(Map context, AtomicReaderContext readerContext) throws IOException {
  final List<AtomicReaderContext> leaves = ReaderUtil.getTopLevelContext(readerContext).leaves();

  float minVal = Float.POSITIVE_INFINITY;
  float maxVal = Float.NEGATIVE_INFINITY;

  for (AtomicReaderContext leaf : leaves) {
    int maxDoc = leaf.reader().maxDoc();
    FunctionValues vals =  source.getValues(context, leaf);
    for (int i=0; i<maxDoc; i++) {

      float val = vals.floatVal(i);
      if ((Float.floatToRawIntBits(val) & (0xff<<23)) == 0xff<<23) {
        // if the exponent in the float is all ones, then this is +Inf, -Inf or NaN
        // which don't make sense to factor into the scale function
        continue;
      }
      if (val < minVal) {
        minVal = val;
      }
      if (val > maxVal) {
        maxVal = val;
      }
    }
  }

  if (minVal == Float.POSITIVE_INFINITY) {
    // must have been an empty index
    minVal = maxVal = 0;
  }

  ScaleInfo scaleInfo = new ScaleInfo();
  scaleInfo.minVal = minVal;
  scaleInfo.maxVal = maxVal;
  context.put(this.source, scaleInfo);
  return scaleInfo;
}
Project: NYBC    File: TestGrouping.java
private void verifyShards(int[] docStarts, TopGroups<BytesRef> topGroups) {
  for(GroupDocs<?> group : topGroups.groups) {
    for(int hitIDX=0;hitIDX<group.scoreDocs.length;hitIDX++) {
      final ScoreDoc sd = group.scoreDocs[hitIDX];
      assertEquals("doc=" + sd.doc + " wrong shard",
                   ReaderUtil.subIndex(sd.doc, docStarts),
                   sd.shardIndex);
    }
  }
}