Java 类 org.apache.lucene.search.BitsFilteredDocIdSet 实例源码

项目:Elasticsearch    文件:ParentQuery.java   
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    // Matches parent docs by iterating this segment's child docs and mapping
    // each child to its parent via the global ordinals loaded below.
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        // A non-empty DocIdSet may still legally return a null iterator.
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        // No global ordinals for the parent type in this segment: nothing can match.
        return null;
    }

    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
项目:Elasticsearch    文件:ParentConstantScoreQuery.java   
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    // Constant-score matching of child docs whose parent ordinal was
    // previously collected into parentOrds.
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }

    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues == null) {
        // No global ordinals in this segment: nothing can match.
        return null;
    }

    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
    DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
    if (innerIterator == null) {
        return null;
    }

    ChildrenDocIdIterator childrenDocIdIterator =
            new ChildrenDocIdIterator(innerIterator, parentOrds, globalValues);
    return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
}
项目:Elasticsearch    文件:ChildrenQuery.java   
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    // Scores parent docs from the child hits gathered during the collect phase.
    DocIdSet parentsSet = parentFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(parentsSet) || remaining == 0) {
        // Either no parents in this segment, or every collected parent has
        // already been emitted by earlier segments (short circuit).
        return null;
    }

    // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
    // count down (short circuit) logic will then work as expected.
    DocIdSetIterator parents = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();

    if (parents != null) {
        SortedDocValues bytesValues = collector.globalIfd.load(context).getOrdinalsValues(parentType);
        if (bytesValues == null) {
            // No global ordinals for the parent type in this segment.
            return null;
        }

        // Counting scorers are needed when child-count constraints apply,
        // or when no scoring is requested at all.
        if (minChildren > 0 || maxChildren != 0 || scoreType == ScoreType.NONE) {
            switch (scoreType) {
            case NONE:
                DocIdSetIterator parentIdIterator = new CountParentOrdIterator(this, parents, collector, bytesValues,
                        minChildren, maxChildren);
                return ConstantScorer.create(parentIdIterator, this, queryWeight);
            case AVG:
                return new AvgParentCountScorer(this, parents, collector, bytesValues, minChildren, maxChildren);
            default:
                return new ParentCountScorer(this, parents, collector, bytesValues, minChildren, maxChildren);
            }
        }
        // No child-count constraints: plain aggregating scorers.
        switch (scoreType) {
        case AVG:
            return new AvgParentScorer(this, parents, collector, bytesValues);
        default:
            return new ParentScorer(this, parents, collector, bytesValues);
        }
    }
    return null;
}
项目:Elasticsearch    文件:ChildrenConstantScoreQuery.java   
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    // Constant-score matching of parent docs whose ordinal was collected.
    if (remaining == 0) {
        // All collected parents were already emitted by earlier segments.
        return null;
    }

    if (shortCircuitFilter != null) {
        // Fast path: a precomputed filter directly identifies the matching
        // parents, so no ordinal lookup is required.
        DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, null);
        if (!Lucene.isEmpty(docIdSet)) {
            DocIdSetIterator iterator = docIdSet.iterator();
            if (iterator != null) {
                return ConstantScorer.create(iterator, this, queryWeight);
            }
        }
        return null;
    }

    DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, null);
    if (!Lucene.isEmpty(parentDocIdSet)) {
        // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
        // count down (short circuit) logic will then work as expected.
        parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = parentDocIdSet.iterator();
        if (innerIterator != null) {
            LongBitSet parentOrds = collector.parentOrds;
            SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
            if (globalValues != null) {
                // Filter the parent iterator down to ords seen during collection.
                DocIdSetIterator parentIdIterator = new ParentOrdIterator(innerIterator, parentOrds, globalValues, this);
                return ConstantScorer.create(parentIdIterator, this, queryWeight);
            }
        }
    }
    return null;
}
项目:search    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:search    文件:DisjointSpatialFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  Bits docsWithField = null; // null means "all documents"
  if (field != null) {
    // NOTE: going through the FieldCache re-uses a cache, which is nice, but
    // loading it this way might be slower than an intersects filter against
    // the world bounds. Perhaps the strategy should expose a method for this —
    // but the strategy can't cache it.
    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);

    final int maxDoc = context.reader().maxDoc();
    if (docsWithField.length() != maxDoc) {
      throw new IllegalStateException("Bits length should be maxDoc ("+maxDoc+") but wasn't: "+docsWithField);
    }
    if (docsWithField instanceof Bits.MatchNoBits) {
      return null; // no doc has the field; nothing matches
    }
    if (docsWithField instanceof Bits.MatchAllBits) {
      docsWithField = null; // every doc has the field; no extra restriction
    }
  }

  // Not so much a chain as a convenient way to invert the intersects filter.
  DocIdSet inverted = new ChainedFilter(new Filter[]{intersectsFilter}, ChainedFilter.ANDNOT)
      .getDocIdSet(context, acceptDocs);
  return BitsFilteredDocIdSet.wrap(inverted, docsWithField);
}
项目:search    文件:ValueSourceRangeFilter.java   
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
   // Iterator-only DocIdSet backed by the value source's range scorer.
   DocIdSet rangeSet = new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       return valueSource.getValues(context, readerContext)
           .getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
     }

     @Override
     public Bits bits() {
       // Random access is intentionally unsupported for this filter.
       return null;
     }
   };
   return BitsFilteredDocIdSet.wrap(rangeSet, acceptDocs);
}
项目:NYBC    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:NYBC    文件:ValueSourceRangeFilter.java   
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
   // Iterator-only DocIdSet backed by the value source's range scorer.
   DocIdSet rangeSet = new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       return valueSource.getValues(context, readerContext)
           .getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
     }

     @Override
     public Bits bits() {
       // Random access is intentionally unsupported for this filter.
       return null;
     }
   };
   return BitsFilteredDocIdSet.wrap(rangeSet, acceptDocs);
}
项目:incubator-blur    文件:FilterCache.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // Per-segment cache of the wrapped filter's DocIdSet, keyed by the
  // reader's core cache key so entries die with the segment.
  AtomicReader reader = context.reader();
  Object key = reader.getCoreCacheKey();
  DocIdSet docIdSet = _cache.get(key);
  if (docIdSet != null) {
    _hits.incrementAndGet();
    // Cached set was built without acceptDocs; re-apply them on the way out.
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
  // This will only allow a single instance be created per reader per filter
  Object lock = getLock(key);
  synchronized (lock) {
    SegmentReader segmentReader = getSegmentReader(reader);
    if (segmentReader == null) {
      // Can't cache without a segment identity; fall through to the raw filter.
      LOG.warn("Could not find SegmentReader from [{0}]", reader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    Directory directory = getDirectory(segmentReader);
    if (directory == null) {
      LOG.warn("Could not find Directory from [{0}]", segmentReader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    _misses.incrementAndGet();
    String segmentName = segmentReader.getSegmentName();
    // Build the cacheable set WITHOUT acceptDocs so it stays valid as
    // deletes accumulate; acceptDocs are applied per-request below.
    docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
    _cache.put(key, docIdSet);
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
}
项目:search-core    文件:ValueSourceRangeFilter.java   
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
   // Iterator-only DocIdSet backed by the value source's range scorer.
   DocIdSet rangeSet = new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       return valueSource.getValues(context, readerContext)
           .getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
     }

     @Override
     public Bits bits() {
       // Random access is intentionally unsupported for this filter.
       return null;
     }
   };
   return BitsFilteredDocIdSet.wrap(rangeSet, acceptDocs);
}
项目:read-open-source-code    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:read-open-source-code    文件:DisjointSpatialFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  Bits docsWithField = null; // null means "all documents"
  if (field != null) {
    // NOTE: going through the FieldCache re-uses a cache, which is nice, but
    // loading it this way might be slower than an intersects filter against
    // the world bounds. Perhaps the strategy should expose a method for this —
    // but the strategy can't cache it.
    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);

    final int maxDoc = context.reader().maxDoc();
    if (docsWithField.length() != maxDoc) {
      throw new IllegalStateException("Bits length should be maxDoc ("+maxDoc+") but wasn't: "+docsWithField);
    }
    if (docsWithField instanceof Bits.MatchNoBits) {
      return null; // no doc has the field; nothing matches
    }
    if (docsWithField instanceof Bits.MatchAllBits) {
      docsWithField = null; // every doc has the field; no extra restriction
    }
  }

  // Not so much a chain as a convenient way to invert the intersects filter.
  DocIdSet inverted = new ChainedFilter(new Filter[]{intersectsFilter}, ChainedFilter.ANDNOT)
      .getDocIdSet(context, acceptDocs);
  return BitsFilteredDocIdSet.wrap(inverted, docsWithField);
}
项目:read-open-source-code    文件:ValueSourceRangeFilter.java   
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
   // Iterator-only DocIdSet backed by the value source's range scorer.
   DocIdSet rangeSet = new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       return valueSource.getValues(context, readerContext)
           .getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
     }

     @Override
     public Bits bits() {
       // Random access is intentionally unsupported for this filter.
       return null;
     }
   };
   return BitsFilteredDocIdSet.wrap(rangeSet, acceptDocs);
}
项目:read-open-source-code    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:read-open-source-code    文件:DisjointSpatialFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  Bits docsWithField = null; // null means "all documents"
  if (field != null) {
    // NOTE: going through the FieldCache re-uses a cache, which is nice, but
    // loading it this way might be slower than an intersects filter against
    // the world bounds. Perhaps the strategy should expose a method for this —
    // but the strategy can't cache it.
    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);

    final int maxDoc = context.reader().maxDoc();
    if (docsWithField.length() != maxDoc) {
      throw new IllegalStateException("Bits length should be maxDoc ("+maxDoc+") but wasn't: "+docsWithField);
    }
    if (docsWithField instanceof Bits.MatchNoBits) {
      return null; // no doc has the field; nothing matches
    }
    if (docsWithField instanceof Bits.MatchAllBits) {
      docsWithField = null; // every doc has the field; no extra restriction
    }
  }

  // Not so much a chain as a convenient way to invert the intersects filter.
  DocIdSet inverted = new ChainedFilter(new Filter[]{intersectsFilter}, ChainedFilter.ANDNOT)
      .getDocIdSet(context, acceptDocs);
  return BitsFilteredDocIdSet.wrap(inverted, docsWithField);
}
项目:read-open-source-code    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:read-open-source-code    文件:DisjointSpatialFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  Bits docsWithField = null; // null means "all documents"
  if (field != null) {
    // NOTE: going through the FieldCache re-uses a cache, which is nice, but
    // loading it this way might be slower than an intersects filter against
    // the world bounds. Perhaps the strategy should expose a method for this —
    // but the strategy can't cache it.
    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);

    final int maxDoc = context.reader().maxDoc();
    if (docsWithField.length() != maxDoc) {
      throw new IllegalStateException("Bits length should be maxDoc ("+maxDoc+") but wasn't: "+docsWithField);
    }
    if (docsWithField instanceof Bits.MatchNoBits) {
      return null; // no doc has the field; nothing matches
    }
    if (docsWithField instanceof Bits.MatchAllBits) {
      docsWithField = null; // every doc has the field; no extra restriction
    }
  }

  // Not so much a chain as a convenient way to invert the intersects filter.
  DocIdSet inverted = new ChainedFilter(new Filter[]{intersectsFilter}, ChainedFilter.ANDNOT)
      .getDocIdSet(context, acceptDocs);
  return BitsFilteredDocIdSet.wrap(inverted, docsWithField);
}
项目:read-open-source-code    文件:ValueSourceRangeFilter.java   
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
   // Iterator-only DocIdSet backed by the value source's range scorer.
   DocIdSet rangeSet = new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       return valueSource.getValues(context, readerContext)
           .getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
     }

     @Override
     public Bits bits() {
       // Random access is intentionally unsupported for this filter.
       return null;
     }
   };
   return BitsFilteredDocIdSet.wrap(rangeSet, acceptDocs);
}
项目:Maskana-Gestor-de-Conocimiento    文件:ChainedFilter.java   
/**
 * {@link Filter#getDocIdSet}.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // A one-element array serves as a mutable cursor shared with the recursive
  // overloads; an instance field would not be thread safe.
  final int[] index = {0};
  final DocIdSet result;
  if (logic != -1) {
    result = getDocIdSet(context, logic, index);
  } else if (logicArray != null) {
    result = getDocIdSet(context, logicArray, index);
  } else {
    result = getDocIdSet(context, DEFAULT, index);
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
项目:Maskana-Gestor-de-Conocimiento    文件:DisjointSpatialFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  Bits docsWithField = null; // null means "all documents"
  if (field != null) {
    // NOTE: going through the FieldCache re-uses a cache, which is nice, but
    // loading it this way might be slower than an intersects filter against
    // the world bounds. Perhaps the strategy should expose a method for this —
    // but the strategy can't cache it.
    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);

    final int maxDoc = context.reader().maxDoc();
    if (docsWithField.length() != maxDoc) {
      throw new IllegalStateException("Bits length should be maxDoc ("+maxDoc+") but wasn't: "+docsWithField);
    }
    if (docsWithField instanceof Bits.MatchNoBits) {
      return null; // no doc has the field; nothing matches
    }
    if (docsWithField instanceof Bits.MatchAllBits) {
      docsWithField = null; // every doc has the field; no extra restriction
    }
  }

  // Not so much a chain as a convenient way to invert the intersects filter.
  DocIdSet inverted = new ChainedFilter(new Filter[]{intersectsFilter}, ChainedFilter.ANDNOT)
      .getDocIdSet(context, acceptDocs);
  return BitsFilteredDocIdSet.wrap(inverted, docsWithField);
}
项目:linden    文件:NotNullFieldFilter.java   
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  // Docs whose field is populated, restricted to the accepted docs.
  return BitsFilteredDocIdSet.wrap(
      LindenFieldCacheImpl.DEFAULT.getNotNullFieldDocIdSet(context.reader(), fieldName),
      acceptDocs);
}
项目:Krill    文件:KrillCollection.java   
/**
 * Return the {@link DocIdSet} representing the documents of the
 * virtual collection to be used in searches.
 * This will respect deleted documents.
 * 
 * @param atomic
 *            The {@link LeafReaderContext} to search in.
 * @param acceptDocs
 *            {@link Bits} vector of accepted documents.
 * @return the doc id set for this leaf, or {@code null} when no document
 *         can match.
 * @throws IOException
 */
public DocIdSet getDocIdSet (LeafReaderContext atomic, Bits acceptDocs)
        throws IOException {

    int maxDoc = atomic.reader().maxDoc();
    FixedBitSet bitset = new FixedBitSet(maxDoc);

    Filter filter;
    if (this.cbi == null || (filter = this.cbi.toFilter()) == null) {
        // No collection constraint: all docs match (subject to acceptDocs).
        if (acceptDocs == null)
            return null;

        bitset.set(0, maxDoc);
    }
    else {
        // Init vector from the constraint filter; liveDocs are applied
        // at the end, not here, so negation sees the full doc space.
        DocIdSet docids = filter.getDocIdSet(atomic, null);
        DocIdSetIterator filterIter = (docids == null) ? null
                : docids.iterator();

        if (filterIter == null) {
            // Empty filter result: only a negative constraint can still match.
            if (!this.cbi.isNegative())
                return null;

            bitset.set(0, maxDoc);
        }
        else {
            // Or bit set
            bitset.or(filterIter);

            // Revert for negation
            if (this.cbi.isNegative())
                bitset.flip(0, maxDoc);
        }
    }

    if (DEBUG) {
        log.debug("Bit set is  {}", _bits(bitset));
        log.debug("Livedocs is {}", _bits(acceptDocs));
    }

    // Remove deleted docs. wrap() already returns a DocIdSet, so the
    // previous explicit casts were redundant and have been dropped.
    return BitsFilteredDocIdSet.wrap(new BitDocIdSet(bitset), acceptDocs);
}
项目:search    文件:DocSetBase.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final FixedBitSet bs = getBits();

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public long cost() {
              return bs.length();
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public long ramBytesUsed() {
          return bs.ramBytesUsed();
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }

      }, acceptDocs2);
    }
  };
}
项目:community-edition-old    文件:BitsFilter.java   
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits bits) {
    // Look up the pre-built set for this leaf (by ordinal) and restrict it
    // to the supplied acceptance bits.
    DocIdSet segmentSet = bitSets.get(context.ord);
    return BitsFilteredDocIdSet.wrap(segmentSet, bits);
}
项目:NYBC    文件:BitDocSet.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final OpenBitSet bs = bits;
  // TODO: if cardinality isn't cached, do a quick measure of sparseness
  // and return null from bits() if too sparse.

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public Bits bits() {
          // Random-access view: translate segment-local index to global.
          return new Bits() {
            @Override
            public boolean get(int index) {
              return bs.fastGet(index + base);
            }

            @Override
            public int length() {
              return maxDoc;
            }
          };
        }

      }, acceptDocs2);
    }
  };
}
项目:NYBC    文件:DocSetBase.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final OpenBitSet bs = getBits();

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }

      }, acceptDocs2);
    }
  };
}
项目:search-core    文件:BitDocSet.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final OpenBitSet bs = bits;
  // TODO: if cardinality isn't cached, do a quick measure of sparseness
  // and return null from bits() if too sparse.

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public Bits bits() {
          // Random-access view: translate segment-local index to global.
          return new Bits() {
            @Override
            public boolean get(int index) {
              return bs.fastGet(index + base);
            }

            @Override
            public int length() {
              return maxDoc;
            }
          };
        }

      }, acceptDocs2);
    }
  };
}
项目:search-core    文件:DocSetBase.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final OpenBitSet bs = getBits();

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }

      }, acceptDocs2);
    }
  };
}
项目:read-open-source-code    文件:DocSetBase.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final FixedBitSet bs = getBits();

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public long cost() {
              return bs.length();
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }

      }, acceptDocs2);
    }
  };
}
项目:read-open-source-code    文件:DocSetBase.java   
@Override
public Filter getTopFilter() {
  // Exposes this top-level DocSet as a Filter that answers per-segment
  // getDocIdSet() requests by offsetting into the global bitset.
  final FixedBitSet bs = getBits();

  return new Filter() {
    @Override
    public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) {
      AtomicReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);

      if (context.isTopLevel) {
        // Top-level reader: doc ids need no offsetting, use the bitset directly.
        return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
      }

      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      final int max = base + maxDoc;   // one past the max doc in this segment.

      return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          // Iterates the global bitset within [base, max), reporting
          // segment-local ids (global - base).
          return new DocIdSetIterator() {
            int pos=base-1;      // global position of the current set bit
            int adjustedDoc=-1;  // segment-local doc id reported to callers

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              pos = bs.nextSetBit(pos+1);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
              pos = bs.nextSetBit(target+base);
              return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
            }

            @Override
            public long cost() {
              return bs.length();
            }
          };
        }

        @Override
        public boolean isCacheable() {
          return true;
        }

        @Override
        public long ramBytesUsed() {
          return bs.ramBytesUsed();
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }

      }, acceptDocs2);
    }
  };
}