Java 类org.apache.lucene.util.BytesRefHash 实例源码

项目:lams    文件:TermsHashPerField.java   
/** Creates a per-field terms hash.
 * streamCount: how many streams this field stores per term —
 * e.g. doc(+freq) is one stream, prox+offset a second. */

public TermsHashPerField(int streamCount, FieldInvertState fieldState, TermsHash termsHash, TermsHashPerField nextPerField, FieldInfo fieldInfo) {
  // Per-field configuration.
  this.termsHash = termsHash;
  this.fieldState = fieldState;
  this.fieldInfo = fieldInfo;
  this.nextPerField = nextPerField;
  this.streamCount = streamCount;
  numPostingInt = 2*streamCount;

  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  bytesUsed = termsHash.bytesUsed;

  // Term bytes live in the shared termBytePool; the start array charges
  // its bookkeeping to the shared bytesUsed counter.
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE,
      new PostingsBytesStartArray(this, bytesUsed));
}
项目:siren-join    文件:BytesRefTermsSet.java   
/**
 * Deserializes this set from the given byte payload: a one-byte pruned
 * flag, followed by the term count, followed by the terms themselves.
 * Advances {@code bytes.offset} as it reads.
 */
private void readFromBytes(BytesRef bytes) {
  // Read pruned flag (single byte: 1 means pruned).
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);

  // Read size of the set
  int size = Bytes.readInt(bytes);

  // Rebuild the backing pool and hash, charging allocations to bytesUsed.
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  // One reusable scratch ref suffices: BytesRefHash.add copies the bytes.
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, reusable);
    set.add(reusable);
  }
}
项目:search    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    final int mid = (low + high) >>> 1;
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:search    文件:TermsHashPerField.java   
/** streamCount: how many streams this field stores per term.
 * E.g. doc(+freq) is 1 stream, prox+offset is a second. */

public TermsHashPerField(int streamCount, FieldInvertState fieldState, TermsHash termsHash, TermsHashPerField nextPerField, FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  this.fieldState = fieldState;
  this.streamCount = streamCount;
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  this.nextPerField = nextPerField;
  // Term bytes live in the shared termBytePool; byteStarts charges its
  // per-term bookkeeping to the shared bytesUsed counter.
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}
项目:NYBC    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    mid = (low + high) >>> 1;
    // Load the term at sorted position mid into the scratch ref.
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:NYBC    文件:TermsHashPerField.java   
/**
 * Wires this per-field instance into the shared {@code TermsHash} structures
 * and registers it with the consumer chain for {@code fieldInfo}.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  fieldState = docInverterPerField.fieldState;
  // Register with the consumer first: its stream count is queried below.
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  // Chain to the secondary TermsHash (if any) so it also tracks this field.
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
项目:read-open-source-code    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    mid = (low + high) >>> 1;
    // Load the term at sorted position mid into the scratch ref.
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:read-open-source-code    文件:TermsHashPerField.java   
/**
 * Wires this per-field instance into the shared {@code TermsHash} structures
 * and registers it with the consumer chain for {@code fieldInfo}.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  fieldState = docInverterPerField.fieldState;
  // Register with the consumer first: its stream count is queried below.
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  // Chain to the secondary TermsHash (if any) so it also tracks this field.
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
项目:read-open-source-code    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    mid = (low + high) >>> 1;
    // Load the term at sorted position mid into the scratch ref.
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:read-open-source-code    文件:TermsHashPerField.java   
/**
 * Wires this per-field instance into the shared {@code TermsHash} structures
 * and registers it with the consumer chain for {@code fieldInfo}.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  fieldState = docInverterPerField.fieldState;
  // Register with the consumer first: its stream count is queried below.
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  // Chain to the secondary TermsHash (if any) so it also tracks this field.
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
项目:read-open-source-code    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    mid = (low + high) >>> 1;
    // Load the term at sorted position mid into the scratch ref.
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:read-open-source-code    文件:TermsHashPerField.java   
/** streamCount: how many streams this field stores per term.
 * E.g. doc(+freq) is 1 stream, prox+offset is a second. */

public TermsHashPerField(int streamCount, FieldInvertState fieldState, TermsHash termsHash, TermsHashPerField nextPerField, FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  this.fieldState = fieldState;
  this.streamCount = streamCount;
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  this.nextPerField = nextPerField;
  // Term bytes live in the shared termBytePool; byteStarts charges its
  // per-term bookkeeping to the shared bytesUsed counter.
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}
项目:Maskana-Gestor-de-Conocimiento    文件:MemoryIndex.java   
/**
 * Binary-searches for {@code b} among the terms of {@code hash}, viewed in
 * sorted order through the ord array {@code ords}. Returns the index of the
 * match in {@code ords}, or {@code -(insertionPoint + 1)} when absent,
 * mirroring {@link java.util.Arrays#binarySearch}. {@code bytesRef} is a
 * reusable scratch buffer overwritten on every probe.
 */
private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    // Overflow-safe midpoint: unsigned shift instead of (low + high) / 2.
    mid = (low + high) >>> 1;
    // Load the term at sorted position mid into the scratch ref.
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  // Falling out of the loop means no exact match was found.
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}
项目:Maskana-Gestor-de-Conocimiento    文件:TermsHashPerField.java   
/**
 * Wires this per-field instance into the shared {@code TermsHash} structures
 * and registers it with the consumer chain for {@code fieldInfo}.
 */
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  // Pools and byte accounting are shared with the owning TermsHash.
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  fieldState = docInverterPerField.fieldState;
  // Register with the consumer first: its stream count is queried below.
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  // presumably two int slots per posting per stream — confirm against usage
  numPostingInt = 2*streamCount;
  this.fieldInfo = fieldInfo;
  // Chain to the secondary TermsHash (if any) so it also tracks this field.
  if (nextTermsHash != null)
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
项目:lams    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: one entry per added value; pendingCounts: per-document counts
  // (presumably — confirm against the addValue/finish methods).
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  // Charge the builders' initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: presumably one term ord per document — confirm against addValue.
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  // Charge the builder's initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:siren-join    文件:BytesRefTermsSet.java   
/**
 * Merges all terms of another {@code BytesRefTermsSet} into this set.
 *
 * @throws UnsupportedOperationException if {@code terms} is not a
 *         {@code BytesRefTermsSet}
 */
@Override
protected void addAll(TermsSet terms) {
  if (!(terms instanceof BytesRefTermsSet)) {
    throw new UnsupportedOperationException("Invalid type: BytesRefTermsSet expected.");
  }

  BytesRefHash input = ((BytesRefTermsSet) terms).set;
  // Single reusable scratch ref: get() fills it, add() copies the bytes out.
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < input.size(); i++) {
    input.get(i, reusable);
    set.add(reusable);
  }
}
项目:siren-join    文件:BytesRefTermsSet.java   
/**
 * Deserializes this set from the stream: a pruned flag, then the term
 * count, then the terms themselves.
 *
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  int size = in.readInt();

  // Rebuild the backing pool and hash, charging allocations to bytesUsed.
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  // size is an int, so index with an int (original used a needless long).
  for (int i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}
项目:search    文件:MemoryIndex.java   
/** Snapshot of per-field indexing state captured for a MemoryIndex field. */
public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, int lastOffset, long sumTotalTermFreq) {
  this.terms = terms;
  this.sliceArray = sliceArray; 
  this.numTokens = numTokens;
  this.numOverlapTokens = numOverlapTokens;
  this.boost = boost;
  this.sumTotalTermFreq = sumTotalTermFreq;
  this.lastPosition = lastPosition;
  this.lastOffset = lastOffset;
}
项目:search    文件:TermsQuery.java   
/**
 * @param field The field that should contain terms that are specified in the last parameter.
 * @param fromQuery The query the terms were collected from (kept presumably for equals/rewrite — confirm).
 * @param terms The terms that matching documents should have.
 */
TermsQuery(String field, Query fromQuery, BytesRefHash terms) {
  super(field);
  this.fromQuery = fromQuery;
  this.terms = terms;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
}
项目:search    文件:TermsQuery.java   
/**
 * Enum that only surfaces terms present in {@code terms}, visited in the
 * sorted order given by {@code ords}.
 * NOTE(review): reads fields {@code upto} and {@code spare} declared
 * elsewhere in the class; assumes {@code terms} is non-empty, since an
 * empty set makes {@code ords[lastElement]} index -1.
 */
SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms, int[] ords) {
  super(tenum);
  this.terms = terms;
  this.ords = ords;
  comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
  lastElement = terms.size() - 1;
  // Cache the largest term (presumably the seek upper bound — confirm).
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  // Initial seek target: the smallest term, at position upto.
  seekTerm = terms.get(ords[upto], spare);
}
项目:search    文件:TermsIncludingScoreQuery.java   
/**
 * Builds a scoring terms query; {@code scores} is indexed by term ord
 * (presumably — confirm against the scorer implementation).
 */
TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, Query originalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  this.ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
  this.unwrittenOriginalQuery = originalQuery;
}
项目:search    文件:TermsIncludingScoreQuery.java   
/**
 * Rewrite-time copy constructor: reuses the already-sorted {@code ords} and
 * keeps the pre-rewrite query in {@code unwrittenOriginalQuery}.
 */
private TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, int[] ords, Query originalQuery, Query unwrittenOriginalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  this.ords = ords;
  this.unwrittenOriginalQuery = unwrittenOriginalQuery;
}
项目:search    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: one entry per added value; pendingCounts: per-document counts
  // (presumably — confirm against the addValue/finish methods).
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  // Charge the builders' initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: presumably one term ord per document — confirm against addValue.
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  // Charge the builder's initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:NYBC    文件:MemoryIndex.java   
/** Snapshot of per-field indexing state captured for a MemoryIndex field. */
public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, long sumTotalTermFreq) {
  this.terms = terms;
  this.sliceArray = sliceArray; 
  this.numTokens = numTokens;
  this.numOverlapTokens = numOverlapTokens;
  this.boost = boost;
  this.sumTotalTermFreq = sumTotalTermFreq;
  this.lastPosition = lastPosition;
}
项目:NYBC    文件:TermsQuery.java   
/**
 * Enum that only surfaces terms present in {@code terms}, visited in the
 * order of the wrapped enum's comparator.
 * NOTE(review): reads fields {@code upto} and {@code spare} declared
 * elsewhere in the class; assumes {@code terms} is non-empty, since an
 * empty set makes {@code ords[lastElement]} index -1.
 */
SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms) {
  super(tenum);
  this.terms = terms;

  lastElement = terms.size() - 1;
  // Sort ords with the wrapped enum's comparator so seeks proceed in term order.
  ords = terms.sort(comparator = tenum.getComparator());
  // Cache the largest term (presumably the seek upper bound — confirm).
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  // Initial seek target: the smallest term, at position upto.
  seekTerm = terms.get(ords[upto], spare);
}
项目:NYBC    文件:TermsIncludingScoreQuery.java   
/**
 * Builds a scoring terms query; {@code scores} is indexed by term ord
 * (presumably — confirm against the scorer implementation).
 */
TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, Query originalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  this.ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
  this.unwrittenOriginalQuery = originalQuery;
}
项目:NYBC    文件:TermsIncludingScoreQuery.java   
/**
 * Rewrite-time copy constructor: reuses the already-sorted {@code ords} and
 * keeps the pre-rewrite query in {@code unwrittenOriginalQuery}.
 */
private TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, int[] ords, Query originalQuery, Query unwrittenOriginalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  this.ords = ords;
  this.unwrittenOriginalQuery = unwrittenOriginalQuery;
}
项目:NYBC    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: one entry per added value; pendingCounts: per-document counts
  // (presumably — confirm against the addValue/finish methods).
  pending = new AppendingLongBuffer();
  pendingCounts = new AppendingLongBuffer();
  // Charge the buffers' initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:NYBC    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: presumably one term ord per document — confirm against addValue.
  pending = new AppendingLongBuffer();
  // Charge the buffer's initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:TermsQuery.java   
/**
 * @param field The field that should contain terms that are specified in the last parameter.
 * @param fromQuery The query the terms were collected from (kept presumably for equals/rewrite — confirm).
 * @param terms The terms that matching documents should have.
 */
TermsQuery(String field, Query fromQuery, BytesRefHash terms) {
  super(field);
  this.fromQuery = fromQuery;
  this.terms = terms;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
}
项目:read-open-source-code    文件:TermsQuery.java   
/**
 * Enum that only surfaces terms present in {@code terms}, visited in the
 * sorted order given by {@code ords}.
 * NOTE(review): reads fields {@code upto} and {@code spare} declared
 * elsewhere in the class; assumes {@code terms} is non-empty, since an
 * empty set makes {@code ords[lastElement]} index -1.
 */
SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms, int[] ords) {
  super(tenum);
  this.terms = terms;
  this.ords = ords;
  comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
  lastElement = terms.size() - 1;
  // Cache the largest term (presumably the seek upper bound — confirm).
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  // Initial seek target: the smallest term, at position upto.
  seekTerm = terms.get(ords[upto], spare);
}
项目:read-open-source-code    文件:TermsIncludingScoreQuery.java   
/**
 * Builds a scoring terms query; {@code scores} is indexed by term ord
 * (presumably — confirm against the scorer implementation).
 */
TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, Query originalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  this.ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
  this.unwrittenOriginalQuery = originalQuery;
}
项目:read-open-source-code    文件:TermsIncludingScoreQuery.java   
/**
 * Rewrite-time copy constructor: reuses the already-sorted {@code ords} and
 * keeps the pre-rewrite query in {@code unwrittenOriginalQuery}.
 */
private TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, int[] ords, Query originalQuery, Query unwrittenOriginalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  this.ords = ords;
  this.unwrittenOriginalQuery = unwrittenOriginalQuery;
}
项目:read-open-source-code    文件:MemoryIndex.java   
/** Snapshot of per-field indexing state captured for a MemoryIndex field. */
public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, int lastOffset, long sumTotalTermFreq) {
  this.terms = terms;
  this.sliceArray = sliceArray; 
  this.numTokens = numTokens;
  this.numOverlapTokens = numOverlapTokens;
  this.boost = boost;
  this.sumTotalTermFreq = sumTotalTermFreq;
  this.lastPosition = lastPosition;
  this.lastOffset = lastOffset;
}
项目:read-open-source-code    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: one entry per added value; pendingCounts: per-document counts
  // (presumably — confirm against the addValue/finish methods).
  pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
  pendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  // Charge the buffers' initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values in RAM; all allocations are charged to the
 * writer-wide {@code iwBytesUsed} counter.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  // Dedupes term bytes; the pool's allocations are tracked by iwBytesUsed.
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  // pending: presumably one term ord per document — confirm against addValue.
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  // Charge the buffer's initial footprint to the writer counter.
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:TermsQuery.java   
/**
 * @param field The field that should contain terms that are specified in the last parameter.
 * @param fromQuery The query the terms were collected from (kept presumably for equals/rewrite — confirm).
 * @param terms The terms that matching documents should have.
 */
TermsQuery(String field, Query fromQuery, BytesRefHash terms) {
  super(field);
  this.fromQuery = fromQuery;
  this.terms = terms;
  // Precompute ord order sorted by UTF-8 bytes for ordered term seeking.
  ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
}
项目:read-open-source-code    文件:TermsQuery.java   
/**
 * Enum that only surfaces terms present in {@code terms}, visited in the
 * sorted order given by {@code ords}.
 * NOTE(review): reads fields {@code upto} and {@code spare} declared
 * elsewhere in the class; assumes {@code terms} is non-empty, since an
 * empty set makes {@code ords[lastElement]} index -1.
 */
SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms, int[] ords) {
  super(tenum);
  this.terms = terms;
  this.ords = ords;
  comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
  lastElement = terms.size() - 1;
  // Cache the largest term (presumably the seek upper bound — confirm).
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  // Initial seek target: the smallest term, at position upto.
  seekTerm = terms.get(ords[upto], spare);
}