Source code examples for the Java class org.apache.lucene.util.ByteBlockPool

Project: lams    File: ByteSliceReader.java
public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
    } else {
      // Non-final slice: its last 4 bytes hold the forward address of the next slice
      limit = upto+firstSize-4;
    }
  }
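Note on the arithmetic above: every non-final slice in a ByteBlockPool ends with a 4-byte forward address pointing at the next, larger slice, which is why init() stops the first slice at upto+firstSize-4. As a hedged sketch (closely following Lucene 4.x's ByteSliceReader.nextSlice(); field names as in init() above), this is how a reader follows that address:

public void nextSlice() {
    // Decode the big-endian forward address stored in the last 4 bytes of this slice
    final int nextIndex = ((buffer[limit]&0xff)<<24) + ((buffer[1+limit]&0xff)<<16)
                        + ((buffer[2+limit]&0xff)<<8) + (buffer[3+limit]&0xff);

    // Slices grow through the levels of LEVEL_SIZE_ARRAY (5, 14, 20, ... bytes)
    level = ByteBlockPool.NEXT_LEVEL_ARRAY[level];
    final int newSize = ByteBlockPool.LEVEL_SIZE_ARRAY[level];

    bufferUpto = nextIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = nextIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    if (nextIndex + newSize >= endIndex) {
      // Last slice: read right up to endIndex
      limit = endIndex - bufferOffset;
    } else {
      // Non-final slice: again reserve the trailing 4 bytes for the forward address
      limit = upto + newSize - 4;
    }
}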
Project: siren-join    File: BytesRefTermsSet.java
private void readFromBytes(BytesRef bytes) {
  // Read pruned flag
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);

  // Read size of the set
  int size = Bytes.readInt(bytes);

  // Read terms
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  BytesRef reusable = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, reusable);
    set.add(reusable);
  }
}
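For reference, a minimal usage sketch of the same Counter / DirectTrackingAllocator / BytesRefHash pattern (ours, not part of siren-join): once the set is rebuilt, membership is tested with BytesRefHash.find() and the tracked heap usage is read from the Counter:

Counter bytesUsed = Counter.newCounter();
ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
BytesRefHash set = new BytesRefHash(pool);

set.add(new BytesRef("term"));
boolean contains = set.find(new BytesRef("term")) >= 0; // find() returns -1 when absent
long heapBytes = bytesUsed.get();                       // grows as the pool allocates blocks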
Project: search    File: ByteSliceReader.java
public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
    } else {
      limit = upto+firstSize-4;
    }
  }
Project: NYBC    File: TermsHash.java
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
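The shared termBytePool is the point of the primary/secondary chain: the secondary TermsHash (e.g. the term-vectors consumer behind the freq/prox writer) must be able to re-read term bytes that the primary interned. A hedged sketch of that sharing using only public ByteBlockPool/BytesRefHash APIs (the two-component framing here is ours):

Counter bytesUsed = Counter.newCounter();
ByteBlockPool termBytePool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));

// The "primary" interns term bytes into the shared pool...
BytesRefHash primaryTerms = new BytesRefHash(termBytePool);
int termID = primaryTerms.add(new BytesRef("lucene"));
int textStart = primaryTerms.byteStart(termID);

// ...and a "secondary" holding only textStart can recover them later:
BytesRef term = new BytesRef();
termBytePool.setBytesRef(term, textStart); // decodes the length prefix stored at textStart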
Project: NYBC    File: ByteSliceReader.java
public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
    } else {
      limit = upto+firstSize-4;
    }
  }
Project: read-open-source-code    File: TermsHash.java
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
Project: read-open-source-code    File: ByteSliceReader.java
public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
    } else {
      limit = upto+firstSize-4;
    }
  }
Project: Maskana-Gestor-de-Conocimiento    File: TermsHash.java
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
Project: Maskana-Gestor-de-Conocimiento    File: ByteSliceReader.java
public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
    } else {
      limit = upto+firstSize-4;
    }
  }
Project: lams    File: TermsHash.java
TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
Project: lams    File: SortedSetDocValuesWriter.java
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
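A hedged sketch of the same three-argument BytesRefHash construction in isolation, showing why iwBytesUsed is passed twice (once for the pool's byte blocks, once for the per-term start offsets) and how ords are assigned at flush:

Counter iwBytesUsed = Counter.newCounter();
BytesRefHash hash = new BytesRefHash(
    new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)), // term bytes
    BytesRefHash.DEFAULT_CAPACITY,
    new BytesRefHash.DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed)); // start offsets

hash.add(new BytesRef("b"));
hash.add(new BytesRef("a"));
// At flush, the writer sorts term IDs into unicode order to assign ords:
int[] sortedIds = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());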
Project: lams    File: SortedDocValuesWriter.java
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: lams    File: TermsHashPerField.java
public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    newTerm(termID);

  } else {
    termID = (-termID)-1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    addTerm(termID);
  }
}
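The newSlice()/allocSlice() calls above rely on ByteBlockPool's slice protocol: a byte inside a slice is writable while it is 0, and hitting the non-zero level sentinel at the slice's end means allocSlice() must chain the next, larger slice. A minimal stand-alone write loop (ours; it mirrors what Lucene's ByteSliceWriter does):

ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
int upto = pool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE); // 5-byte level-0 slice
byte[] slice = pool.buffer;

for (byte b = 0; b < 20; b++) {
  if (slice[upto] != 0) {                // non-zero sentinel: slice is full
    upto = pool.allocSlice(slice, upto); // chain a larger slice, returns new write position
    slice = pool.buffer;                 // allocSlice may have moved to a new buffer
  }
  slice[upto++] = b;
}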
Project: lams    File: BytesRefArray.java
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
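A short usage sketch (ours) for this constructor; BytesRefArray preserves append order and its iterator hands back each entry as a transient BytesRef (note BytesRefIterator.next() declares IOException, so call it from a method that throws or handles it):

Counter counter = Counter.newCounter();
BytesRefArray list = new BytesRefArray(counter);
list.append(new BytesRef("b"));
list.append(new BytesRef("a")); // duplicates are allowed; counter.get() tracks pool growth

BytesRefIterator it = list.iterator(); // insertion order
for (BytesRef ref = it.next(); ref != null; ref = it.next()) {
  // ref is only valid until the next call to next()
}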
Project: siren-join    File: BytesRefTermsSet.java
@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  int size = in.readInt();

  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  for (int i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}
Project: search    File: MemoryIndexTest.java
private Allocator randomByteBlockAllocator() {
  if (random().nextBoolean()) {
    return new RecyclingByteBlockAllocator();
  } else {
    return new ByteBlockPool.DirectAllocator();
  }
}
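The test flips a coin between the two non-tracking allocators. For reference, a sketch of the three stock ByteBlockPool.Allocator flavors in org.apache.lucene.util (default RecyclingByteBlockAllocator settings assumed):

ByteBlockPool.Allocator direct = new ByteBlockPool.DirectAllocator(); // plain new byte[] blocks

Counter used = Counter.newCounter();
ByteBlockPool.Allocator tracking = new ByteBlockPool.DirectTrackingAllocator(used); // counts bytes in/out

ByteBlockPool.Allocator recycling = new RecyclingByteBlockAllocator(); // caches freed blocks for reuse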
Project: search    File: TermsHash.java
TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
Project: search    File: SortedSetDocValuesWriter.java
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: search    File: SortedDocValuesWriter.java
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: search    File: TermsHashPerField.java
public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    newTerm(termID);

  } else {
    termID = (-termID)-1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    addTerm(termID);
  }
}
Project: search    File: BytesRefArray.java
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
Project: NYBC    File: MemoryIndexTest.java
private Allocator randomByteBlockAllocator() {
  if (random().nextBoolean()) {
    return new RecyclingByteBlockAllocator();
  } else {
    return new ByteBlockPool.DirectAllocator();
  }
}
Project: NYBC    File: BytesRefArray.java
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
Project: NYBC    File: SortedSetDocValuesWriter.java
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  pendingCounts = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: NYBC    File: SortedDocValuesWriter.java
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: NYBC    File: TermsHashPerField.java
public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);

  } else {
    termID = (-termID)-1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }
}
Project: read-open-source-code    File: BytesRefArray.java
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
Project: read-open-source-code    File: SortedSetDocValuesWriter.java
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
  pendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: read-open-source-code    File: SortedDocValuesWriter.java
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: read-open-source-code    File: TermsHashPerField.java
public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);

  } else {
    termID = (-termID)-1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }
}
Project: read-open-source-code    File: TermsHash.java
TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
Project: read-open-source-code    File: SortedSetDocValuesWriter.java
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Project: read-open-source-code    File: SortedDocValuesWriter.java
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}