Example source code for the Java class org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException
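BytesRefHash copies each added value into a ByteBlockPool block and reserves 2 bytes for a length prefix, so a single value may be at most ByteBlockPool.BYTE_BLOCK_SIZE - 2 bytes (32766 with the default block size); a longer value makes add() throw MaxBytesLengthExceededException. The following is a minimal standalone sketch of that behavior; it is not taken from any of the projects listed below, the class name MaxBytesLengthDemo is illustrative, and a Lucene 4.x-era API is assumed.

import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class MaxBytesLengthDemo {
  public static void main(String[] args) {
    BytesRefHash hash = new BytesRefHash();

    // Largest value that still fits: BYTE_BLOCK_SIZE - 2 bytes, because the
    // value plus its length prefix must fit into a single pool block.
    hash.add(new BytesRef(new byte[ByteBlockPool.BYTE_BLOCK_SIZE - 2]));

    try {
      // One byte past the limit: add() rejects it.
      hash.add(new BytesRef(new byte[ByteBlockPool.BYTE_BLOCK_SIZE - 1]));
    } catch (BytesRefHash.MaxBytesLengthExceededException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}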

Project: search    File: TestBytesRefHash.java
@Test(expected = MaxBytesLengthExceededException.class)
public void testLargeValue() {
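  // Three candidate sizes: a tiny value, one just under the
  // BYTE_BLOCK_SIZE - 2 limit, and one past it; only the last add()
  // is expected to throw MaxBytesLengthExceededException.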
  int[] sizes = new int[] { random().nextInt(5),
      ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random().nextInt(31),
      ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random().nextInt(37) };
  BytesRef ref = new BytesRef();
  for (int i = 0; i < sizes.length; i++) {
    ref.bytes = new byte[sizes[i]];
    ref.offset = 0;
    ref.length = sizes[i];
    try {
      assertEquals(i, hash.add(ref));
    } catch (MaxBytesLengthExceededException e) {
      if (i < sizes.length - 1)
        fail("unexpected exception at size: " + sizes[i]);
      throw e;
    }
  }
}
Project: NYBC    File: TestBytesRefHash.java
@Test(expected = MaxBytesLengthExceededException.class)
public void testLargeValue() {
  int[] sizes = new int[] { random().nextInt(5),
      ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random().nextInt(31),
      ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random().nextInt(37) };
  BytesRef ref = new BytesRef();
  for (int i = 0; i < sizes.length; i++) {
    ref.bytes = new byte[sizes[i]];
    ref.offset = 0;
    ref.length = sizes[i];
    try {
      assertEquals(i, hash.add(ref));
    } catch (MaxBytesLengthExceededException e) {
      if (i < sizes.length - 1)
        fail("unexpected exception at size: " + sizes[i]);
      throw e;
    }
  }
}
Project: Maskana-Gestor-de-Conocimiento    File: TestBytesRefHash.java
@Test(expected = MaxBytesLengthExceededException.class)
public void testLargeValue() {
  int[] sizes = new int[] { random().nextInt(5),
      ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random().nextInt(31),
      ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random().nextInt(37) };
  BytesRef ref = new BytesRef();
  for (int i = 0; i < sizes.length; i++) {
    ref.bytes = new byte[sizes[i]];
    ref.offset = 0;
    ref.length = sizes[i];
    try {
      assertEquals(i, hash.add(ref));
    } catch (MaxBytesLengthExceededException e) {
      if (i < sizes.length - 1)
        fail("unexpected exception at size: " + sizes[i]);
      throw e;
    }
  }
}
Project: NYBC    File: TermsHashPerField.java
@Override
void add() throws IOException {

  // We are first in the chain so we must "intern" the
  // term text into textStart address
  // Get the text & hash of this term.
  int termID;
  try {
    termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
  } catch (MaxBytesLengthExceededException e) {
    // Not enough room in current block
    // Just skip this term, to remain as robust as
    // possible during indexing.  A TokenFilter
    // can be inserted into the analyzer chain if
    // other behavior is wanted (pruning the term
    // to a prefix, throwing an exception, etc).
    if (docState.maxTermPrefix == null) {
      final int saved = termBytesRef.length;
      try {
        termBytesRef.length = Math.min(30, DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8);
        docState.maxTermPrefix = termBytesRef.toString();
      } finally {
        termBytesRef.length = saved;
      }
    }
    consumer.skippingLongTerm();
    return;
  }
  if (termID >= 0) { // New posting
    bytesHash.byteStart(termID);
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);

  } else {
    termID = (-termID)-1;
    final int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }

  if (doNextCall)
    nextPerField.add(postingsArray.textStarts[termID]);
}
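The catch block above notes that a TokenFilter can be put into the analyzer chain when long terms should be pruned instead of silently skipped. Below is a minimal sketch of such a filter; TruncatingFilter is a hypothetical name (Lucene also ships ready-made length-limiting filters), and it simply caps each token at a fixed number of characters before the term reaches the indexing chain.

import java.io.IOException;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Hypothetical example filter: truncates every token to at most maxLen chars,
// so over-long terms are pruned to a prefix instead of being dropped later.
final class TruncatingFilter extends TokenFilter {
  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final int maxLen;

  TruncatingFilter(TokenStream in, int maxLen) {
    super(in);
    this.maxLen = maxLen;
  }

  @Override
  public boolean incrementToken() throws IOException {
    if (!input.incrementToken()) {
      return false;
    }
    if (termAtt.length() > maxLen) {
      termAtt.setLength(maxLen); // keep only the leading prefix of the token
    }
    return true;
  }
}

Wired into an Analyzer's token stream, a filter like this replaces the silent skip in the catch block with deterministic prefix truncation.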
Project: read-open-source-code    File: TermsHashPerField.java
@Override
void add() throws IOException {

  // We are first in the chain so we must "intern" the
  // term text into textStart address
  // Get the text & hash of this term.
  int termID;
  try {
    termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
  } catch (MaxBytesLengthExceededException e) {
    // Not enough room in current block
    // Just skip this term, to remain as robust as
    // possible during indexing.  A TokenFilter
    // can be inserted into the analyzer chain if
    // other behavior is wanted (pruning the term
    // to a prefix, throwing an exception, etc).
    if (docState.maxTermPrefix == null) {
      final int saved = termBytesRef.length;
      try {
        termBytesRef.length = Math.min(30, DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8);
        docState.maxTermPrefix = termBytesRef.toString();
      } finally {
        termBytesRef.length = saved;
      }
    }
    consumer.skippingLongTerm();
    return;
  }
  if (termID >= 0) { // New posting
    bytesHash.byteStart(termID);
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);

  } else {
    termID = (-termID)-1;
    final int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }

  if (doNextCall)
    nextPerField.add(postingsArray.textStarts[termID]);
}
Project: Maskana-Gestor-de-Conocimiento    File: TermsHashPerField.java
@Override
void add() throws IOException {

  // We are first in the chain so we must "intern" the
  // term text into textStart address
  // Get the text & hash of this term.
  int termID;
  try {
    termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
  } catch (MaxBytesLengthExceededException e) {
    // Not enough room in current block
    // Just skip this term, to remain as robust as
    // possible during indexing.  A TokenFilter
    // can be inserted into the analyzer chain if
    // other behavior is wanted (pruning the term
    // to a prefix, throwing an exception, etc).
    if (docState.maxTermPrefix == null) {
      final int saved = termBytesRef.length;
      try {
        termBytesRef.length = Math.min(30, DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8);
        docState.maxTermPrefix = termBytesRef.toString();
      } finally {
        termBytesRef.length = saved;
      }
    }
    consumer.skippingLongTerm();
    return;
  }
  if (termID >= 0) { // New posting
    bytesHash.byteStart(termID);
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for(int i=0;i<streamCount;i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);

  } else {
    termID = (-termID)-1;
    final int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }

  if (doNextCall)
    nextPerField.add(postingsArray.textStarts[termID]);
}