Java 类org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy 实例源码

项目:hadoop-2.6.0-cdh5.4.3    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hadoop-EAR    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hadoop-on-lustre    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:RDFS    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hadoop-0.20    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:mapreduce-fork    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hortonworks-extension    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:hortonworks-extension    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:t4f-data    文件:SnapshotIndex.java   
public void test() throws Exception {

        // NOTE(review): 'dir' is a null placeholder in this example; a real
        // Directory (e.g. FSDirectory) must be supplied before running, or
        // the IndexWriter constructor will fail.
        Directory dir = null;

        // Wrap the default keep-last-commit policy in a SnapshotDeletionPolicy
        // so a commit can be pinned while its files are copied.
        IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
        SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41,
                AosAnalyser.NO_LIMIT_TOKEN_COUNT_SIMPLE_ANALYSER);
        conf.setIndexDeletionPolicy(snapshotter);
        IndexWriter writer = new IndexWriter(dir, conf);

        try {
            // Pin the current commit under a caller-chosen id; its files will
            // not be deleted until release() is called.
            IndexCommit commit = snapshotter.snapshot("unique-id");
            Collection<String> fileNames = commit.getFileNames();
            /* <iterate over & copy files from fileNames> */
        }
        finally {
            snapshotter.release("unique-id");
            // Fix: the writer was previously never closed; close it here so
            // the index write lock and file handles are released.
            writer.close();
        }
    }
项目:hadoop-gpu    文件:TestMixedDirectory.java   
public void testMixedDirectoryAndPolicy() throws IOException {
  // Build an initial index in the read directory under the default
  // keep-only-last-commit policy, then verify it.
  Directory readDir = new RAMDirectory();
  updateIndex(readDir, 0, numDocsPerUpdate,
      new KeepOnlyLastCommitDeletionPolicy());

  verify(readDir, numDocsPerUpdate);

  // Plant an extra .cfs file directly in the read directory (presumably to
  // simulate a leftover segment file — confirm). Fix: close the output in a
  // finally block so the handle is not leaked if writeInt() throws.
  IndexOutput out =
      readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2)
          + ".cfs");
  try {
    out.writeInt(0);
  } finally {
    out.close();
  }

  // Layer a writable directory over the read-only one and append a second
  // batch of documents through the mixed view.
  Directory writeDir = new RAMDirectory();
  Directory mixedDir = new MixedDirectory(readDir, writeDir);
  updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate,
      new MixedDeletionPolicy());

  // The read directory still holds only the first batch; the mixed view
  // must see both batches.
  verify(readDir, numDocsPerUpdate);
  verify(mixedDir, 2 * numDocsPerUpdate);
}
项目:elasticsearch_my    文件:StoreTests.java   
public void testUserDataRead() throws IOException {
    // Verifies that commit user data written via IndexWriter.setCommitData
    // (sync id + translog generation) is readable through Store.getMetadata.
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    // Snapshot policy lets the test optionally read metadata from a pinned commit.
    SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    config.setIndexDeletionPolicy(deletionPolicy);
    IndexWriter writer = new IndexWriter(store.directory(), config);
    Document doc = new Document();
    doc.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    Map<String, String> commitData = new HashMap<>(2);
    String syncId = "a sync id";
    String translogId = "a translog id";
    commitData.put(Engine.SYNC_COMMIT_ID, syncId);
    commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogId);
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata;
    // Randomly read either from the live directory (null) or from a snapshot
    // commit; the user data must be identical either way.
    metadata = store.getMetadata(randomBoolean() ? null : deletionPolicy.snapshot());
    assertFalse(metadata.asMap().isEmpty());
    // do not check for correct files, we have enough tests for that above
    assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
}
项目:linden    文件:ShardWriter.java   
/**
 * Constructs a writer for one index shard plus its taxonomy (facet) index.
 * The permanent shard directories are recreated (previous contents are moved
 * to trash) and fresh Lucene writers are opened under {@code tempDir}.
 *
 * @param fs file system holding the permanent shard directories
 * @param shard descriptor of the shard; its directory names the permanent location
 * @param tempDir local scratch directory for the index and taxonomy being built
 * @param conf Hadoop configuration
 * @throws IOException if a directory cannot be prepared or a writer cannot open
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir, Configuration conf)
    throws IOException {
  logger.info("Construct a shard writer");

  this.conf = conf;
  this.fs = fs;
  localFs = FileSystem.getLocal(conf);
  perm = new Path(shard.getDirectory());
  // The taxonomy index lives next to the main index directory.
  taxoPerm = new Path(shard.getDirectory() + ".taxonomy");
  String indexDir = tempDir + "/" + "index";
  String taxoDir = tempDir + "/" + "taxo";
  temp = new Path(indexDir);
  taxoTemp = new Path(taxoDir);

  // Clear any leftover local temp index from a previous run.
  // NOTE(review): this deletes new File(temp.getName()) — only the last path
  // component, resolved against the current working directory, not the full
  // temp path; confirm that is intentional.
  if (localFs.exists(temp)) {
    File tempFile = new File(temp.getName());
    if (tempFile.exists()) {
      LindenReducer.deleteDir(tempFile);
    }
  }

  // Recreate the permanent index directory, trashing any previous contents.
  if (!fs.exists(perm)) {
    fs.mkdirs(perm);
  } else {
    moveToTrash(conf, perm);
    fs.mkdirs(perm);
  }

  // Same treatment for the permanent taxonomy directory.
  if (!fs.exists(taxoPerm)) {
    fs.mkdirs(taxoPerm);
  } else {
    moveToTrash(conf, taxoPerm);
    fs.mkdirs(taxoPerm);
  }
  // Analyzer is null — presumably documents are added pre-analyzed or via
  // addIndexes; TODO confirm against the callers.
  IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, null);
  config.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  writer = new IndexWriter(FSDirectory.open(new File(indexDir)), config);
  taxoWriter = new DirectoryTaxonomyWriter(FSDirectory.open(new File(taxoDir)));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:hadoop-EAR    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hadoop-EAR    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:search    文件:IndexRevisionTest.java   
@Test
public void testNoSnapshotDeletionPolicy() throws Exception {
  // IndexRevision requires the writer to use a SnapshotDeletionPolicy.
  // Configure a plain keep-last-commit policy and expect construction to be
  // rejected with IllegalArgumentException.
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
  conf.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    IndexRevision revision = new IndexRevision(writer);
    assertNotNull(revision);
    fail("should have failed when IndexDeletionPolicy is not Snapshot");
  } catch (IllegalArgumentException expected) {
    // the non-snapshot policy was rejected, as required
  } finally {
    IOUtils.close(writer, dir);
  }
}
项目:MIaS    文件:Indexing.java   
/**
 * Indexes files located in given input path.
 * @param path Path to the documents directory. Can be a single file as well.
 * @param rootDir A path in the @path parameter which is a root directory for the document storage. It determines the relative path
 * the files will be index with.
 */
public void indexFiles(String path, String rootDir) {
    storage = rootDir;
    // Normalize the storage root with a trailing separator so relative
    // document paths can be derived by prefix stripping.
    if (!storage.endsWith(File.separator)) {
        storage += File.separator;
    }
    final File docDir = new File(path);
    if (!docDir.exists() || !docDir.canRead()) {
        LOG.fatal("Document directory '{}' does not exist or is not readable, please check the path.",docDir.getAbsoluteFile());            
        // NOTE(review): System.exit in a library method kills the whole JVM;
        // consider throwing an exception so callers can recover.
        System.exit(1);
    }
    try {
        startTime = System.currentTimeMillis();
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_45, analyzer);
        // Payload-aware similarity; overlap discounting is disabled so
        // overlapping tokens still contribute to length normalization.
        PayloadSimilarity ps = new PayloadSimilarity();
        ps.setDiscountOverlaps(false);
        config.setSimilarity(ps);
        // Keep only the most recent commit point in the index directory.
        config.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        try (IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir), config))
        {
            LOG.info("Getting list of documents to index.");
            List<File> files = getDocs(docDir);
            countFiles(files);
            LOG.info("Number of documents to index is {}",count);
            indexDocsThreaded(files, writer);
        }
    } catch (IOException ex) {
        LOG.error(ex);
    }
}
项目:MIaS    文件:Indexing.java   
/**
     * Optimizes the index.
     * NOTE(review): the {@code writer.optimize()} call below is commented
     * out, so this method currently only opens and closes the writer and the
     * logged time measures index open/close, not optimization.
     */
    public void optimize() {        
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_31, analyzer);
        // Keep only the most recent commit point.
        config.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());  
        // TODO what do we measure here ? time of optimization or optimiziation
        // and index opening aswell
        startTime = System.currentTimeMillis();
        try(IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir), config)){
//            writer.optimize();    
            LOG.info("Optimizing time: {} ms",System.currentTimeMillis()-startTime);
        } catch (IOException e) {
            LOG.error(e.getMessage());
        }
    }
项目:MIaS    文件:Indexing.java   
/**
 * Deletes files located in given path from the index
 *
 * @param path Path of the files to be deleted
 */
public void deleteFiles(String path) {
    final File docDir = new File(path);
    if (!docDir.exists() || !docDir.canRead()) {
        LOG.error("Document directory '{}' does not exist or is not readable, please check the path.", docDir.getAbsolutePath());
        // NOTE(review): System.exit in a library method kills the whole JVM;
        // consider throwing an exception so callers can recover.
        System.exit(1);
    }
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_31, analyzer);
    // Keep only the most recent commit point in the index directory.
    config.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    try (IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir), config)) {
        deleteDocs(writer, docDir);
    } catch (IOException ex) {
        // Fix: route the failure through the class logger (consistent with
        // the rest of the class) and keep the stack trace, instead of
        // printing only the message to stdout.
        LOG.error(ex.getMessage(), ex);
    }
}
项目:hadoop-on-lustre    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hadoop-on-lustre    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:RDFS    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:RDFS    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:hadoop-0.20    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hadoop-0.20    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:mapreduce-fork    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:mapreduce-fork    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:Maskana-Gestor-de-Conocimiento    文件:IndexRevisionTest.java   
@Test
public void testNoSnapshotDeletionPolicy() throws Exception {
  // IndexRevision requires the writer to use a SnapshotDeletionPolicy.
  // Configure a plain keep-last-commit policy and expect construction to be
  // rejected with IllegalArgumentException.
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
  conf.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    IndexRevision revision = new IndexRevision(writer);
    assertNotNull(revision);
    fail("should have failed when IndexDeletionPolicy is not Snapshot");
  } catch (IllegalArgumentException expected) {
    // the non-snapshot policy was rejected, as required
  } finally {
    IOUtils.close(writer, dir);
  }
}
项目:hortonworks-extension    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hortonworks-extension    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:hortonworks-extension    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hortonworks-extension    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:hadoop-gpu    文件:IntermediateForm.java   
private IndexWriter createWriter() throws IOException {
  // No analyzer is supplied (null): this writer only merges indexes, it does
  // not analyze documents. The boolean flag is the legacy IndexWriter
  // constructor's second argument — presumably auto-commit; confirm against
  // the Lucene version in use.
  final IndexWriter result =
      new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
  result.setUseCompoundFile(false);

  // Honor the configured per-document field-length cap, when one is set.
  if (iconf != null) {
    final int fieldLengthLimit = iconf.getIndexMaxFieldLength();
    if (fieldLengthLimit > 0) {
      result.setMaxFieldLength(fieldLengthLimit);
    }
  }

  return result;
}
项目:hadoop-gpu    文件:ShardWriter.java   
/**
 * Constructs a shard writer that updates the permanent index directory
 * ({@code shard.getDirectory()}) through a local temporary directory.
 *
 * @param fs the (typically distributed) file system holding the permanent index
 * @param shard descriptor of the shard; its generation decides whether we
 *              create a fresh index or append to an existing one
 * @param tempDir local scratch directory used while writing
 * @param iconf index update configuration
 * @throws IOException if the directories cannot be prepared
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir,
    IndexUpdateConfiguration iconf) throws IOException {
  LOG.info("Construct a shard writer");

  this.fs = fs;
  localFs = FileSystem.getLocal(iconf.getConfiguration());
  perm = new Path(shard.getDirectory());
  temp = new Path(tempDir);

  // A negative generation means the shard has never been committed, so the
  // permanent directory must not exist yet.
  long initGeneration = shard.getGeneration();
  if (!fs.exists(perm)) {
    assert (initGeneration < 0);
    fs.mkdirs(perm);
  } else {
    // Roll the existing directory back to the requested generation so we
    // append on top of a consistent commit.
    restoreGeneration(fs, perm, initGeneration);
  }
  // Mixed view: existing files come from the permanent directory, new files
  // are first written through the local file system.
  dir =
      new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
          iconf.getConfiguration());

  // analyzer is null because we only use addIndexes, not addDocument
  writer =
      new IndexWriter(dir, false, null,
          initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
              : new MixedDeletionPolicy());
  setParameters(iconf);
}
项目:elasticsearch_my    文件:InternalEngineTests.java   
protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
    // Wrap Lucene's default policy (retain only the most recent commit) so
    // that individual commits can be pinned for snapshotting.
    KeepOnlyLastCommitDeletionPolicy base = new KeepOnlyLastCommitDeletionPolicy();
    return new SnapshotDeletionPolicy(base);
}
项目:elasticsearch_my    文件:ShadowEngineTests.java   
protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
    // Wrap Lucene's default policy (retain only the most recent commit) so
    // that individual commits can be pinned for snapshotting.
    KeepOnlyLastCommitDeletionPolicy base = new KeepOnlyLastCommitDeletionPolicy();
    return new SnapshotDeletionPolicy(base);
}