private FileChannel openReader(long generationId) throws IOException {
    ensureOpen();
    if (readChannels.containsKey(generationId)) {
        return readChannels.get(generationId);
    }
    Path translogFilePath = this.translogPath.resolve(getFileNameFromId(generationId));
    if (!Files.exists(translogFilePath)) {
        return null;
    }
    // many readers may race to open a channel and put it into the cache, because the read lock is shared
    FileChannel readChannel = FileChannel.open(translogFilePath, StandardOpenOption.READ);
    FileChannel existingReadChannel = readChannels.putIfAbsent(generationId, readChannel);
    if (existingReadChannel != null) {
        // another reader won the race: close our channel and reuse the cached one
        IOUtils.close(readChannel);
        return existingReadChannel;
    }
    return readChannel;
}
/**
 * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired
 * a {@link LockObtainFailedException} is thrown and all previously acquired locks are released.
 *
 * @param index the index to lock shards for
 * @param settings the settings for the index, used to determine the number of shards
 * @param lockTimeoutMS how long to wait for acquiring the indices shard locks
 * @return the {@link ShardLock} instances for this index.
 * @throws IOException if an IOException occurs.
 */
public List<ShardLock> lockAllForIndex(Index index, Settings settings, long lockTimeoutMS) throws IOException {
    final Integer numShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
    if (numShards == null || numShards <= 0) {
        throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards");
    }
    logger.trace("locking all shards for index {} - [{}]", index, numShards);
    List<ShardLock> allLocks = new ArrayList<>(numShards);
    boolean success = false;
    long startTimeNS = System.nanoTime();
    try {
        for (int i = 0; i < numShards; i++) {
            long timeoutLeftMS = Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
            allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
        }
        success = true;
    } finally {
        if (success == false) {
            logger.trace("unable to lock all shards for index {}", index);
            IOUtils.closeWhileHandlingException(allLocks);
        }
    }
    return allLocks;
}
/** called from DirectoryReader.open(...) methods */
static DirectoryReader open(final Directory directory, final IndexCommit commit,
                            final int termInfosIndexDivisor) throws IOException {
    return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
        @Override
        protected Object doBody(String segmentFileName) throws IOException {
            SegmentInfos sis = new SegmentInfos();
            sis.read(directory, segmentFileName);
            final SegmentReader[] readers = new SegmentReader[sis.size()];
            for (int i = sis.size() - 1; i >= 0; i--) {
                boolean success = false;
                try {
                    readers[i] = new SegmentReader(sis.info(i), termInfosIndexDivisor, IOContext.READ);
                    success = true;
                } finally {
                    if (!success) {
                        IOUtils.closeWhileHandlingException(readers);
                    }
                }
            }
            return new StandardDirectoryReader(directory, readers, null, sis, termInfosIndexDivisor, false);
        }
    }.run(commit);
}
public void loopRead(Executor executor) {
    executor.execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            if (isOpen.get()) {
                try {
                    onException(MockChannel.this, e);
                } catch (Exception ex) {
                    logger.warn("failed on handling exception", ex);
                    IOUtils.closeWhileHandlingException(MockChannel.this); // pure paranoia
                }
            }
        }

        @Override
        protected void doRun() throws Exception {
            StreamInput input = new InputStreamStreamInput(new BufferedInputStream(activeChannel.getInputStream()));
            // There is a (slim) chance that we get interrupted right after a loop iteration, so check explicitly
            while (isOpen.get() && !Thread.currentThread().isInterrupted()) {
                cancellableThreads.executeIO(() -> readMessage(MockChannel.this, input));
            }
        }
    });
}
/**
 * @param type the ec2 hostname type to discover.
 * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained.
 * @see CustomNameResolver#resolveIfPossible(String)
 */
@SuppressForbidden(reason = "We call getInputStream in doPrivileged and provide SocketPermission")
public InetAddress[] resolve(Ec2HostnameType type) throws IOException {
    InputStream in = null;
    String metadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name;
    try {
        URL url = new URL(metadataUrl);
        logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url);
        URLConnection urlConnection = SocketAccess.doPrivilegedIOException(url::openConnection);
        urlConnection.setConnectTimeout(2000);
        in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream);
        BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));

        String metadataResult = urlReader.readLine();
        if (metadataResult == null || metadataResult.length() == 0) {
            throw new IOException("no ec2 metadata returned from [" + url + "] for [" + type.configName + "]");
        }
        // only one address: because we explicitly ask for only one via the Ec2HostnameType
        return new InetAddress[] { InetAddress.getByName(metadataResult) };
    } catch (IOException e) {
        throw new IOException("IOException caught when fetching InetAddress from [" + metadataUrl + "]", e);
    } finally {
        IOUtils.closeWhileHandlingException(in);
    }
}
private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception {
    final Class<?> clazz = this.getClass();
    TestCluster testCluster = clusters.remove(clazz); // remove this cluster first
    clearClusters(); // all leftovers are gone by now... this is really just a double safety if we miss something somewhere
    switch (currentClusterScope) {
        case SUITE:
            if (testCluster == null) { // only build if it's not there yet
                testCluster = buildWithPrivateContext(currentClusterScope, seed);
            }
            break;
        case TEST:
            // close the previous one and create a new one
            IOUtils.closeWhileHandlingException(testCluster);
            testCluster = buildTestCluster(currentClusterScope, seed);
            break;
    }
    clusters.put(clazz, testCluster);
    return testCluster;
}
/**
 * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
 * file name <i>dest</i>.
 * <p>
 * If you want to copy the entire source directory to the destination one, you
 * can do so like this:
 *
 * <pre class="prettyprint">
 * Directory to; // the directory to copy to
 * for (String file : dir.listAll()) {
 *   dir.copy(to, file, file, IOContext.DEFAULT); // the third argument can be either file, or a new name
 * }
 * </pre>
 * <p>
 * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
 * overwrite it if it does.
 */
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
    IndexOutput os = null;
    IndexInput is = null;
    boolean success = false;
    try {
        os = to.createOutput(dest, context);
        is = openInput(src, context);
        os.copyBytes(is, is.length());
        success = true;
    } finally {
        if (success) {
            IOUtils.close(os, is);
        } else {
            IOUtils.closeWhileHandlingException(os, is);
            try {
                to.deleteFile(dest);
            } catch (Throwable t) {
                // ignore: best-effort removal of the partially written file
            }
        }
    }
}
private void notifyCoreClosedListeners(Throwable th) {
    synchronized (coreClosedListeners) {
        for (CoreClosedListener listener : coreClosedListeners) {
            // SegmentReader uses our instance as its coreCacheKey:
            try {
                listener.onClose(this);
            } catch (Throwable t) {
                if (th == null) {
                    th = t;
                } else {
                    th.addSuppressed(t);
                }
            }
        }
        IOUtils.reThrowUnchecked(th);
    }
}
/**
 * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired
 * a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are released.
 *
 * @param index the index to lock shards for
 * @param settings the settings for the index, used to determine the number of shards
 * @param lockTimeoutMS how long to wait for acquiring the indices shard locks
 * @return the {@link ShardLock} instances for this index.
 * @throws IOException if an IOException occurs.
 * @throws ShardLockObtainFailedException if any shard lock could not be acquired in time
 */
public List<ShardLock> lockAllForIndex(Index index, IndexSettings settings, long lockTimeoutMS)
        throws IOException, ShardLockObtainFailedException {
    final int numShards = settings.getNumberOfShards();
    if (numShards <= 0) {
        throw new IllegalArgumentException("settings must contain a positive number of shards");
    }
    logger.trace("locking all shards for index {} - [{}]", index, numShards);
    List<ShardLock> allLocks = new ArrayList<>(numShards);
    boolean success = false;
    long startTimeNS = System.nanoTime();
    try {
        for (int i = 0; i < numShards; i++) {
            long timeoutLeftMS = Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
            allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
        }
        success = true;
    } finally {
        if (success == false) {
            logger.trace("unable to lock all shards for index {}", index);
            IOUtils.closeWhileHandlingException(allLocks);
        }
    }
    return allLocks;
}
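// Usage sketch (not from the original sources): acquiring all shard locks via the helper
// above and releasing them through IOUtils.close(), since ShardLock is Closeable. The
// nodeEnv parameter, the 5000ms timeout, and the deleteIndexContents() helper are
// hypothetical, for illustration only.
void withAllShardsLocked(NodeEnvironment nodeEnv, Index index, IndexSettings settings)
        throws IOException, ShardLockObtainFailedException {
    List<ShardLock> locks = nodeEnv.lockAllForIndex(index, settings, 5000); // wait up to 5s per shard budget
    try {
        deleteIndexContents(index); // work performed while every shard of the index is locked
    } finally {
        IOUtils.close(locks); // closing a ShardLock releases it; close() propagates failures
    }
}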
public void close(String reason, boolean flushEngine) throws IOException {
    synchronized (mutex) {
        try {
            changeState(IndexShardState.CLOSED, reason);
        } finally {
            final Engine engine = this.currentEngineReference.getAndSet(null);
            try {
                if (engine != null && flushEngine) {
                    engine.flushAndClose();
                }
            } finally {
                // playing safe here and close the engine even if the above succeeds - close can be called multiple times
                // Also closing refreshListeners to prevent us from accumulating any more listeners
                IOUtils.close(engine, refreshListeners);
                indexShardOperationsLock.close();
            }
        }
    }
}
@Override
public void close() throws IOException {
    // TODO: add a finish() at least to PushBase? DV too...?
    boolean success = false;
    try {
        if (docOut != null) {
            CodecUtil.writeFooter(docOut);
        }
        if (posOut != null) {
            CodecUtil.writeFooter(posOut);
        }
        if (payOut != null) {
            CodecUtil.writeFooter(payOut);
        }
        success = true;
    } finally {
        if (success) {
            IOUtils.close(docOut, posOut, payOut);
        } else {
            IOUtils.closeWhileHandlingException(docOut, posOut, payOut);
        }
        docOut = posOut = payOut = null;
    }
}
public Location writeToLocal(BytesReference data) throws IOException {
    final long position;
    final long generation;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        if (writtenOffset > TRANSLOG_ROLLING_SIZE_BYTES) {
            // roll over to a new generation file once the current one grows past the threshold
            IOUtils.close(writeChannel);
            tmpTranslogGeneration.incrementAndGet();
            writeChannel = FileChannel.open(this.translogPath.resolve(getFileNameFromId(tmpTranslogGeneration.get())),
                    StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE);
            writtenOffset = 0;
        }
        generation = tmpTranslogGeneration.get();
        position = writtenOffset;
        data.writeTo(writeChannel);
        writtenOffset = writtenOffset + data.length();
    }
    return new Translog.Location(generation, position, data.length());
}
@Override
public void commit() throws IOException {
    ImmutableTranslogReader toClose = null;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        if (currentCommittingTranslog == null) {
            prepareCommit();
        }
        lastCommittedTranslogFileGeneration = current.getGeneration(); // this is important - otherwise old files will not be cleaned up
        if (recoveredTranslogs.isEmpty() == false) {
            IOUtils.close(recoveredTranslogs);
            recoveredTranslogs.clear();
        }
        toClose = this.currentCommittingTranslog;
        this.currentCommittingTranslog = null;
    } finally {
        IOUtils.close(toClose);
    }
}
void trimUnreferencedReaders() {
    try (ReleasableLock ignored = writeLock.acquire()) {
        if (closed.get()) {
            // we're shutdown potentially on some tragic event - don't delete anything
            return;
        }
        long minReferencedGen = outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE);
        minReferencedGen = Math.min(lastCommittedTranslogFileGeneration, minReferencedGen);
        final long finalMinReferencedGen = minReferencedGen;
        List<TranslogReader> unreferenced =
                readers.stream().filter(r -> r.getGeneration() < finalMinReferencedGen).collect(Collectors.toList());
        for (final TranslogReader unreferencedReader : unreferenced) {
            Path translogPath = unreferencedReader.path();
            logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
            IOUtils.closeWhileHandlingException(unreferencedReader);
            IOUtils.deleteFilesIgnoringExceptions(translogPath,
                    translogPath.resolveSibling(getCommitCheckpointFileName(unreferencedReader.getGeneration())));
        }
        readers.removeAll(unreferenced);
    }
}
/**
 * Moves the index folder found in <code>source</code> to <code>target</code>
 */
void upgrade(final Index index, final Path source, final Path target) throws IOException {
    boolean success = false;
    try {
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        success = true;
    } catch (NoSuchFileException | FileNotFoundException exception) {
        // thrown when the source is non-existent because the folder was renamed
        // by another node (shared FS) after we checked if the target exists
        logger.error((Supplier<?>) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
                "upgrading with single node", target), exception);
        throw exception;
    } finally {
        if (success) {
            logger.info("{} moved from [{}] to [{}]", index, source, target);
            logger.trace("{} syncing directory [{}]", index, target);
            IOUtils.fsync(target, true);
        }
    }
}
/**
 * Copy the contents of the given InputStream to the given OutputStream.
 * Closes both streams when done.
 *
 * @param in the stream to copy from
 * @param out the stream to copy to
 * @param buffer the scratch buffer used while copying
 * @return the number of bytes copied
 * @throws IOException in case of I/O errors
 */
public static long copy(InputStream in, OutputStream out, byte[] buffer) throws IOException {
    Objects.requireNonNull(in, "No InputStream specified");
    Objects.requireNonNull(out, "No OutputStream specified");
    boolean success = false;
    try {
        long byteCount = 0;
        int bytesRead;
        while ((bytesRead = in.read(buffer)) != -1) {
            out.write(buffer, 0, bytesRead);
            byteCount += bytesRead;
        }
        out.flush();
        success = true;
        return byteCount;
    } finally {
        if (success) {
            IOUtils.close(in, out);
        } else {
            IOUtils.closeWhileHandlingException(in, out);
        }
    }
}
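// Usage sketch (not from the original sources): because copy() closes both streams on
// every path, the caller can hand them over directly without its own try-with-resources.
// The file names and the 8 KiB buffer size are arbitrary illustrative choices.
Path source = PathUtils.get("data.bin");
Path target = PathUtils.get("data.copy");
long copied = Streams.copy(Files.newInputStream(source), Files.newOutputStream(target), new byte[8 * 1024]);
logger.debug("copied [{}] bytes from [{}] to [{}]", copied, source, target);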
/**
 * Copy the contents of the given Reader to the given Writer.
 * Closes both when done.
 *
 * @param in the Reader to copy from
 * @param out the Writer to copy to
 * @return the number of characters copied
 * @throws IOException in case of I/O errors
 */
public static int copy(Reader in, Writer out) throws IOException {
    Objects.requireNonNull(in, "No Reader specified");
    Objects.requireNonNull(out, "No Writer specified");
    boolean success = false;
    try {
        int charCount = 0;
        char[] buffer = new char[BUFFER_SIZE];
        int charsRead;
        while ((charsRead = in.read(buffer)) != -1) {
            out.write(buffer, 0, charsRead);
            charCount += charsRead;
        }
        out.flush();
        success = true;
        return charCount;
    } finally {
        if (success) {
            IOUtils.close(in, out);
        } else {
            IOUtils.closeWhileHandlingException(in, out);
        }
    }
}
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
    PostingsReaderBase postingsReader = new Lucene41PostingsReader(state.directory,
            state.fieldInfos, state.segmentInfo, state.context, state.segmentSuffix);
    boolean success = false;
    try {
        FieldsProducer ret = new BlockTreeTermsReader(state.directory, state.fieldInfos, state.segmentInfo,
                postingsReader, state.context, state.segmentSuffix, state.termsIndexDivisor);
        success = true;
        return ret;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(postingsReader);
        }
    }
}
/** NOTE: this method closes the TokenStream, even on exception, which is awkward
 *  because really the caller who called {@link Analyzer#tokenStream} should close it,
 *  but when trying that there are recursion issues when we try to use the same
 *  TokenStream twice in the same recursion... */
public static int analyze(TokenStream stream, TokenConsumer consumer) throws IOException {
    int numTokens = 0;
    boolean success = false;
    try {
        stream.reset();
        consumer.reset(stream);
        while (stream.incrementToken()) {
            consumer.nextToken();
            numTokens++;
        }
        consumer.end();
        success = true; // without this, the finally block would always take the exception path
    } finally {
        if (success) {
            stream.close();
        } else {
            IOUtils.closeWhileHandlingException(stream);
        }
    }
    return numTokens;
}
/**
 * Tests that closing views after the translog is fine and we can reopen the translog
 */
public void testPendingDelete() throws IOException {
    translog.add(new Translog.Index("test", "1", new byte[]{1}));
    translog.prepareCommit();
    Translog.TranslogGeneration generation = translog.getGeneration();
    TranslogConfig config = translog.getConfig();
    translog.close();
    translog = new Translog(config, generation, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
    translog.add(new Translog.Index("test", "2", new byte[]{2}));
    translog.prepareCommit();
    Translog.View view = translog.newView();
    translog.add(new Translog.Index("test", "3", new byte[]{3}));
    translog.close();
    IOUtils.close(view);
    translog = new Translog(config, generation, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
/** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from the dest dir's parent. */
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception {
    if (Files.isDirectory(tmpBinDir) == false) {
        throw new UserException(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory");
    }
    Files.createDirectory(destBinDir);
    setFileAttributes(destBinDir, BIN_DIR_PERMS);

    try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpBinDir)) {
        for (Path srcFile : stream) {
            if (Files.isDirectory(srcFile)) {
                throw new UserException(
                        ExitCodes.DATA_ERROR,
                        "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName());
            }

            Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile));
            Files.copy(srcFile, destFile);
            setFileAttributes(destFile, BIN_FILES_PERMS);
        }
    }
    IOUtils.rm(tmpBinDir); // clean up what we just copied
}
@Override
public void close() throws IOException {
    boolean success = false;
    try {
        if (meta != null) {
            meta.writeVInt(-1); // write EOF marker
            CodecUtil.writeFooter(meta); // write checksum
        }
        if (data != null) {
            CodecUtil.writeFooter(data); // write checksum
        }
        success = true;
    } finally {
        if (success) {
            IOUtils.close(data, meta);
        } else {
            IOUtils.closeWhileHandlingException(data, meta);
        }
        meta = data = null;
    }
}
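// Distilled form (not from the original sources) of the close() idiom shared by the codec
// writers above: trailers are written first, and the outcome decides how the outputs are
// closed, so a failure during close never masks the primary exception. The writeTrailers()
// call and the out1/out2 fields are placeholders.
@Override
public void close() throws IOException {
    boolean success = false;
    try {
        writeTrailers(); // footer/EOF-marker writes that may throw
        success = true;
    } finally {
        if (success) {
            IOUtils.close(out1, out2); // normal path: propagate close failures
        } else {
            IOUtils.closeWhileHandlingException(out1, out2); // failure path: keep the original exception
        }
        out1 = out2 = null; // make a second close() call a no-op
    }
}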
/** expert: Creates a new writer */
public Lucene410DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension,
                                  String metaCodec, String metaExtension) throws IOException {
    boolean success = false;
    try {
        String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
        data = state.directory.createOutput(dataName, state.context);
        CodecUtil.writeHeader(data, dataCodec, Lucene410DocValuesFormat.VERSION_CURRENT);
        String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
        meta = state.directory.createOutput(metaName, state.context);
        CodecUtil.writeHeader(meta, metaCodec, Lucene410DocValuesFormat.VERSION_CURRENT);
        maxDoc = state.segmentInfo.getDocCount();
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(this);
        }
    }
}
@SuppressForbidden(reason = "tries to determine if disk is spinning")
// TODO: move PathUtils to be package-private here instead of public+forbidden api!
ESFileStore(FileStore in) {
    this.in = in;
    Boolean spins;
    // Lucene's IOUtils.spins only works on Linux today:
    if (Constants.LINUX) {
        try {
            spins = IOUtils.spins(PathUtils.get(getMountPointLinux(in)));
        } catch (Exception e) {
            spins = null;
        }
    } else {
        spins = null;
    }
    this.spins = spins;
}
@Override
public synchronized void close() throws IOException {
    try {
        if (lock != null) {
            try {
                lock.release();
                lock = null;
            } finally {
                clearLockHeld(path);
            }
        }
    } finally {
        IOUtils.close(channel);
        channel = null;
    }
}
@After
public void tearDown() throws Exception {
    try {
        logger.info("shutting down...");
        // JDK stack is broken, it does not iterate in the expected order (http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4475301)
        final List<Closeable> reverse = new ArrayList<>();
        while (!closeables.isEmpty()) {
            reverse.add(closeables.pop());
        }
        IOUtils.close(reverse);
    } finally {
        terminate(executorService);
        terminate(threadPool);
        super.tearDown();
    }
}
@Override
public SegmentInfo read(Directory directory, String segmentName, IOContext context) throws IOException {
    // NOTE: this is NOT how 3.x is really written...
    String fileName = IndexFileNames.segmentFileName(segmentName, "", Lucene3xSegmentInfoFormat.UPGRADED_SI_EXTENSION);

    boolean success = false;

    IndexInput input = directory.openInput(fileName, context);

    try {
        SegmentInfo si = readUpgradedSegmentInfo(segmentName, directory, input);
        success = true;
        return si;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(input);
        } else {
            input.close();
        }
    }
}
@Override
public void inform(ResourceLoader loader) throws IOException {
    InputStream stream = null;
    try {
        if (dictFile != null) { // the dictionary can be empty.
            dictionary = getWordSet(loader, dictFile, false);
        }
        // TODO: Broken, because we cannot resolve real system id
        // ResourceLoader should also supply method like ClassLoader to get resource URL
        stream = loader.openResource(hypFile);
        final InputSource is = new InputSource(stream);
        is.setEncoding(encoding); // if it's null let xml parser decide
        is.setSystemId(hypFile);
        if (luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0)) {
            hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
        } else {
            hyphenator = Lucene43HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
        }
    } finally {
        IOUtils.closeWhileHandlingException(stream);
    }
}
public void testMissingTranslog() throws IOException {
    // test that we can force start the engine, even if the translog is missing.
    engine.close();
    // fake a new translog, causing the engine to point to a missing one.
    Translog translog = createTranslog();
    long id = translog.currentFileGeneration();
    translog.close();
    IOUtils.rm(translog.location().resolve(Translog.getFilename(id)));
    try {
        engine = createEngine(store, primaryTranslogDir);
        fail("engine shouldn't start without a valid translog id");
    } catch (EngineCreationFailureException ex) {
        // expected
    }
    // now it should be OK.
    EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(),
            IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG);
    engine = new InternalEngine(config);
}
@Override
public SpanQuery getSpanQuery(Element e) throws ParserException {
    String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
    String value = DOMUtils.getNonBlankTextOrFail(e);

    List<SpanQuery> clausesList = new ArrayList<>();

    TokenStream ts = null;
    try {
        ts = analyzer.tokenStream(fieldName, value);
        TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
        BytesRef bytes = termAtt.getBytesRef();
        ts.reset();
        while (ts.incrementToken()) {
            termAtt.fillBytesRef();
            SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(bytes)));
            clausesList.add(stq);
        }
        ts.end();
        SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
        soq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
        return soq;
    } catch (IOException ioe) {
        throw new ParserException("IOException parsing value: " + value);
    } finally {
        IOUtils.closeWhileHandlingException(ts);
    }
}
@Override
protected void afterIfSuccessful() throws Exception {
    super.afterIfSuccessful();

    if (translog.isOpen()) {
        if (translog.currentFileGeneration() > 1) {
            translog.commit();
            assertFileDeleted(translog, translog.currentFileGeneration() - 1);
        }
        translog.close();
    }
    assertFileIsPresent(translog, translog.currentFileGeneration());
    IOUtils.rm(translog.location()); // delete all the locations
}
@Override
public void close() {
    final List<TranslogReader> toClose = new ArrayList<>();
    try {
        synchronized (this) {
            if (closed == false) {
                try {
                    if (onClose != null) {
                        onClose.handle(this);
                    }
                } finally {
                    closed = true;
                    toClose.addAll(orderedTranslogs);
                    orderedTranslogs.clear();
                }
            }
        }
    } finally {
        try {
            // Close out of lock to prevent deadlocks between channel close which checks for
            // references in InternalChannelReference.closeInternal (waiting on a read lock)
            // and other FsTranslog#newTranslog calling FsView.onNewTranslog (while having a write lock)
            IOUtils.close(toClose);
        } catch (Exception e) {
            throw new ElasticsearchException("failed to close view", e);
        }
    }
}
/**
 * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams.
 * This means that the Fields values will not be accessible.
 *
 * @throws IOException If an I/O error occurs
 */
@Override
public final void close() throws IOException {
    if (!closed) {
        IOUtils.close(fieldsStream, indexStream);
        closed = true;
    }
}
public void testIsFieldWithinQuery() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12").getMillis();
    long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2016-04-03").getMillis();
    Document doc = new Document();
    LongPoint field = new LongPoint("my_date", instant1);
    doc.add(field);
    w.addDocument(doc);
    field.setLongValue(instant2);
    w.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(w);
    DateFieldType ft = new DateFieldType();
    ft.setName("my_date");
    DateMathParser alternateFormat = new DateMathParser(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER);
    doTestIsFieldWithinQuery(ft, reader, null, null);
    doTestIsFieldWithinQuery(ft, reader, null, alternateFormat);
    doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null);
    doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, alternateFormat);

    // Fields with no value indexed.
    DateFieldType ft2 = new DateFieldType();
    ft2.setName("my_date2");

    QueryRewriteContext context = new QueryRewriteContext(null, null, null, xContentRegistry(), null, null, () -> nowInMillis);
    assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context));
    IOUtils.close(reader, w, dir);
}
@Override
public synchronized void close() {
    if (this.open.compareAndSet(true, false)) {
        if (activeDisruptionScheme != null) {
            activeDisruptionScheme.testClusterClosed();
            activeDisruptionScheme = null;
        }
        IOUtils.closeWhileHandlingException(nodes.values());
        nodes.clear();
        executor.shutdownNow();
    }
}
private void clearDataIfNeeded(RestartCallback callback) throws IOException {
    if (callback.clearData(name)) {
        NodeEnvironment nodeEnv = node.getNodeEnvironment();
        if (nodeEnv.hasNodeFile()) {
            final Path[] locations = nodeEnv.nodeDataPaths();
            logger.debug("removing node data paths: [{}]", Arrays.toString(locations));
            IOUtils.rm(locations);
        }
    }
}
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
    final Path file = path.resolve(blobName);
    try (OutputStream outputStream = Files.newOutputStream(file)) {
        Streams.copy(inputStream, outputStream, new byte[blobStore.bufferSizeInBytes()]);
    }
    IOUtils.fsync(file, false);
    IOUtils.fsync(path, true);
}
@Override
public void close() throws IOException {
    try {
        IOUtils.close(openFiles);
    } finally {
        norms.clear();
        openFiles.clear();
    }
}
/**
 * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams.
 * This means that the Fields values will not be accessible.
 *
 * @throws IOException If there is a low-level I/O error.
 */
public final void close() throws IOException {
    if (!closed) {
        IOUtils.close(fieldsStream, indexStream, storeCFSReader);
        closed = true;
    }
}
/**
 * Writes an automaton to a file.
 */
public void save(final File file) throws IOException {
    boolean success = false;
    OutputStream os = new BufferedOutputStream(new FileOutputStream(file));
    try {
        save(new OutputStreamDataOutput(os));
        success = true;
    } finally {
        if (success) {
            IOUtils.close(os);
        } else {
            IOUtils.closeWhileHandlingException(os);
        }
    }
}