@Override
@SuppressForbidden(reason = "debug")
protected void stopInternal() {
    Releasables.close(serverOpenChannels, () -> {
        final List<Tuple<String, Future<?>>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size());
        for (final Map.Entry<String, ServerBootstrap> entry : serverBootstraps.entrySet()) {
            serverBootstrapCloseFutures.add(
                Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS)));
        }
        for (final Tuple<String, Future<?>> future : serverBootstrapCloseFutures) {
            future.v2().awaitUninterruptibly();
            if (!future.v2().isSuccess()) {
                logger.debug(
                    (Supplier<?>) () -> new ParameterizedMessage(
                        "Error closing server bootstrap for profile [{}]", future.v1()),
                    future.v2().cause());
            }
        }
        serverBootstraps.clear();
        if (bootstrap != null) {
            bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
            bootstrap = null;
        }
    });
}

void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
    if (logger.isDebugEnabled()) {
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
    }
    addShardFailure(shardIndex, new ShardSearchFailure(failure));
    successfulOps.decrementAndGet();
    if (counter.decrementAndGet() == 0) {
        if (successfulOps.get() == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures()));
        } else {
            try {
                executeFetchPhase();
            } catch (Exception e) {
                e.addSuppressed(failure);
                listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
            }
        }
    }
}

/**
 * Scans the hunspell directory and loads all dictionaries found there
 */
private void scanAndLoadDictionaries() throws IOException {
    if (Files.isDirectory(hunspellDir)) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(hunspellDir)) {
            for (Path file : stream) {
                if (Files.isDirectory(file)) {
                    try (DirectoryStream<Path> inner = Files.newDirectoryStream(hunspellDir.resolve(file), "*.dic")) {
                        if (inner.iterator().hasNext()) { // just making sure it's indeed a dictionary dir
                            try {
                                getDictionary(file.getFileName().toString());
                            } catch (Exception e) {
                                // The cache loader throws an unchecked exception (see #loadDictionary()),
                                // so we simply report it here and continue loading the remaining dictionaries
                                logger.error(
                                    (Supplier<?>) () -> new ParameterizedMessage(
                                        "exception while loading dictionary {}", file.getFileName()),
                                    e);
                            }
                        }
                    }
                }
            }
        }
    }
}

private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFailure, String message,
                                @Nullable Exception failure, ClusterState state) {
    try {
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardRouting.shardId().getIndex());
        if (indexService != null) {
            indexService.removeShard(shardRouting.shardId().id(), message);
        }
    } catch (ShardNotFoundException e) {
        // the node got closed on us, ignore it
    } catch (Exception inner) {
        if (failure != null) { // failure is @Nullable; addSuppressed(null) would throw an NPE
            inner.addSuppressed(failure);
        }
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[{}][{}] failed to remove shard after failure ([{}])",
                shardRouting.getIndexName(), shardRouting.getId(), message),
            inner);
    }
    if (sendShardFailure) {
        sendFailShard(shardRouting, message, failure, state);
    }
}

private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) {
    try {
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message),
            failure);
        failedShardsCache.put(shardRouting.shardId(), shardRouting);
        shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state);
    } catch (Exception inner) {
        if (failure != null) {
            inner.addSuppressed(failure);
        }
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[{}][{}] failed to mark shard as failed (because of [{}])",
                shardRouting.getIndexName(), shardRouting.getId(), message),
            inner);
    }
}

/**
 * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
 * but does not deal with in-memory structures. For those call {@link #removeIndex(Index, IndexRemovalReason, String)}
 */
@Override
public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
    if (nodeEnv.hasNodeFile()) {
        String indexName = metaData.getIndex().getName();
        try {
            if (clusterState.metaData().hasIndex(indexName)) {
                final IndexMetaData index = clusterState.metaData().index(indexName);
                throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of "
                    + "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
            }
            deleteIndexStore(reason, metaData, clusterState);
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e);
        }
    }
}

protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    if (cause instanceof ReadTimeoutException) {
        if (logger.isTraceEnabled()) {
            logger.trace("Connection timeout [{}]", ctx.channel().remoteAddress());
        }
        ctx.channel().close();
    } else {
        if (!lifecycle.started()) {
            // ignore
            return;
        }
        if (!NetworkExceptionHelper.isCloseConnectionException(cause)) {
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "caught exception while handling client http traffic, closing connection {}", ctx.channel()),
                cause);
            ctx.channel().close();
        } else {
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "caught exception while handling client http traffic, closing connection {}", ctx.channel()),
                cause);
            ctx.channel().close();
        }
    }
}

@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state,
                               final ActionListener<CloseIndexResponse> listener) {
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
        .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
        .indices(concreteIndices);
    indexStateService.closeIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new CloseIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "failed to close indices [{}]", (Object) concreteIndices), t);
            listener.onFailure(t);
        }
    });
}

@Override
public void onFailure(Exception e) {
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef != null) {
            logger.error(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "unexpected error during recovery [{}], failing shard", recoveryId),
                e);
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e),
                true // be safe
            );
        } else {
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "unexpected error during recovery, but recovery id [{}] is finished", recoveryId),
                e);
        }
    }
}

/**
 * Validates the given settings by running them through all update listeners without applying them. This
 * method does not change any settings but fails if any of the settings can't be applied.
 */
public synchronized Settings validateUpdate(Settings settings) {
    final Settings current = Settings.builder().put(this.settings).put(settings).build();
    final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
    List<RuntimeException> exceptions = new ArrayList<>();
    for (SettingUpdater<?> settingUpdater : settingUpdaters) {
        try {
            // ensure running this through the updater / dynamic validator;
            // we don't check whether the value has changed - we want to exercise the updater either way
            settingUpdater.getValue(current, previous);
        } catch (RuntimeException ex) {
            exceptions.add(ex);
            logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "failed to prepareCommit settings for [{}]", settingUpdater), ex);
        }
    }
    // here we are exhaustive and record all settings that failed
    ExceptionsHelper.rethrowAndSuppress(exceptions);
    return current;
}

void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
    if (nodeJoinController == null) {
        throw new IllegalStateException("discovery module is not yet started");
    } else {
        // we check this in a couple of places, including on the cluster update thread. This check here is
        // really just a best effort to fail as fast as possible.
        MembershipAction.ensureIndexCompatibility(node.getVersion().minimumIndexCompatibilityVersion(), state.getMetaData());
        // try to connect to the node; if it fails, we can raise an exception back to the client...
        transportService.connectToNode(node);
        // validate the join request; this throws a failure if validation fails, which gets back to the
        // node calling the join request
        try {
            membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "failed to validate incoming join request from node [{}]", node), e);
            callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
            return;
        }
        nodeJoinController.handleJoinRequest(node, callback);
    }
}

/**
 * Creates a repository holder
 */
private Repository createRepository(RepositoryMetaData repositoryMetaData) {
    logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
    Repository.Factory factory = typesRegistry.get(repositoryMetaData.type());
    if (factory == null) {
        throw new RepositoryException(repositoryMetaData.name(),
            "repository type [" + repositoryMetaData.type() + "] does not exist");
    }
    try {
        Repository repository = factory.create(repositoryMetaData);
        repository.start();
        return repository;
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage(
            "failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
        throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
    }
}

@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state,
                               final ActionListener<DeleteIndexResponse> listener) {
    final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
    if (concreteIndices.isEmpty()) {
        listener.onResponse(new DeleteIndexResponse(true));
        return;
    }
    DeleteIndexClusterStateUpdateRequest deleteRequest = new DeleteIndexClusterStateUpdateRequest()
        .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
        .indices(concreteIndices.toArray(new Index[concreteIndices.size()]));
    deleteIndexService.deleteIndices(deleteRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new DeleteIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
            listener.onFailure(t);
        }
    });
}

private void cleanupAfterError(Exception exception) {
    if (snapshotCreated) {
        try {
            repositoriesService.repository(snapshot.snapshot().getRepository())
                .finalizeSnapshot(snapshot.snapshot().getSnapshotId(), snapshot.indices(), snapshot.startTime(),
                    ExceptionsHelper.detailedMessage(exception), 0, Collections.emptyList(),
                    snapshot.getRepositoryStateId());
        } catch (Exception inner) {
            inner.addSuppressed(exception);
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
        }
    }
    userCreateSnapshotListener.onFailure(exception);
}

private void onShardClose(ShardLock lock, boolean ownsShard) {
    if (deleted.get()) { // we remove that shard's content if this index has been deleted
        try {
            if (ownsShard) {
                try {
                    eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
                } finally {
                    shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
                    eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
                }
            }
        } catch (IOException e) {
            shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()),
                e);
        }
    }
}

@Override
public final void run() {
    try {
        runInternal();
    } catch (Exception ex) {
        if (lastThrownException == null || sameException(lastThrownException, ex) == false) {
            // avoid spamming the logs: a task that runs at a one-second interval and keeps failing
            // would otherwise log the same exception over and over
            indexService.logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
                    toString()),
                ex);
            lastThrownException = ex;
        }
    } finally {
        onTaskCompletion();
    }
}

/**
 * Validates a set of potentially newly discovered nodes and returns an immutable
 * list of the nodes that have passed.
 */
protected List<DiscoveryNode> validateNewNodes(Set<DiscoveryNode> nodes) {
    for (Iterator<DiscoveryNode> it = nodes.iterator(); it.hasNext(); ) {
        DiscoveryNode node = it.next();
        if (!transportService.nodeConnected(node)) {
            try {
                logger.trace("connecting to node [{}]", node);
                transportService.connectToNode(node);
            } catch (Exception e) {
                it.remove();
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
            }
        }
    }
    return Collections.unmodifiableList(new ArrayList<>(nodes));
}

@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.id());
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
            response.add(request.locations.get(i), termVectorsResponse);
        } catch (Exception t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                    "{} failed to execute multi term vectors for [{}]/[{}]",
                    shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                response.add(request.locations.get(i),
                    new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(),
                        termVectorsRequest.id(), t));
            }
        }
    }
    return response;
}

protected static boolean testRelationSupport(SpatialOperation relation) {
    if (relation == SpatialOperation.IsDisjointTo) {
        // disjoint works in terms of intersection
        relation = SpatialOperation.Intersects;
    }
    try {
        GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3);
        RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area");
        Shape shape = SpatialContext.GEO.makePoint(0, 0);
        SpatialArgs args = new SpatialArgs(relation, shape);
        strategy.makeQuery(args);
        return true;
    } catch (UnsupportedSpatialOperation e) {
        final SpatialOperation finalRelation = relation;
        ESLoggerFactory
            .getLogger(GeoFilterIT.class.getName())
            .info((Supplier<?>) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e);
        return false;
    }
}

@Override
public void match(LogEvent event) {
    if (event.getLevel().equals(level)
            && event.getLoggerName().equals(loggerName)
            && event.getMessage() instanceof ParameterizedMessage) {
        final ParameterizedMessage message = (ParameterizedMessage) event.getMessage();
        saw = message.getFormat().equals(messagePattern)
            && Arrays.deepEquals(arguments, message.getParameters())
            && throwablePredicate.test(event.getThrown());
    }
}

public void testPrefixLogger() throws IOException, IllegalAccessException, UserException {
    setupLogging("prefix");

    final String prefix = randomBoolean() ? null : randomAsciiOfLength(16);
    final Logger logger = Loggers.getLogger("prefix", prefix);
    logger.info("test");
    logger.info("{}", "test");
    final Exception e = new Exception("exception");
    logger.info(new ParameterizedMessage("{}", "test"), e);

    final String path = System.getProperty("es.logs.base_path") + System.getProperty("file.separator")
        + System.getProperty("es.logs.cluster_name") + ".log";
    final List<String> events = Files.readAllLines(PathUtils.get(path));

    final StringWriter sw = new StringWriter();
    final PrintWriter pw = new PrintWriter(sw);
    e.printStackTrace(pw);
    final int stackTraceLength = sw.toString().split(System.getProperty("line.separator")).length;
    final int expectedLogLines = 3;
    assertThat(events.size(), equalTo(expectedLogLines + stackTraceLength));
    for (int i = 0; i < expectedLogLines; i++) {
        if (prefix == null) {
            assertThat(events.get(i), startsWith("test"));
        } else {
            assertThat(events.get(i), startsWith("[" + prefix + "] test"));
        }
    }
}

private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
    ShardSearchFailure[] failures = response.getShardFailures();
    if (failures.length != expectedFailures) {
        for (ShardSearchFailure failure : failures) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause());
        }
        fail("Unexpected shard failures!");
    }
    assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
}

@Override
public void waitForTaskCompletion(Task task, long untilInNanos) {
    for (MockTaskManagerListener listener : listeners) {
        try {
            listener.waitForTaskCompletion(task);
        } catch (Exception e) {
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to notify task manager listener about waitForTaskCompletion the task with id {}",
                    task.getId()),
                e);
        }
    }
    super.waitForTaskCompletion(task, untilInNanos);
}

@Override
public void run() {
    try {
        runnable.run();
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
    }
}

@Override
public void createContainer(String account, LocationMode mode, String container)
        throws URISyntaxException, StorageException {
    try {
        CloudBlobClient client = this.getSelectedClient(account, mode);
        CloudBlobContainer blobContainer = client.getContainerReference(container);
        logger.trace("creating container [{}]", container);
        SocketAccess.doPrivilegedException(blobContainer::createIfNotExists);
    } catch (IllegalArgumentException e) {
        logger.trace((Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container), e);
        throw new RepositoryException(container, e.getMessage(), e);
    }
}

public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) {
    if (version == null) {
        return defaultVersion;
    }
    try {
        return Version.parse(version);
    } catch (ParseException e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
        return defaultVersion;
    }
}

@Override
protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state,
                               final ActionListener<DeleteIndexTemplateResponse> listener) {
    indexTemplateService.removeTemplates(
        new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()),
        new MetaDataIndexTemplateService.RemoveListener() {

            @Override
            public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
                listener.onResponse(new DeleteIndexTemplateResponse(response.acknowledged()));
            }

            @Override
            public void onFailure(Exception e) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
                listener.onFailure(e);
            }
        });
}

@Override
public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception,
                              Runnable onSuccess, Consumer<Exception> onPrimaryDemoted,
                              Consumer<Exception> onIgnoredFailure) {
    logger.warn((org.apache.logging.log4j.util.Supplier<?>)
        () -> new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception);
    shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, message, exception,
        createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
}

void sendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory)
        throws Exception {
    store.incRef();
    try {
        ArrayUtil.timSort(files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first
        for (int i = 0; i < files.length; i++) {
            final StoreFileMetaData md = files[i];
            try (IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
                // it's fine that only the indexInput is in the try-with-resources block. The copy method handles
                // exceptions during close correctly and doesn't hide the original exception.
                Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
            } catch (Exception e) {
                final IOException corruptIndexException;
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
                    if (store.checkIntegrityNoException(md) == false) {
                        // we are corrupted on the primary -- fail!
                        logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md);
                        failEngine(corruptIndexException);
                        throw corruptIndexException;
                    } else {
                        // corruption has happened on the way to the replica
                        RemoteTransportException exception = new RemoteTransportException(
                            "File corruption occurred on recovery but checksums are ok", null);
                        exception.addSuppressed(e);
                        logger.warn(
                            (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                                "{} Remote file corruption on node {}, recovering {}. local checksum OK",
                                shardId, request.targetNode(), md),
                            corruptIndexException);
                        throw exception;
                    }
                } else {
                    throw e;
                }
            }
        }
    } finally {
        store.decRef();
    }
}

@Override
public void handleException(TransportException exp) {
    logger.debug((Supplier<?>) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
    if (awaitingResponses.decrementAndGet() == 0) {
        allNodesResponded();
    }
}

/**
 * Stores the task failure
 */
public <Response extends ActionResponse> void storeResult(Task task, Exception error, ActionListener<Response> listener) {
    DiscoveryNode localNode = lastDiscoveryNodes.getLocalNode();
    if (localNode == null) {
        // too early to store anything, shouldn't really be here - just pass the error along
        listener.onFailure(error);
        return;
    }
    final TaskResult taskResult;
    try {
        taskResult = task.result(localNode, error);
    } catch (IOException ex) {
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "couldn't store error {}", ExceptionsHelper.detailedMessage(error)),
            ex);
        listener.onFailure(ex);
        return;
    }
    taskResultsService.storeResult(taskResult, new ActionListener<Void>() {

        @Override
        public void onResponse(Void aVoid) {
            // the failure was stored successfully; now propagate the original error to the listener
            listener.onFailure(error);
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "couldn't store error {}", ExceptionsHelper.detailedMessage(error)),
                e);
            listener.onFailure(e);
        }
    });
}

public String taskResultIndexMapping() {
    try (InputStream is = getClass().getResourceAsStream(TASK_RESULT_INDEX_MAPPING_FILE)) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Streams.copy(is, out);
        return out.toString(IOUtils.UTF_8);
    } catch (Exception e) {
        logger.error(
            (Supplier<?>) () -> new ParameterizedMessage(
                "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE),
            e);
        throw new IllegalStateException(
            "failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
    }
}

private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex,
                              final ShardRouting shardRouting) {
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
        }
        ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
        shardResults[shardIndex] = result;
        if (logger.isTraceEnabled()) {
            logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
        }
    } catch (Exception e) {
        BroadcastShardOperationFailedException failure =
            new BroadcastShardOperationFailedException(shardRouting.shardId(),
                "operation " + actionName + " failed", e);
        failure.setShard(shardRouting.shardId());
        shardResults[shardIndex] = failure;
        if (TransportActions.isShardNotAvailableException(e)) {
            if (logger.isTraceEnabled()) {
                logger.trace(
                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                        "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()),
                    e);
            }
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                        "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()),
                    e);
            }
        }
    }
}

/**
 * Returns a list of snapshots from the repository sorted by snapshot creation date
 *
 * @param repositoryName    repository name
 * @param snapshotIds       snapshots for which to fetch snapshot information
 * @param ignoreUnavailable if true, snapshots that could not be read will only be logged with a warning,
 *                          if false, they will throw an error
 * @return list of snapshots
 */
public List<SnapshotInfo> snapshots(final String repositoryName, List<SnapshotId> snapshotIds,
                                    final boolean ignoreUnavailable) {
    final Set<SnapshotInfo> snapshotSet = new HashSet<>();
    final Set<SnapshotId> snapshotIdsToIterate = new HashSet<>(snapshotIds);
    // first, look at the snapshots in progress
    final List<SnapshotsInProgress.Entry> entries = currentSnapshots(repositoryName,
        snapshotIdsToIterate.stream().map(SnapshotId::getName).collect(Collectors.toList()));
    for (SnapshotsInProgress.Entry entry : entries) {
        snapshotSet.add(inProgressSnapshot(entry));
        snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId());
    }
    // then, look in the repository
    final Repository repository = repositoriesService.repository(repositoryName);
    for (SnapshotId snapshotId : snapshotIdsToIterate) {
        try {
            snapshotSet.add(repository.getSnapshotInfo(snapshotId));
        } catch (Exception ex) {
            if (ignoreUnavailable) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
            } else {
                throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
            }
        }
    }
    final ArrayList<SnapshotInfo> snapshotList = new ArrayList<>(snapshotSet);
    CollectionUtil.timSort(snapshotList);
    return Collections.unmodifiableList(snapshotList);
}

private void sendCommitToNode(final DiscoveryNode node, final ClusterState clusterState,
                              final SendingController sendingController) {
    try {
        logger.trace("sending commit for cluster state (uuid: [{}], version [{}]) to [{}]",
            clusterState.stateUUID(), clusterState.version(), node);
        TransportRequestOptions options = TransportRequestOptions.builder()
            .withType(TransportRequestOptions.Type.STATE).build();
        // no need to put a timeout on the options here, because we want the response to eventually be received
        // and not log an error if it arrives after the timeout
        transportService.sendRequest(node, COMMIT_ACTION_NAME,
            new CommitClusterStateRequest(clusterState.stateUUID()), options,
            new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {

                @Override
                public void handleResponse(TransportResponse.Empty response) {
                    if (sendingController.getPublishingTimedOut()) {
                        logger.debug("node {} responded to cluster state commit [{}]", node, clusterState.version());
                    }
                    sendingController.getPublishResponseHandler().onResponse(node);
                }

                @Override
                public void handleException(TransportException exp) {
                    logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                        "failed to commit cluster state (uuid [{}], version [{}]) to {}",
                        clusterState.stateUUID(), clusterState.version(), node), exp);
                    sendingController.getPublishResponseHandler().onFailure(node, exp);
                }
            });
    } catch (Exception t) {
        logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
            "error sending cluster state commit (uuid [{}], version [{}]) to {}",
            clusterState.stateUUID(), clusterState.version(), node), t);
        sendingController.getPublishResponseHandler().onFailure(node, t);
    }
}

/**
 * tries marking the publishing as failed, if a decision wasn't made yet
 *
 * @return true if the publishing was failed and the cluster state is *not* committed
 **/
private synchronized boolean markAsFailed(String details, Exception reason) {
    if (committedOrFailed()) {
        return committed == false;
    }
    logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
        "failed to commit version [{}]. {}", clusterState.version(), details), reason);
    committed = false;
    committedOrFailedLatch.countDown();
    return true;
}

@Override
public void onFailure(String source, Exception e) {
    for (MembershipAction.JoinCallback callback : callbacks) {
        try {
            callback.onFailure(e);
        } catch (Exception inner) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
        }
    }
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    for (MembershipAction.JoinCallback callback : callbacks) {
        try {
            callback.onSuccess();
        } catch (Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error during [{}]", source), e);
        }
    }
}

void onFailedFreedContext(Throwable e, DiscoveryNode node) {
    logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
    if (expectedOps.countDown()) {
        listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
    } else {
        expHolder.set(e);
    }
}

@Override
public void onFailure(Exception e) {
    if (e instanceof RetryOnReplicaException) {
        logger.trace(
            (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                "Retrying operation on replica, action [{}], request [{}]", transportReplicaAction, request),
            e);
        request.onRetry();
        observer.waitForNextChange(new ClusterStateObserver.Listener() {

            @Override
            public void onNewClusterState(ClusterState state) {
                // Forking a thread on the local node via the transport service so that custom transport services
                // have an opportunity to execute custom logic before the replica operation begins
                String extraMessage = "action [" + transportReplicaAction + "], request [" + request + "]";
                TransportChannelResponseHandler<TransportResponse.Empty> handler =
                    new TransportChannelResponseHandler<>(logger, channel, extraMessage,
                        () -> TransportResponse.Empty.INSTANCE);
                transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
                    new ConcreteShardRequest<>(request, targetAllocationID), handler);
            }

            @Override
            public void onClusterServiceClose() {
                responseWithFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                throw new AssertionError("Cannot happen: there is no timeout");
            }
        });
    } else {
        responseWithFailure(e);
    }
}