Example source code for the Java class org.apache.logging.log4j.util.Supplier

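Nearly every snippet below uses the same idiom: a lambda is cast to org.apache.logging.log4j.util.Supplier<?> so that the ParameterizedMessage is only constructed if the target log level is enabled, while the exception is passed separately as the Throwable argument; the explicit cast also disambiguates between the Supplier and MessageSupplier overloads of the Log4j 2 Logger methods. The following minimal, self-contained sketch shows the idiom in isolation. It assumes only Log4j 2 on the classpath; the class name, message text, and shardId value are illustrative and not taken from the Elasticsearch snippets.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class SupplierLoggingExample {

    private static final Logger logger = LogManager.getLogger(SupplierLoggingExample.class);

    public static void main(String[] args) {
        String shardId = "[my-index][0]"; // illustrative placeholder value
        try {
            throw new IllegalStateException("simulated failure");
        } catch (Exception e) {
            // The cast selects the lazy overload debug(Supplier<?>, Throwable);
            // the ParameterizedMessage is only built when DEBUG is enabled.
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage("{} failed to refresh shard", shardId), e);
        }
    }
}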
Project: elasticsearch_my    File: Netty4Transport.java
@Override
@SuppressForbidden(reason = "debug")
protected void stopInternal() {
    Releasables.close(serverOpenChannels, () -> {
        final List<Tuple<String, Future<?>>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size());
        for (final Map.Entry<String, ServerBootstrap> entry : serverBootstraps.entrySet()) {
            serverBootstrapCloseFutures.add(
                Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS)));
        }
        for (final Tuple<String, Future<?>> future : serverBootstrapCloseFutures) {
            future.v2().awaitUninterruptibly();
            if (!future.v2().isSuccess()) {
                logger.debug(
                    (Supplier<?>) () -> new ParameterizedMessage(
                        "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause());
            }
        }
        serverBootstraps.clear();

        if (bootstrap != null) {
            bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
            bootstrap = null;
        }
    });
}
Project: elasticsearch_my    File: HunspellService.java
/**
 * Scans the hunspell directory and loads all found dictionaries
 */
private void scanAndLoadDictionaries() throws IOException {
    if (Files.isDirectory(hunspellDir)) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(hunspellDir)) {
            for (Path file : stream) {
                if (Files.isDirectory(file)) {
                    try (DirectoryStream<Path> inner = Files.newDirectoryStream(hunspellDir.resolve(file), "*.dic")) {
                        if (inner.iterator().hasNext()) { // just making sure it's indeed a dictionary dir
                            try {
                                getDictionary(file.getFileName().toString());
                            } catch (Exception e) {
                                // The cache loader throws an unchecked exception (see #loadDictionary()),
                                // here we simply report the exception and continue loading the dictionaries
                                logger.error(
                                    (Supplier<?>) () -> new ParameterizedMessage(
                                        "exception while loading dictionary {}", file.getFileName()), e);
                            }
                        }
                    }
                }
            }
        }
    }
}
Project: elasticsearch_my    File: IndicesClusterStateService.java
private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFailure, String message, @Nullable Exception failure,
                                ClusterState state) {
    try {
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardRouting.shardId().getIndex());
        if (indexService != null) {
            indexService.removeShard(shardRouting.shardId().id(), message);
        }
    } catch (ShardNotFoundException e) {
        // the node got closed on us, ignore it
    } catch (Exception inner) {
        inner.addSuppressed(failure);
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[{}][{}] failed to remove shard after failure ([{}])",
                shardRouting.getIndexName(),
                shardRouting.getId(),
                message),
            inner);
    }
    if (sendShardFailure) {
        sendFailShard(shardRouting, message, failure, state);
    }
}
Project: elasticsearch_my    File: SearchScrollQueryThenFetchAsyncAction.java
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
    if (logger.isDebugEnabled()) {
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
    }
    addShardFailure(shardIndex, new ShardSearchFailure(failure));
    successfulOps.decrementAndGet();
    if (counter.decrementAndGet() == 0) {
        if (successfulOps.get() == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures()));
        } else {
            try {
                executeFetchPhase();
            } catch (Exception e) {
                e.addSuppressed(failure);
                listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
            }
        }
    }
}
Project: elasticsearch_my    File: GeoFilterIT.java
protected static boolean testRelationSupport(SpatialOperation relation) {
    if (relation == SpatialOperation.IsDisjointTo) {
        // disjoint works in terms of intersection
        relation = SpatialOperation.Intersects;
    }
    try {
        GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3);
        RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area");
        Shape shape = SpatialContext.GEO.makePoint(0, 0);
        SpatialArgs args = new SpatialArgs(relation, shape);
        strategy.makeQuery(args);
        return true;
    } catch (UnsupportedSpatialOperation e) {
        final SpatialOperation finalRelation = relation;
        ESLoggerFactory
            .getLogger(GeoFilterIT.class.getName())
            .info((Supplier<?>) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e);
        return false;
    }
}
Project: elasticsearch_my    File: MockTaskManager.java
@Override
public Task register(String type, String action, TransportRequest request) {
    Task task = super.register(type, action, request);
    if (task != null) {
        for (MockTaskManagerListener listener : listeners) {
            try {
                listener.onTaskRegistered(task);
            } catch (Exception e) {
                logger.warn(
                    (Supplier<?>) () -> new ParameterizedMessage(
                        "failed to notify task manager listener about unregistering the task with id {}",
                        task.getId()),
                    e);
            }
        }
    }
    return task;
}
Project: elasticsearch_my    File: TransportOpenIndexAction.java
@Override
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
            .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
            .indices(concreteIndices);

    indexStateService.openIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new OpenIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
            listener.onFailure(t);
        }
    });
}
Project: elasticsearch_my    File: SyncedFlushService.java
@Override
public void onShardInactive(final IndexShard indexShard) {
    // we only want to call sync flush once, so only trigger it when we are on a primary
    if (indexShard.routingEntry().primary()) {
        attemptSyncedFlush(indexShard.shardId(), new ActionListener<ShardsSyncedFlushResult>() {
            @Override
            public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                logger.trace("{} sync flush on inactive shard returned successfully for sync_id: {}", syncedFlushResult.getShardId(), syncedFlushResult.syncId());
            }

            @Override
            public void onFailure(Exception e) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
            }
        });
    }
}
Project: elasticsearch_my    File: ClientScrollableHitSource.java
@Override
public void clearScroll(String scrollId, Runnable onCompletion) {
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.addScrollId(scrollId);
    /*
     * Unwrap the client so we don't set our task as the parent. If we *did* set our task as the parent, then the clear
     * scroll would be cancelled whenever this task is cancelled. But we want to clear the scroll regardless of whether or
     * not the main request was cancelled.
     */
    client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
        @Override
        public void onResponse(ClearScrollResponse response) {
            logger.debug("Freed [{}] contexts", response.getNumFreed());
            onCompletion.run();
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
            onCompletion.run();
        }
    });
}
Project: elasticsearch_my    File: NodesFaultDetection.java
private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
    try {
        threadPool.generic().execute(new Runnable() {
            @Override
            public void run() {
                for (Listener listener : listeners) {
                    listener.onNodeFailure(node, reason);
                }
            }
        });
    } catch (EsRejectedExecutionException ex) {
        logger.trace(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[node  ] [{}] ignoring node failure (reason [{}]). Local node is shutting down",
                node,
                reason),
            ex);
    }
}
Project: elasticsearch_my    File: ZenDiscovery.java
void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
    if (nodeJoinController == null) {
        throw new IllegalStateException("discovery module is not yet started");
    } else {
        // we do this in a couple of places including the cluster update thread. This one here is really just best effort
        // to ensure we fail as fast as possible.
        MembershipAction.ensureIndexCompatibility(node.getVersion().minimumIndexCompatibilityVersion(), state.getMetaData());
        // try and connect to the node, if it fails, we can raise an exception back to the client...
        transportService.connectToNode(node);

        // validate the join request, will throw a failure if it fails, which will get back to the
        // node calling the join request
        try {
            membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
            callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
            return;
        }
        nodeJoinController.handleJoinRequest(node, callback);
    }
}
Project: elasticsearch_my    File: RepositoriesService.java
/**
 * Creates repository holder
 */
private Repository createRepository(RepositoryMetaData repositoryMetaData) {
    logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
    Repository.Factory factory = typesRegistry.get(repositoryMetaData.type());
    if (factory == null) {
        throw new RepositoryException(repositoryMetaData.name(),
            "repository type [" + repositoryMetaData.type() + "] does not exist");
    }
    try {
        Repository repository = factory.create(repositoryMetaData);
        repository.start();
        return repository;
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
        throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
    }
}
Project: elasticsearch_my    File: TransportBulkAction.java
void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
    long ingestStartTimeInNanos = System.nanoTime();
    BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
    ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]",
            indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
        bulkRequestModifier.markCurrentItemAsFailed(exception);
    }, (exception) -> {
        if (exception != null) {
            logger.error("failed to execute pipeline for a bulk request", exception);
            listener.onFailure(exception);
        } else {
            long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
            BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
            ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
            if (bulkRequest.requests().isEmpty()) {
                // at this stage, the transport bulk action can't deal with a bulk request with no requests,
                // so we stop and send an empty response back to the client.
                // (this will happen if pre-processing all items in the bulk failed)
                actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0));
            } else {
                doExecute(task, bulkRequest, actionListener);
            }
        }
    });
}
Project: elasticsearch_my    File: GlobalCheckpointSyncAction.java
public void updateCheckpointForShard(ShardId shardId) {
    execute(new PrimaryRequest(shardId), new ActionListener<ReplicationResponse>() {
        @Override
        public void onResponse(ReplicationResponse replicationResponse) {
            if (logger.isTraceEnabled()) {
                logger.trace("{} global checkpoint successfully updated (shard info [{}])", shardId,
                    replicationResponse.getShardInfo());
            }
        }

        @Override
        public void onFailure(Exception e) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to update global checkpoint", shardId), e);
        }
    });
}
Project: elasticsearch_my    File: AbstractSearchAsyncAction.java
@Override
public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) {
    /* This is the main search phase transition where we move to the next phase. At this point we check if there is
     * at least one successful operation left and if so we move to the next phase. If not we immediately fail the
     * search phase as "all shards failed"*/
    if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure
        if (logger.isDebugEnabled()) {
            final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
            Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()),
                cause);
        }
        onPhaseFailure(currentPhase, "all shards failed", null);
    } else {
        if (logger.isTraceEnabled()) {
            final String resultsFrom = results.getSuccessfulResults()
                .map(r -> r.shardTarget().toString()).collect(Collectors.joining(","));
            logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
                currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion);
        }
        executePhase(nextPhase);
    }
}
Project: elasticsearch_my    File: TransportCloseIndexAction.java
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
            .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
            .indices(concreteIndices);

    indexStateService.closeIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new CloseIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
            listener.onFailure(t);
        }
    });
}
Project: elasticsearch_my    File: TransportUpdateSettingsAction.java
@Override
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
            .indices(concreteIndices)
            .settings(request.settings())
            .setPreserveExisting(request.isPreserveExisting())
            .ackTimeout(request.timeout())
            .masterNodeTimeout(request.masterNodeTimeout());

    updateSettingsService.updateSettings(clusterStateUpdateRequest, new ActionListener<ClusterStateUpdateResponse>() {
        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new UpdateSettingsResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
            listener.onFailure(t);
        }
    });
}
Project: elasticsearch_my    File: IndexService.java
@Override
public final void run() {
    try {
        runInternal();
    } catch (Exception ex) {
        if (lastThrownException == null || sameException(lastThrownException, ex) == false) {
            // prevent logging the same exception over and over; with the 1 sec interval this would otherwise spam the logs
            indexService.logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
                    toString()),
                ex);
            lastThrownException = ex;
        }
    } finally {
        onTaskCompletion();
    }
}
Project: elasticsearch_my    File: SnapshotsService.java
private void cleanupAfterError(Exception exception) {
    if(snapshotCreated) {
        try {
            repositoriesService.repository(snapshot.snapshot().getRepository())
                               .finalizeSnapshot(snapshot.snapshot().getSnapshotId(),
                                                 snapshot.indices(),
                                                 snapshot.startTime(),
                                                 ExceptionsHelper.detailedMessage(exception),
                                                 0,
                                                 Collections.emptyList(),
                                                 snapshot.getRepositoryStateId());
        } catch (Exception inner) {
            inner.addSuppressed(exception);
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
        }
    }
    userCreateSnapshotListener.onFailure(exception);
}
Project: elasticsearch_my    File: TransportClientNodesService.java
/**
 * Validates a set of potentially newly discovered nodes and returns an immutable
 * list of the nodes that passed validation.
 */
protected List<DiscoveryNode> validateNewNodes(Set<DiscoveryNode> nodes) {
    for (Iterator<DiscoveryNode> it = nodes.iterator(); it.hasNext(); ) {
        DiscoveryNode node = it.next();
        if (!transportService.nodeConnected(node)) {
            try {
                logger.trace("connecting to node [{}]", node);
                transportService.connectToNode(node);
            } catch (Exception e) {
                it.remove();
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
            }
        }
    }

    return Collections.unmodifiableList(new ArrayList<>(nodes));
}
Project: elasticsearch_my    File: BytesRestResponse.java
private static XContentBuilder build(RestChannel channel, RestStatus status, Exception e) throws IOException {
    ToXContent.Params params = channel.request();
    if (params.paramAsBoolean("error_trace", !REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) {
        params =  new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params);
    } else if (e != null) {
        Supplier<?> messageSupplier = () -> new ParameterizedMessage("path: {}, params: {}",
                channel.request().rawPath(), channel.request().params());

        if (status.getStatus() < 500) {
            SUPPRESSED_ERROR_LOGGER.debug(messageSupplier, e);
        } else {
            SUPPRESSED_ERROR_LOGGER.warn(messageSupplier, e);
        }
    }

    XContentBuilder builder = channel.newErrorBuilder().startObject();
    ElasticsearchException.generateFailureXContent(builder, params, e, channel.detailedErrorsEnabled());
    builder.field(STATUS, status.getStatus());
    builder.endObject();
    return builder;
}
Project: elasticsearch_my    File: TcpTransport.java
@Override
protected void doRunInLifecycle() throws Exception {
    for (Map.Entry<DiscoveryNode, NodeChannels> entry : connectedNodes.entrySet()) {
        DiscoveryNode node = entry.getKey();
        NodeChannels channels = entry.getValue();
        for (Channel channel : channels.getChannels()) {
            try {
                sendMessage(channel, pingHeader, successfulPings::inc);
            } catch (Exception e) {
                if (isOpen(channel)) {
                    logger.debug(
                        (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
                    failedPings.inc();
                } else {
                    logger.trace(
                        (Supplier<?>) () -> new ParameterizedMessage(
                            "[{}] failed to send ping transport message (channel closed)", node), e);
                }
            }
        }
    }
}
Project: elasticsearch_my    File: TransportDeleteIndexAction.java
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
    final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
    if (concreteIndices.isEmpty()) {
        listener.onResponse(new DeleteIndexResponse(true));
        return;
    }

    DeleteIndexClusterStateUpdateRequest deleteRequest = new DeleteIndexClusterStateUpdateRequest()
        .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
        .indices(concreteIndices.toArray(new Index[concreteIndices.size()]));

    deleteIndexService.deleteIndices(deleteRequest, new ActionListener<ClusterStateUpdateResponse>() {

        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            listener.onResponse(new DeleteIndexResponse(response.isAcknowledged()));
        }

        @Override
        public void onFailure(Exception t) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
            listener.onFailure(t);
        }
    });
}
Project: elasticsearch_my    File: TribeService.java
public void startNodes() {
    for (Node node : nodes) {
        try {
            getClusterService(node).addListener(new TribeClusterStateListener(node));
            node.start();
        } catch (Exception e) {
            // calling close is safe for non started nodes, we can just iterate over all
            for (Node otherNode : nodes) {
                try {
                    otherNode.close();
                } catch (Exception inner) {
                    inner.addSuppressed(e);
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to close node {} on failed start", otherNode), inner);
                }
            }
            if (e instanceof RuntimeException) {
                throw (RuntimeException) e;
            }
            throw new ElasticsearchException(e);
        }
    }
}
Project: elasticsearch_my    File: ClusterService.java
public ClusterService(Settings settings,
                      ClusterSettings clusterSettings, ThreadPool threadPool, Supplier<DiscoveryNode> localNodeSupplier) {
    super(settings);
    this.localNodeSupplier = localNodeSupplier;
    this.operationRouting = new OperationRouting(settings, clusterSettings);
    this.threadPool = threadPool;
    this.clusterSettings = clusterSettings;
    this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
    // will be replaced on doStart.
    this.state = new AtomicReference<>(ClusterState.builder(clusterName).build());

    this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
            this::setSlowTaskLoggingThreshold);

    this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);

    localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);

    initialBlocks = ClusterBlocks.builder();
}
Project: elasticsearch_my    File: ClusterService.java
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
    if (!ackedTaskListener.mustAck(node)) {
        //we always wait for the master ack anyway
        if (!node.equals(nodes.getMasterNode())) {
            return;
        }
    }
    if (e == null) {
        logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
    } else {
        this.lastFailure = e;
        logger.debug(
            (Supplier<?>) () -> new ParameterizedMessage(
                "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
            e);
    }

    if (countDown.countDown()) {
        logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
        FutureUtils.cancel(ackTimeoutCallback);
        ackedTaskListener.onAllNodesAcked(lastFailure);
    }
}
Project: elasticsearch_my    File: AbstractScopedSettings.java
/**
 * Validates the given settings by running it through all update listeners without applying it. This
 * method will not change any settings but will fail if any of the settings can't be applied.
 */
public synchronized Settings validateUpdate(Settings settings) {
    final Settings current = Settings.builder().put(this.settings).put(settings).build();
    final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
    List<RuntimeException> exceptions = new ArrayList<>();
    for (SettingUpdater<?> settingUpdater : settingUpdaters) {
        try {
            // ensure running this through the updater / dynamic validator
            // don't check whether the value has changed; we want to validate it anyway
            settingUpdater.getValue(current, previous);
        } catch (RuntimeException ex) {
            exceptions.add(ex);
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
        }
    }
    // here we are exhaustive and record all settings that failed.
    ExceptionsHelper.rethrowAndSuppress(exceptions);
    return current;
}
Project: elasticsearch_my    File: IndexFolderUpgrader.java
/**
 * Moves the index folder found in <code>source</code> to <code>target</code>
 */
void upgrade(final Index index, final Path source, final Path target) throws IOException {
    boolean success = false;
    try {
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        success = true;
    } catch (NoSuchFileException | FileNotFoundException exception) {
        // thrown when the source is non-existent because the folder was renamed
        // by another node (shared FS) after we checked if the target exists
        logger.error((Supplier<?>) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
            "upgrading with single node", target), exception);
        throw exception;
    } finally {
        if (success) {
            logger.info("{} moved from [{}] to [{}]", index, source, target);
            logger.trace("{} syncing directory [{}]", index, target);
            IOUtils.fsync(target, true);
        }
    }
}
Project: elasticsearch_my    File: AzureStorageServiceImpl.java
@Override
public void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException {
    try {
        CloudBlobClient client = this.getSelectedClient(account, mode);
        CloudBlobContainer blobContainer = client.getContainerReference(container);
        logger.trace("creating container [{}]", container);
        SocketAccess.doPrivilegedException(blobContainer::createIfNotExists);
    } catch (IllegalArgumentException e) {
        logger.trace((Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container), e);
        throw new RepositoryException(container, e.getMessage(), e);
    }
}
Project: elasticsearch_my    File: TransportShardMultiGetAction.java
@Override
protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.getShard(shardId.id());

    if (request.refresh() && !request.realtime()) {
        indexShard.refresh("refresh_flag_mget");
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
        MultiGetRequest.Item item = request.items.get(i);
        try {
            GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(),
                item.versionType(), item.fetchSourceContext());
            response.add(request.locations.get(i), new GetResponse(getResult));
        } catch (Exception e) {
            if (TransportActions.isShardNotAvailableException(e)) {
                throw (ElasticsearchException) e;
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
                    item.type(), item.id()), e);
                response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
            }
        }
    }

    return response;
}
Project: elasticsearch_my    File: MockTaskManager.java
@Override
public void waitForTaskCompletion(Task task, long untilInNanos) {
    for (MockTaskManagerListener listener : listeners) {
        try {
            listener.waitForTaskCompletion(task);
        } catch (Exception e) {
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to notify task manager listener about waitForTaskCompletion the task with id {}",
                    task.getId()),
                e);
        }
    }
    super.waitForTaskCompletion(task, untilInNanos);
}
Project: elasticsearch_my    File: StatsIT.java
private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
    ShardSearchFailure[] failures = response.getShardFailures();
    if (failures.length != expectedFailures) {
        for (ShardSearchFailure failure : failures) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause());
        }
        fail("Unexpected shard failures!");
    }
    assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
}
Project: elasticsearch_my    File: FetchSearchPhase.java
private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget,
                          final CountedCollector<FetchSearchResult> counter,
                          final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult,
                          final Transport.Connection connection) {
    context.getSearchTransport().sendExecuteFetch(connection, fetchSearchRequest, context.getTask(),
        new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                counter.onResult(shardIndex, result, shardTarget);
            }

            @Override
            public void onFailure(Exception e) {
                try {
                    if (logger.isDebugEnabled()) {
                        logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase",
                            fetchSearchRequest.id()), e);
                    }
                    counter.onFailure(shardIndex, shardTarget, e);
                } finally {
                    // the search context might not be cleared on the node where the fetch was executed for example
                    // because the action was rejected by the thread pool. in this case we need to send a dedicated
                    // request to clear the search context.
                    releaseIrrelevantSearchContext(querySearchResult);
                }
            }
        });
}
Project: elasticsearch_my    File: IndexingMemoryController.java
/** ask this shard to refresh, in the background, to free up heap */
protected void writeIndexingBufferAsync(IndexShard shard) {
    threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
        @Override
        public void doRun() {
            shard.writeIndexingBuffer();
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
        }
    });
}
Project: elasticsearch_my    File: IndexingMemoryController.java
/**
 * ask this shard to check now whether it is inactive, and reduces its indexing buffer if so.
 */
protected void checkIdle(IndexShard shard, long inactiveTimeNS) {
    try {
        shard.checkIdle(inactiveTimeNS);
    } catch (AlreadyClosedException e) {
        logger.trace((Supplier<?>) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
    }
}
Project: elasticsearch_my    File: RecoveryTarget.java
@Override
protected void closeInternal() {
    try {
        // clean open index outputs
        Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, IndexOutput> entry = iterator.next();
            logger.trace("closing IndexOutput file [{}]", entry.getValue());
            try {
                entry.getValue().close();
            } catch (Exception e) {
                logger.debug(
                    (Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
            }
            iterator.remove();
        }
        // trash temporary files
        for (String file : tempFileNames.keySet()) {
            logger.trace("cleaning temporary file [{}]", file);
            store.deleteQuiet(file);
        }
    } finally {
        // free store. increment happens in constructor
        store.decRef();
        indexShard.recoveryStats().decCurrentAsTarget();
        closedLatch.countDown();
    }
}
Project: elasticsearch_my    File: IndicesStore.java
@Override
public void handleException(TransportException exp) {
    logger.debug((Supplier<?>) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
    if (awaitingResponses.decrementAndGet() == 0) {
        allNodesResponded();
    }
}
Project: elasticsearch_my    File: TaskManager.java
/**
 * Stores the task result
 */
public <Response extends ActionResponse> void storeResult(Task task, Response response, ActionListener<Response> listener) {
    DiscoveryNode localNode = lastDiscoveryNodes.getLocalNode();
    if (localNode == null) {
        // too early to store anything, shouldn't really be here - just pass the response along
        logger.warn("couldn't store response {}, the node didn't join the cluster yet", response);
        listener.onResponse(response);
        return;
    }
    final TaskResult taskResult;
    try {
        taskResult = task.result(localNode, response);
    } catch (IOException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), ex);
        listener.onFailure(ex);
        return;
    }

    taskResultsService.storeResult(taskResult, new ActionListener<Void>() {
        @Override
        public void onResponse(Void aVoid) {
            listener.onResponse(response);
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), e);
            listener.onFailure(e);
        }
    });
}
Project: elasticsearch_my    File: TaskResultsService.java
public String taskResultIndexMapping() {
    try (InputStream is = getClass().getResourceAsStream(TASK_RESULT_INDEX_MAPPING_FILE)) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Streams.copy(is, out);
        return out.toString(IOUtils.UTF_8);
    } catch (Exception e) {
        logger.error(
            (Supplier<?>) () -> new ParameterizedMessage(
                "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
        throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
    }

}
Project: elasticsearch_my    File: NodeJoinController.java
@Override
public void onFailure(String source, Exception e) {
    for (MembershipAction.JoinCallback callback : callbacks) {
        try {
            callback.onFailure(e);
        } catch (Exception inner) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
        }
    }
}