/**
 * Captures the outcome of a write operation executed on the primary shard.
 * Exactly one of {@code location} (the translog location of a successful write) or
 * {@code operationFailure} may be non-null; the assert below enforces that invariant.
 * On success, post-write actions (refresh/translog sync) are kicked off immediately so
 * they can proceed in parallel with replication.
 *
 * @param request          the replica request that produced this result
 * @param finalResponse    the response to eventually send to the caller, may be null
 * @param location         translog location of the successful write, null on failure
 * @param operationFailure the failure, null on success
 * @param primary          the primary shard the write was executed on
 * @param logger           logger used by the async after-write action
 */
public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse,
                          @Nullable Location location, @Nullable Exception operationFailure,
                          IndexShard primary, Logger logger) {
    super(request, finalResponse, operationFailure);
    this.location = location;
    assert location == null || operationFailure == null
            : "expected either failure to be null or translog location to be null, "
            + "but found: [" + location + "] translog location and [" + operationFailure + "] failure";
    if (operationFailure != null) {
        // Nothing asynchronous to wait for when the operation already failed.
        this.finishedAsyncActions = true;
    } else {
        /*
         * We call this before replication because this might wait for a refresh and that can take a while.
         * This way we wait for the refresh in parallel on the primary and on the replica.
         */
        new AsyncAfterWriteAction(primary, request, location, this, logger).run();
    }
}
/**
 * Logs a failure raised by a Discord4J command handler and, when the failure is a
 * {@link MissingPermissionsException}, replies in-channel telling the command author
 * which permissions the bot still needs. The reply is handed to the message deletion
 * service so it is scheduled for later cleanup.
 * Any Discord error raised while sending the reply itself is logged and suppressed.
 *
 * @param logger         logger to record the failure on
 * @param e              the exception thrown by the command handler
 * @param commandHandler the handler that failed (used only for its class name in the log)
 * @param message        the message that triggered the command; its author is mentioned in the reply
 */
public static void handleDiscord4JException(@NotNull Logger logger, @NotNull Exception e,
        @NotNull ICommand commandHandler, @NotNull IMessage message) {
    try {
        // Lambda suppliers defer the getSimpleName() calls until the log level is known to be enabled.
        logger.error("{} failed to handle command, a {} was captured",
                () -> commandHandler.getClass().getSimpleName(),
                () -> e.getClass().getSimpleName());
        logger.error(e);
        if (e instanceof MissingPermissionsException) {
            // Tell the user which permissions are missing; the sent message is scheduled for deletion.
            messageDeletionService().schedule(getMessageBuilder(message)
                    .appendContent(message.getAuthor().mention())
                    .appendContent(" I dont have the necessary permissions to execute that action,")
                    .appendContent(" please give me the following permissions and try again")
                    .appendContent(System.lineSeparator())
                    .appendContent(((MissingPermissionsException) e).getMissingPermissions().toString())
                    .send());
        }
    } catch (@NotNull RateLimitException | DiscordException | MissingPermissionsException e1) {
        // Sending the notification can itself fail against the Discord API; log and give up.
        logger.error(e1);
    }
}
/**
 * In the case we follow an elected master the new cluster state needs to have the same elected master and
 * the new cluster state version needs to be equal or higher than our cluster state version.
 * If the first condition fails we reject the cluster state and throw an error.
 * If the second condition fails we ignore the cluster state.
 *
 * @param logger          logger for the decision trace
 * @param currentState    the cluster state we currently hold
 * @param newClusterState the incoming cluster state
 * @return {@code true} when the incoming state should be ignored; throws when it must be rejected
 */
public static boolean shouldIgnoreOrRejectNewClusterState(Logger logger, ClusterState currentState, ClusterState newClusterState) {
    // Throws IllegalStateException if the new state comes from a different master than the one we follow.
    validateStateIsFromCurrentMaster(logger, currentState.nodes(), newClusterState);

    // Ignore cluster states that are not newer: either our state supersedes the incoming one,
    // or both come from the same master with the exact same version.
    if (currentState.supersedes(newClusterState)
            || (newClusterState.nodes().getMasterNodeId().equals(currentState.nodes().getMasterNodeId())
                    && currentState.version() == newClusterState.version())) {
        // if the new state has a smaller version, and it has the same master node, then no need to process it
        logger.debug("received a cluster state that is not newer than the current one, ignoring (received {}, current {})",
                newClusterState.version(), currentState.version());
        return true;
    }

    // Ignore older cluster states if we are following a master (master id is non-null).
    if (currentState.nodes().getMasterNodeId() != null && newClusterState.version() < currentState.version()) {
        logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})",
                newClusterState.version(), currentState.version());
        return true;
    }
    return false;
}
/**
 * Optionally narrows the full set of ping responses to those from master-eligible nodes,
 * and emits a debug-level dump of whatever survives the filter.
 *
 * @param fullPingResponses              all responses collected during pinging
 * @param masterElectionIgnoreNonMasters when true, drop responses from non-master-eligible nodes
 * @param logger                         debug output target
 * @return the (possibly filtered) ping responses
 */
static List<ZenPing.PingResponse> filterPingResponses(List<ZenPing.PingResponse> fullPingResponses,
                                                      boolean masterElectionIgnoreNonMasters, Logger logger) {
    final List<ZenPing.PingResponse> pingResponses = masterElectionIgnoreNonMasters
            ? fullPingResponses.stream().filter(response -> response.node().isMasterNode()).collect(Collectors.toList())
            : fullPingResponses;

    if (logger.isDebugEnabled()) {
        // Build the multi-line description only when debug logging is actually on.
        final StringBuilder description = new StringBuilder();
        if (pingResponses.isEmpty()) {
            description.append(" {none}");
        } else {
            for (ZenPing.PingResponse response : pingResponses) {
                description.append("\n\t--> ").append(response);
            }
        }
        logger.debug("filtered ping responses: (ignore_non_masters [{}]){}", masterElectionIgnoreNonMasters, description);
    }
    return pingResponses;
}
/**
 * Registers domains to resource manager from resource listeners.
 *
 * @param novousLogger logger used to trace each registration step
 */
private static void handleResourceManager(Logger novousLogger) {
    novousLogger.info("Registering resource manager test...");
    ResourceLinker.REGISTERED_RESOURCE_LINKERS.add(new TestResourceLinker());
    novousLogger.info("Registering abstract resource linked manager...");
    // NOTE(review): unchecked cast — assumes Minecraft's resource manager is a
    // PairedRegistry<String, FallbackResourceManager>; verify against the target MC version.
    PairedRegistry<String, FallbackResourceManager> resourceManagerRegistry =
            (PairedRegistry<String, FallbackResourceManager>) Minecraft
                    .getMinecraft().getResourceManager();
    // Map every domain exposed by each registered linker to a fresh LinkedResourceManager.
    for (ResourceLinker linker : ResourceLinker.REGISTERED_RESOURCE_LINKERS) {
        linker.getLinkedDomains().forEach((domain) -> resourceManagerRegistry.register
                (domain, new LinkedResourceManager()));
    }
    novousLogger.info("Testing resource linked manager...");
    TestResourceLinker.checkTestSuccess();
}
/**
 * Runs the given task synchronously on the calling thread and returns its result.
 * Failures are logged as fatal rather than propagated; {@code null} is returned in
 * that case, so callers must tolerate a null result.
 *
 * @param p_181617_0_ the task to execute
 * @param p_181617_1_ logger used to report execution failures
 * @return the task's result, or {@code null} if it failed or the wait was interrupted
 */
public static <V> V func_181617_a(FutureTask<V> p_181617_0_, Logger p_181617_1_) {
    try {
        p_181617_0_.run();
        return p_181617_0_.get();
    } catch (ExecutionException executionexception) {
        p_181617_1_.fatal("Error executing task", executionexception);
    } catch (InterruptedException interruptedexception) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        p_181617_1_.fatal("Error executing task", interruptedexception);
    }
    return null;
}
/**
 * Runs the given task synchronously on the calling thread and returns its result.
 * Failures are logged as fatal rather than propagated; {@code null} is returned in
 * that case, so callers must tolerate a null result.
 *
 * @param task   the task to execute
 * @param logger logger used to report execution failures
 * @return the task's result, or {@code null} if it failed or the wait was interrupted
 */
@Nullable
public static <V> V runTask(FutureTask<V> task, Logger logger) {
    try {
        task.run();
        return task.get();
    } catch (ExecutionException executionexception) {
        logger.fatal("Error executing task", executionexception);
    } catch (InterruptedException interruptedexception) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        logger.fatal("Error executing task", interruptedexception);
    }
    return null;
}
@Test public void test03() { // Logger logger = LogManager.getFormatterLogger(); Logger logger = LogManager.getLogger(); String name = "李志伟"; Date birthday = new Date(); logger.debug("用户名称:[{}], 日期:[{}]", name, birthday); logger.info("用户名称:[{}], 日期:[{}]", name, birthday); logger.warn("用户名称:[{}], 日期:[{}]", name, birthday); logger.error("用户名称:[{}], 日期:[{}]", name, birthday); logger.fatal("用户名称:[{}], 日期:[{}]", name, birthday); logger.error("异常信息提示", new RuntimeException("异常信息")); LogManager.shutdown(); }
/**
 * Emits the GC-overhead message at the level corresponding to the crossed threshold,
 * guarded by the matching level-enabled check so arguments are only rendered when needed.
 * Unknown threshold values are silently ignored, matching the original switch.
 */
static void logGcOverhead(
        final Logger logger,
        final JvmMonitor.Threshold threshold,
        final long current,
        final long elapsed,
        final long seq) {
    if (threshold == JvmMonitor.Threshold.WARN) {
        if (logger.isWarnEnabled()) {
            logger.warn(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
        }
    } else if (threshold == JvmMonitor.Threshold.INFO) {
        if (logger.isInfoEnabled()) {
            logger.info(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
        }
    } else if (threshold == JvmMonitor.Threshold.DEBUG) {
        if (logger.isDebugEnabled()) {
            logger.debug(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
        }
    }
}
/**
 * Base constructor for an asynchronous search phase: stores the collaborators used to
 * fan out shard-level requests and to assemble the final {@link SearchResponse}.
 * Pure field assignment — no work is started here.
 *
 * @param name                phase name, passed to the superclass
 * @param nodeIdToConnection  resolves a node id to a transport connection
 * @param aliasFilter         per-index alias filters applied to shard requests
 * @param concreteIndexBoosts per-index boost factors
 * @param resultConsumer      accumulator for per-shard phase results
 */
protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
                                    Function<String, Transport.Connection> nodeIdToConnection,
                                    Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                                    Executor executor, SearchRequest request,
                                    ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts,
                                    long startTime, long clusterStateVersion, SearchTask task,
                                    SearchPhaseResults<Result> resultConsumer) {
    super(name, request, shardsIts, logger);
    this.startTime = startTime;
    this.logger = logger;
    this.searchTransportService = searchTransportService;
    this.executor = executor;
    this.request = request;
    this.task = task;
    this.listener = listener;
    this.nodeIdToConnection = nodeIdToConnection;
    this.clusterStateVersion = clusterStateVersion;
    this.concreteIndexBoosts = concreteIndexBoosts;
    this.aliasFilter = aliasFilter;
    this.results = resultConsumer;
}
/**
 * Creates a {@link PerformanceLogger} that forwards messages to a freshly created logger
 * at INFO level; it reports itself enabled exactly when INFO is enabled on that logger.
 */
@Override
protected PerformanceLogger createPerformanceLogger() throws IOException {
    final Logger logger = createLogger();
    return new PerformanceLogger() {
        @Override
        public void log(String message) {
            logger.info(message);
        }

        @Override
        public boolean isEnabled() {
            return logger.isEnabled(Level.INFO);
        }
    };
}
/** Verifies the collector captures three ERROR events with their exact messages and level. */
@Test
public void test() {
    {
        Logger logger = LogManager.getLogger("test.logger");
        for (String message : new String[] {"This is an error!", "This is another error!", "This is a third error!"}) {
            logger.error(message);
        }
    }
    assertThat(collector.getLogs())
            .hasSize(3)
            .contains("This is an error!", "This is another error!", "This is a third error!");
    List<LogEvent> rawLogs = (List<LogEvent>) collector.getRawLogs();
    assertThat(rawLogs).hasSize(3);
    // Every captured event must carry the ERROR level.
    assertTrue(rawLogs.stream().allMatch(event -> event.getLevel() == Level.ERROR));
}
/**
 * logs the weight of failed members wrt the given previous view
 *
 * @param oldView the view preceding the failure; crashed members absent from it are skipped
 * @param log     destination logger (one INFO line per weighed member)
 */
public void logCrashedMemberWeights(NetView oldView, Logger log) {
    InternalDistributedMember lead = oldView.getLeadMember();
    for (InternalDistributedMember mbr : this.crashedMembers) {
        // Only weigh members that were actually part of the previous view.
        if (!oldView.contains(mbr)) {
            continue;
        }
        int mbrWeight = mbr.getNetMember().getMemberWeight();
        switch (mbr.getVmKind()) {
            case DistributionManager.NORMAL_DM_TYPE:
                // The lead member carries extra weight (15) versus other normal members (10).
                if (lead != null && mbr.equals(lead)) {
                    mbrWeight += 15;
                } else {
                    mbrWeight += 10;
                }
                break;
            case DistributionManager.LOCATOR_DM_TYPE:
                mbrWeight += 3;
                break;
            case DistributionManager.ADMIN_ONLY_DM_TYPE:
                // Admin-only members contribute no additional weight.
                break;
            default:
                throw new IllegalStateException("Unknown member type: " + mbr.getVmKind());
        }
        log.info(" " + mbr + " had a weight of " + mbrWeight);
    }
}
/**
 * Logs {@code msg} with {@code args} at DEBUG on the logger named after {@code clazz},
 * bailing out early when DEBUG is disabled so the message is never formatted.
 */
public static void debug(Class<?> clazz, String msg, Object... args) {
    Logger classLogger = LogManager.getLogger(clazz);
    if (!classLogger.isDebugEnabled()) {
        return;
    }
    classLogger.debug(msg, args);
}
/**
 * Async action for the DFS-query-then-fetch search type: delegates all fan-out state to
 * the base class (phase name "dfs", one result slot per shard) and keeps the phase
 * controller needed to reduce per-shard results later.
 */
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
                                   Function<String, Transport.Connection> nodeIdToConnection,
                                   Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                                   SearchPhaseController searchPhaseController, Executor executor,
                                   SearchRequest request, ActionListener<SearchResponse> listener,
                                   GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
                                   SearchTask task) {
    super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
            request, listener, shardsIts, startTime, clusterStateVersion, task,
            new SearchPhaseResults<>(shardsIts.size()));
    this.searchPhaseController = searchPhaseController;
}
/** Verifies that the {@code logger.level} setting drives the level of a default logger. */
public void testDefaults() throws IOException, UserException {
    final Path configDir = getDataPath("config");
    // Pick an arbitrary level so the test does not depend on one particular value.
    final String expectedLevel = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR).toString();
    final Settings settings = Settings.builder()
            .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("logger.level", expectedLevel)
            .build();
    LogConfigurator.configure(new Environment(settings));
    final Logger logger = ESLoggerFactory.getLogger("test");
    assertThat(logger.getLevel().toString(), equalTo(expectedLevel));
}
public void testResolveOrder() throws Exception { final Path configDir = getDataPath("config"); final Settings settings = Settings.builder() .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("logger.test_resolve_order", "TRACE") .build(); final Environment environment = new Environment(settings); LogConfigurator.configure(environment); // args should overwrite whatever is in the config final String loggerName = "test_resolve_order"; final Logger logger = ESLoggerFactory.getLogger(loggerName); assertTrue(logger.isTraceEnabled()); }
/**
 * Verifies that flipping the merge-scheduler auto-throttle setting via an index metadata
 * update both takes effect and is logged by IndexScopedSettings at TRACE level. The mock
 * appender is attached/detached around the body so other tests are unaffected.
 */
public void testUpdateAutoThrottleSettings() throws Exception {
    MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
    mockAppender.start();
    final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
    Loggers.addAppender(settingsLogger, mockAppender);
    // TRACE so the settings-update log line is actually emitted and captured.
    Loggers.setLevel(settingsLogger, Level.TRACE);
    try {
        Settings.Builder builder = Settings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
                .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
                .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
                .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2")
                .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true");
        IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY);
        assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), true);
        // Flip the setting and apply it as a metadata update.
        builder.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "false");
        settings.updateIndexMetaData(newIndexMeta("index", builder.build()));
        // Make sure we log the change:
        assertTrue(mockAppender.sawUpdateAutoThrottle);
        assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false);
    } finally {
        // Always restore logger state, even if an assertion above fails.
        Loggers.removeAppender(settingsLogger, mockAppender);
        mockAppender.stop();
        Loggers.setLevel(settingsLogger, (Level) null);
    }
}
/**
 * Wraps {@code listener} so its callbacks are dispatched on the named thread pool
 * executor. Pure field assignment — no work is started here.
 *
 * @param logger         logger for dispatch failures
 * @param threadPool     pool whose executor will run the callbacks
 * @param executor       name of the executor within the pool
 * @param listener       the listener to invoke on the executor
 * @param forceExecution whether to force execution even if the executor's queue is full
 */
public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor,
                              ActionListener<Response> listener, boolean forceExecution) {
    this.logger = logger;
    this.threadPool = threadPool;
    this.executor = executor;
    this.listener = listener;
    this.forceExecution = forceExecution;
}
/**
 * Reports whether the given index has no bytes left to upgrade, logging the
 * per-index upgrade status along the way.
 *
 * @return {@code true} when no segment bytes remain to be upgraded
 */
public static boolean isUpgraded(Client client, String index) throws Exception {
    Logger logger = Loggers.getLogger(OldIndexUtils.class);
    int bytesToUpgrade = 0;
    for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
        logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(),
                status.getToUpgradeBytes());
        bytesToUpgrade += status.getToUpgradeBytes();
    }
    return bytesToUpgrade == 0;
}
/**
 * Smoke-tests logOffHeapState: allocate and release two objects across two slabs,
 * then ensure dumping the off-heap state to a mock logger does not throw.
 */
@Test
public void testLogging() {
    Slab smallSlab = new SlabImpl(32);
    Slab largeSlab = new SlabImpl(1024 * 1024 * 5);
    this.freeListManager = createFreeListManager(ma, new Slab[] {smallSlab, largeSlab});
    OffHeapStoredObject firstAllocation = this.freeListManager.allocate(24);
    OffHeapStoredObject secondAllocation = this.freeListManager.allocate(1024 * 1024);
    OffHeapStoredObject.release(firstAllocation.getAddress(), this.freeListManager);
    OffHeapStoredObject.release(secondAllocation.getAddress(), this.freeListManager);
    // A bare mock logger is enough: we only assert the call completes.
    this.freeListManager.logOffHeapState(mock(Logger.class), 1024);
}
/**
 * A searcher wrapper used in tests to track reference counting on the underlying reader.
 * Records the reader's ref count at construction time and asserts the wrapped searcher
 * is not already closed.
 *
 * @param indexSearcher   the searcher actually used for searching (may differ from the wrapped one)
 * @param wrappedSearcher the engine searcher being wrapped; supplies source and reader
 * @param shardId         shard this searcher belongs to
 * @param logger          logger for diagnostics
 */
AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher, ShardId shardId, Logger logger) {
    super(wrappedSearcher.source(), indexSearcher);
    // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher
    // with a wrapped reader.
    this.wrappedSearcher = wrappedSearcher;
    this.logger = logger;
    this.shardId = shardId;
    initialRefCount = wrappedSearcher.reader().getRefCount();
    assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount
            + "] expected a value > [0] - reader is already closed";
}
/**
 * After a shard closes, run a consistency check on its store — but only when this
 * listener had actually registered the shard (i.e. it was present in {@code shardSet}).
 */
@Override
public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
    if (indexShard != null) {
        Boolean remove = shardSet.remove(indexShard);
        // Use equals rather than ==: identity comparison against Boolean.TRUE only works for the
        // interned boxed instance and silently fails for any other Boolean object (and handles null).
        if (Boolean.TRUE.equals(remove)) {
            Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
            MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
        }
    }
}
/**
 * (Re)initializes the log4j core logger context for the base GemFire logger:
 * registers the property-change listener exactly once, forces a reconfigure
 * (which triggers the listener), then configures the loggers.
 */
private static void init() {
    LoggerContext context = ((org.apache.logging.log4j.core.Logger) LogManager
        .getLogger(BASE_LOGGER_NAME, GemFireParameterizedMessageFactory.INSTANCE)).getContext();
    // Remove before adding so repeated init() calls never register the listener twice.
    context.removePropertyChangeListener(propertyChangeListener);
    context.addPropertyChangeListener(propertyChangeListener);
    context.reconfigure(); // propertyChangeListener invokes configureFastLoggerDelegating
    configureLoggers(false, false);
}
/**
 * Per-field view over the shared cache: remembers the index/field this instance serves,
 * the listeners to notify, and the backing cache. Pure field assignment.
 */
IndexFieldCache(Logger logger, final Cache<Key, Accountable> cache, Index index, String fieldName, Listener... listeners) {
    this.logger = logger;
    this.cache = cache;
    this.index = index;
    this.fieldName = fieldName;
    this.listeners = listeners;
}
/**
 * Recovery target that blocks when recovery reaches {@code stageToBlock}, signalling
 * {@code recoveryBlocked} and waiting on {@code releaseRecovery}. Rejects stages the
 * blocking machinery does not support.
 */
BlockingTarget(RecoveryState.Stage stageToBlock, CountDownLatch recoveryBlocked, CountDownLatch releaseRecovery,
               IndexShard shard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener,
               Logger logger) {
    super(shard, sourceNode, listener, version -> {});
    this.stageToBlock = stageToBlock;
    this.recoveryBlocked = recoveryBlocked;
    this.releaseRecovery = releaseRecovery;
    this.logger = logger;
    if (!SUPPORTED_STAGES.contains(stageToBlock)) {
        throw new UnsupportedOperationException(stageToBlock + " is not supported");
    }
}
/**
 * Checks the current classpath for duplicate classes.
 *
 * @throws IllegalStateException if jar hell was found
 */
public static void checkJarHell() throws IOException, URISyntaxException {
    Logger logger = Loggers.getLogger(JarHell.class);
    ClassLoader loader = JarHell.class.getClassLoader();
    if (logger.isDebugEnabled()) {
        logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
        logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
        if (loader instanceof URLClassLoader) {
            logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader) loader).getURLs()));
        }
    }
    checkJarHell(parseClassPath());
}
/**
 * In the case we follow an elected master the new cluster state needs to have the same elected master
 * This method checks for this and throws an exception if needed
 *
 * @param logger          logger for the rejection warning
 * @param currentNodes    the nodes of the cluster state we currently hold
 * @param newClusterState the incoming cluster state
 * @throws IllegalStateException if the incoming state comes from a different master
 */
public static void validateStateIsFromCurrentMaster(Logger logger, DiscoveryNodes currentNodes, ClusterState newClusterState) {
    // Not following any master yet — accept states from anyone.
    if (currentNodes.getMasterNodeId() == null) {
        return;
    }
    if (!currentNodes.getMasterNodeId().equals(newClusterState.nodes().getMasterNodeId())) {
        logger.warn("received a cluster state from a different master than the current one, rejecting (received {}, current {})",
                newClusterState.nodes().getMasterNode(), currentNodes.getMasterNode());
        throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received "
                + newClusterState.nodes().getMasterNode() + ", current " + currentNodes.getMasterNode() + ")");
    }
}
/**
 * Adapts a checked-exception-throwing bi-consumer into a standard {@link BiConsumer}.
 * Any checked exception is wrapped in a {@link RuntimeException}, logged through
 * {@code logger.throwing}, and rethrown.
 *
 * @param unsafeBiConsumer something that acts like a bi-consumer but may throw an exception
 * @param logger           logger used to record the wrapped exception before rethrowing
 * @return a bi-consumer whose checked exceptions surface as runtime exceptions
 */
public static <T, U> BiConsumer<T, U> toBiConsumer(UnsafeBiConsumer<T, U> unsafeBiConsumer, Logger logger) {
    return (first, second) -> {
        try {
            unsafeBiConsumer.accept(first, second);
        } catch (Exception cause) {
            throw logger.throwing(new RuntimeException(cause));
        }
    };
}
/** With the GEMFIRE_VERBOSE-accepting config, a marked INFO message must reach stdout. */
@Test
public void gemfireVerboseShouldLogIfGemfireVerboseIsAccept() {
    configureLogging(this.configFileGemfireVerboseAccept);
    // Use the test method name as the message so the assertion is unambiguous.
    final String expectedMessage = this.testName.getMethodName();
    LogService.getLogger().info(LogMarker.GEMFIRE_VERBOSE, expectedMessage);
    assertThat(this.systemOutRule.getLog()).contains(expectedMessage);
}
/**
 * Captures the outcome of a write operation executed on a replica shard. On success,
 * post-write actions (refresh/translog sync) are started immediately; on failure there
 * is nothing asynchronous to wait for.
 *
 * @param request          the replica request that produced this result
 * @param location         translog location of the successful write, null on failure
 * @param operationFailure the failure, null on success
 * @param replica          the replica shard the write was executed on
 * @param logger           logger used by the async after-write action
 */
public WriteReplicaResult(ReplicaRequest request, @Nullable Location location,
                          @Nullable Exception operationFailure, IndexShard replica, Logger logger) {
    super(operationFailure);
    this.location = location;
    if (operationFailure != null) {
        this.finishedAsyncActions = true;
    } else {
        new AsyncAfterWriteAction(replica, request, location, this, logger).run();
    }
}
/**
 * Adapts a checked-exception-throwing consumer into a standard {@link Consumer}.
 * Any checked exception is wrapped in a {@link RuntimeException}, logged through
 * {@code logger.throwing}, and rethrown.
 *
 * @param unsafeConsumer something that acts like a consumer but may throw an exception
 * @param logger         logger used to record the wrapped exception before rethrowing
 * @return a consumer whose checked exceptions surface as runtime exceptions
 */
public static <T> Consumer<T> toConsumer(UnsafeConsumer<T> unsafeConsumer, Logger logger) {
    return value -> {
        try {
            unsafeConsumer.accept(value);
        } catch (Exception cause) {
            throw logger.throwing(new RuntimeException(cause));
        }
    };
}
/** Logs one INFO line per fragment that still has free space; full fragments are skipped. */
private void logFragmentState(Logger lw) {
    for (Fragment fragment : this.fragmentList) {
        final int freeBytes = fragment.freeSpace();
        if (freeBytes <= 0) {
            continue;
        }
        lw.info("Fragment at " + fragment.getAddress() + " of size " + fragment.getSize() + " has " + freeBytes
            + " bytes free.");
    }
}
public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) { // check for explicit version on the specific analyzer component String sVersion = settings.get("version"); if (sVersion != null) { return Lucene.parseVersion(sVersion, Version.LATEST, logger); } // check for explicit version on the index itself as default for all analysis components sVersion = indexSettings.get("index.analysis.version"); if (sVersion != null) { return Lucene.parseVersion(sVersion, Version.LATEST, logger); } // resolve the analysis version based on the version the index was created with return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion; }
public Log4j2Impl(String clazz) { Logger logger = LogManager.getLogger(clazz); if (logger instanceof AbstractLogger) { log = new Log4j2AbstractLoggerImpl((AbstractLogger) logger); } else { log = new Log4j2LoggerImpl(logger); } }
/**
 * Tries to open an index for the given location. This includes reading the
 * segment infos and possible corruption markers. If the index can not
 * be opened, an exception is thrown. The shard lock is held (up to a 5s wait)
 * for the duration of the check.
 */
public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
                                Logger logger) throws IOException, ShardLockObtainFailedException {
    try (ShardLock ignored = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
         Directory directory = new SimpleFSDirectory(indexLocation)) {
        failIfCorrupted(directory, shardId);
        final SegmentInfos segmentInfos = Lucene.readSegmentInfos(directory);
        logger.trace("{} loaded segment info [{}]", shardId, segmentInfos);
    }
}
/**
 * Builds a snapshot of store metadata for the given commit: file metadata, the commit's
 * user data, and the document count, all loaded via {@code loadMetadata}.
 *
 * @throws IOException if reading the commit metadata fails
 */
MetadataSnapshot(IndexCommit commit, Directory directory, Logger logger) throws IOException {
    LoadedMetadata loadedMetadata = loadMetadata(commit, directory, logger);
    metadata = loadedMetadata.fileMetadata;
    commitUserData = loadedMetadata.userData;
    numDocs = loadedMetadata.numDocs;
    // A non-empty snapshot must reference exactly one segments file.
    assert metadata.isEmpty() || numSegmentFiles() == 1 : "numSegmentFiles: " + numSegmentFiles();
}
/** Verifies the collector captures two ERROR messages exactly as logged. */
@Test
public void test() {
    {
        Logger logger = LogManager.getLogger("test.logger");
        for (String message : new String[] {"This is an error!", "This is another error!"}) {
            logger.error(message);
        }
    }
    assertThat(collector.getLogs())
            .hasSize(2)
            .contains("This is an error!", "This is another error!");
}
/**
 * Writes every configured option to {@code log} at the given {@code level},
 * printing "n/a" for any option that was not set.
 *
 * @param log   destination logger
 * @param level level to emit each line at
 */
public void dump(Logger log, Level level) {
    log.log(level, "Project Slug: {}", projectSlug == null ? "n/a" : projectSlug);
    log.log(level, "Game Version: {}", gameVersion == null ? "n/a" : gameVersion);
    log.log(level, "Project Version: {}", projectVersion == null ? "n/a" : projectVersion);
    log.log(level, "Output Directory: {}", output == null ? "n/a" : output.getAbsolutePath());
    log.log(level, "Temporary Files Directory: {}", tmpDirectory == null ? "n/a" : tmpDirectory.getAbsolutePath());
    log.log(level, "Installation mode: {}", mode == null ? "n/a" : mode.name().toLowerCase());
    log.log(level, "Server Mode: {}", server);
}