@GET @Path("startBillingRun") @Produces("text/plain") public String startBillingRun() { try { for (Invoice i : repository.findAll()) { MDC.put(INVOICE_ID, Long.toString(i.getId())); try { i.calculateTotal(); } finally { MDC.remove(INVOICE_ID); } /////////////////////////////// try (final CloseableThreadContext.Instance c = CloseableThreadContext.put(INVOICE_ID, String.valueOf(i.getId()))) { i.calculateTotal(); } } return "OK"; } catch (Exception e) { return "Error occured, please look at log"; } }
@Override
protected void doClose() throws IOException, IllegalStateException {
    // Tag every log statement produced during the close sequence with the
    // step id and this class name; both keys are popped automatically.
    try (
        final Instance threadCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepId)
            .put(KEY_CLASS_NAME, StorageDriverBase.class.getSimpleName())
    ) {
        // Stop the dispatcher and the item data source first, then drain
        // the task queues.
        ioTasksDispatchCoroutine.close();
        itemDataInput.close();
        childTasksQueue.clear();
        inTasksQueue.clear();
        // Warn if some I/O results were produced but never consumed.
        final int unhandledResultCount = ioResultsQueue.size();
        if (unhandledResultCount > 0) {
            Loggers.ERR.warn(
                "{}: I/O results queue contains {} unhandled elements", toString(),
                unhandledResultCount
            );
        }
        ioResultsQueue.clear();
        // Release cached credential/path state.
        authTokens.clear();
        pathToCredMap.clear();
        pathMap.clear();
        Loggers.MSG.debug("{}: closed", toString());
    }
}
@Override
public final void channelCreated(final Channel channel) throws Exception {
    // Configure the pipeline of every newly created channel, with the step
    // id and class name attached to any log output produced while doing so.
    try (
        final Instance logCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepId)
            .put(KEY_CLASS_NAME, CLS_NAME)
    ) {
        final ChannelPipeline pipeline = channel.pipeline();
        appendHandlers(pipeline);
        // Guard clause: skip the (potentially costly) trace formatting
        // unless trace logging is actually enabled.
        if (!Loggers.MSG.isTraceEnabled()) {
            return;
        }
        Loggers.MSG.trace(
            "{}: new channel pipeline configured: {}", stepId, pipeline.toString()
        );
    }
}
@Override
protected void doClose() throws IOException {
    super.doClose();
    // Stop all I/O coroutines before draining their per-worker task buffers.
    for (final Coroutine ioCoroutine : ioCoroutines) {
        ioCoroutine.close();
    }
    for (int i = 0; i < ioWorkerCount; i++) {
        try (final Instance logCtx = CloseableThreadContext.put(KEY_CLASS_NAME, CLS_NAME)) {
            // Bounded lock wait: clear the buffer only if the lock is
            // obtained within the coroutine timeout; otherwise report the
            // leftover tasks with a thread dump for diagnosis.
            if (ioTaskBuffs[i].tryLock(Coroutine.TIMEOUT_NANOS, TimeUnit.NANOSECONDS)) {
                ioTaskBuffs[i].clear();
            } else if (ioTaskBuffs[i].size() > 0) {
                Loggers.ERR.debug(
                    "Failed to obtain the I/O tasks buff lock in time, thread dump:\n{}",
                    new ThreadDump().toString()
                );
            }
        } catch (final InterruptedException e) {
            // FIX: restore the interrupt status so callers can observe the
            // interruption — the original swallowed the flag.
            Thread.currentThread().interrupt();
            LogUtil.exception(
                Level.WARN, e, "Unexpected failure, I/O tasks buff remains uncleared"
            );
        }
        // Drop the reference so the buffer becomes eligible for GC.
        ioTaskBuffs[i] = null;
    }
    ioCoroutines.clear();
}
public static void register(final LoadController controller, final MetricsContext metricsCtx)
throws InterruptedException {
    // Guard the shared registry with a bounded lock wait so a stuck holder
    // cannot hang the caller; on timeout, report and bail out.
    if (!INSTANCE.allMetricsLock.tryLock(1, TimeUnit.SECONDS)) {
        Loggers.ERR.warn(
            "Locking timeout at register call, thread dump:\n{}",
            new ThreadDump().toString()
        );
        return;
    }
    try (
        final Instance stepIdCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, metricsCtx.getStepId())
    ) {
        // Lazily create the per-controller metrics map, then attach a new
        // meter for this metrics context.
        INSTANCE.allMetrics
            .computeIfAbsent(controller, c -> new HashMap<>())
            .put(metricsCtx, new Meter(metricsCtx));
        Loggers.MSG.debug("Metrics context \"{}\" registered", metricsCtx);
    } catch (final MalformedObjectNameException e) {
        // Thrown while registering the meter's MBean name.
        LogUtil.exception(
            Level.WARN, e, "Failed to register the MBean for the metrics context \"{}\"",
            metricsCtx.toString()
        );
    } finally {
        INSTANCE.allMetricsLock.unlock();
    }
}
@Override
public final void run() {
    // Propagate the captured thread-context map into this thread for the
    // whole run; the keys are removed automatically when the scope closes.
    try (final Instance ctxScope = CloseableThreadContext.putAll(threadContext)) {
        super.run();
    } catch (final Throwable cause) {
        // Deliberately broad: this is the thread's outermost frame, so any
        // escaping Throwable would otherwise vanish silently.
        LOG.log(Level.SEVERE, "Unhandled thread failure", cause);
    }
}
@Override
public void run() {
    // Execute the step with the step id and concrete class name pushed onto
    // the logging thread context; both are popped automatically on exit.
    final String stepId = localConfig.getTestConfig().getStepConfig().getId();
    try (
        final CloseableThreadContext.Instance logCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepId)
            .put(KEY_CLASS_NAME, getClass().getSimpleName())
    ) {
        invoke();
    }
}
@Override
public final void adjustIoBuffers(final long avgTransferSize, final IoType ioType) {
    try (
        final Instance logCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepId)
            .put(KEY_CLASS_NAME, CLS_NAME)
    ) {
        // Clamp the average transfer size into [BUFF_SIZE_MIN, BUFF_SIZE_MAX]
        // (equivalent to the original if/else-if chain).
        final int size = (int) Math.min(Math.max(avgTransferSize, BUFF_SIZE_MIN), BUFF_SIZE_MAX);
        if (IoType.CREATE.equals(ioType)) {
            // Writing: large send buffer, minimal receive buffer.
            Loggers.MSG.info(
                "Adjust output buffer size: {}", SizeInBytes.formatFixedSize(size)
            );
            bootstrap.option(ChannelOption.SO_RCVBUF, BUFF_SIZE_MIN);
            bootstrap.option(ChannelOption.SO_SNDBUF, size);
        } else if (IoType.READ.equals(ioType)) {
            // Reading: large receive buffer, minimal send buffer.
            Loggers.MSG.info("Adjust input buffer size: {}", SizeInBytes.formatFixedSize(size));
            bootstrap.option(ChannelOption.SO_RCVBUF, size);
            bootstrap.option(ChannelOption.SO_SNDBUF, BUFF_SIZE_MIN);
        } else {
            // Other I/O types: keep both buffers minimal.
            bootstrap.option(ChannelOption.SO_RCVBUF, BUFF_SIZE_MIN);
            bootstrap.option(ChannelOption.SO_SNDBUF, BUFF_SIZE_MIN);
        }
    }
}
@Override
@SuppressWarnings("unchecked")
public T build() throws OmgShootMyFootException, InterruptedException {
    try (
        final Instance logCtx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepName)
            .put(KEY_CLASS_NAME, BasicStorageDriverBuilder.class.getSimpleName())
    ) {
        final DriverConfig driverConfig = storageConfig.getDriverConfig();
        final String driverType = driverConfig.getType();
        final boolean verifyFlag = itemConfig.getDataConfig().getVerify();
        // Scan all StorageDriverFactory implementations available on the
        // extensions class loader and use the first one whose name matches
        // the configured driver type.
        final ServiceLoader<StorageDriverFactory<I, O, T>> loader = ServiceLoader.load(
            (Class) StorageDriverFactory.class, Extensions.CLS_LOADER
        );
        for (final StorageDriverFactory<I, O, T> factory : loader) {
            if (!driverType.equals(factory.getName())) {
                continue;
            }
            return factory.create(
                stepName, contentSrc, loadConfig, storageConfig, verifyFlag
            );
        }
        // No factory matched — report and return null (callers must check).
        Loggers.ERR.fatal(
            "Failed to create the storage driver for the type \"{}\"", driverType
        );
        return null;
    }
}
@Override
public void doFilter(final ServletRequest request, final ServletResponse response,
    final FilterChain chain) throws IOException, ServletException {
    // The tracing span is optional: getAttribute returns null when the
    // attribute was never set for this request.
    final Span currentSpan = (Span) request.getAttribute(TRACE_REQUEST_ATTR);
    // getSession(false) does not create a session; user is null when there
    // is no session or no logged-in user.
    final HttpSession httpSession = ((HttpServletRequest) request).getSession(false);
    final User user = httpSession == null ? null : (User) httpSession.getAttribute(USER);
    final String uuid;
    if (user == null) {
        uuid = null;
    } else {
        // FIX: guard against a missing span — the original dereferenced
        // currentSpan unconditionally here and NPE'd when the trace
        // attribute was absent for a logged-in user.
        final String traceId = currentSpan == null ? null : currentSpan.getTraceId();
        uuid = user.getLogin() + "-" + traceId;
    }
    // Tag all downstream logging with the per-request uuid; the key is
    // removed automatically when the chain returns.
    try (final CloseableThreadContext.Instance ignored =
             CloseableThreadContext.put("uuid", uuid)) {
        chain.doFilter(request, response);
    }
}
/** * Log a metric to a logger * * @param metric * the metric to log * @param currentLogger * the logger to use * @param logLevel * the log level to use, if null, the default level of FINE will * be used. * @param printProperties * print properties if set to true. * @param returnValue * The return value to print in the logs. */ public static void logMetricToLogger(OperationMetric metric, Logger currentLogger, Level logLevel, Object returnValue, boolean printProperties) { if (currentLogger == null) { return; } if (metric == null) { return; } if (logLevel == null) { logLevel = DEFAULT_LOG_LEVEL; } if (currentLogger.isEnabled(logLevel)) { // String logMessage = formatMetricLog(metric, returnValue, printProperties); List<String> keyValuePairList = metric.getKeyValuePairList(); String parameters = ""; if( keyValuePairList != null ) { parameters = keyValuePairList.toString(); } CloseableThreadContext.Instance ctx = CloseableThreadContext.put(OperationMetric.FIELD_PARAMETERS, parameters); ctx.put(OperationMetric.FIELD_PARAMETERS, parameters); ctx.put(OperationMetric.FIELD_ID, Long.toString(metric.getIdentifier())); ctx.put(OperationMetric.FIELD_PARENT_ID, Long.toString(metric.getParentIdentifier())); ctx.put(OperationMetric.FIELD_START_TIME, Long.toString(metric.getStartTime())); ctx.put(OperationMetric.FIELD_STOP_TIME, Long.toString(metric.getStopTime())); ctx.put(OperationMetric.FIELD_DURATION, Long.toString(metric.getDuration())); ctx.put(OperationMetric.FIELD_DURATION_MS, Long.toString(metric.getDuration()/1000000)); ctx.put(OperationMetric.FIELD_RESULT_SIZE, Integer.toString(metric.getResultSize())); ctx.put(OperationMetric.FIELD_CACHE_HIT, Boolean.toString(metric.isResultFetchedFromCache())); ctx.put(OperationMetric.FIELD_CACHE_ENABLED, Boolean.toString(metric.isOperationCacheEnabled())); ctx.put(OperationMetric.FIELD_SUCCESSFUL, Boolean.toString(metric.isSuccessful())); Map<String, String> properties = metric.getProperties(); if( properties != null ) { ctx = 
CloseableThreadContext.putAll(properties); } currentLogger.log(logLevel, metric.getOperationName()); if( ctx != null ) { ctx.close(); } } PerformanceLogger.increase(metric); }
/**
 * Detach the given metrics context from the controller's registry: refresh
 * the final snapshot, finish any threshold state, emit the summary outputs
 * (file and console) and close the associated meter MBean. Uses a bounded
 * lock wait so a stuck registry lock cannot hang the caller.
 */
public static void unregister(final LoadController controller, final MetricsContext metricsCtx)
throws InterruptedException {
    // Bounded wait on the shared registry lock; on timeout, report with a
    // thread dump and give up (no unregistration happens in that case).
    if(INSTANCE.allMetricsLock.tryLock(1, TimeUnit.SECONDS)) {
        // All logging below is tagged with the step id.
        try(
            final Instance stepIdCtx = CloseableThreadContext
                .put(KEY_TEST_STEP_ID, metricsCtx.getStepId())
        ) {
            final Map<MetricsContext, Closeable> controllerMetrics =
                INSTANCE.allMetrics.get(controller);
            if(controllerMetrics != null) {
                metricsCtx.refreshLastSnapshot(); // last time
                // check for the metrics threshold state if entered
                if(
                    metricsCtx.isThresholdStateEntered()
                    && !metricsCtx.isThresholdStateExited()
                ) {
                    exitMetricsThresholdState(metricsCtx);
                }
                // file output
                if(metricsCtx.getSumPersistFlag()) {
                    Loggers.METRICS_FILE_TOTAL.info(new MetricsCsvLogMessage(metricsCtx));
                }
                if(metricsCtx.getPerfDbResultsFileFlag()) {
                    Loggers.METRICS_EXT_RESULTS_FILE.info(
                        new ExtResultsXmlLogMessage(metricsCtx)
                    );
                }
                // console output
                Loggers.METRICS_STD_OUT.info(
                    new MetricsAsciiTableLogMessage(Collections.singleton(metricsCtx), true)
                );
                Loggers.METRICS_STD_OUT.info(new BasicMetricsLogMessage(metricsCtx));
                // The registered meter is stored as a Closeable; closing it
                // unregisters the MBean (close failures are logged, not thrown).
                final Closeable meterMBean = controllerMetrics.remove(metricsCtx);
                if(meterMBean != null) {
                    try {
                        meterMBean.close();
                    } catch(final IOException e) {
                        LogUtil.exception(Level.WARN, e, "Failed to close the meter MBean");
                    }
                }
            } else {
                Loggers.ERR.debug(
                    "Metrics context \"{}\" has not been registered", metricsCtx
                );
            }
            // Drop the controller entry entirely once its last metrics
            // context has been removed.
            if(controllerMetrics != null && controllerMetrics.size() == 0) {
                INSTANCE.allMetrics.remove(controller);
            }
        } finally {
            INSTANCE.allMetricsLock.unlock();
            // NOTE(review): logged even when the context was not actually
            // registered (else-branch above) — confirm this is intended.
            Loggers.MSG.debug("Metrics context \"{}\" unregistered", metricsCtx);
        }
    } else {
        Loggers.ERR.warn(
            "Locking timeout at unregister call, thread dump:\n{}",
            new ThreadDump().toString()
        );
    }
}
protected void appendNewJob(final Map<String, Object> subTree, final Config config)
throws ScenarioParseException {
    // Resolve the job type declared in the subtree and append the matching
    // step implementation, logging under the configured step id.
    try(
        final CloseableThreadContext.Instance logCtx = CloseableThreadContext.put(
            KEY_TEST_STEP_ID, config.getTestConfig().getStepConfig().getId()
        )
    ) {
        Loggers.MSG.debug("Load the subtree to the step \"{}\"", this.toString());
        final String jobType = (String) subTree.get(KEY_NODE_TYPE);
        // Early throw: a subtree without a type element is a scenario error.
        if(jobType == null) {
            throw new ScenarioParseException(
                "No \"" + KEY_NODE_TYPE + "\" element for the job"
            );
        }
        switch(jobType) {
            case NODE_TYPE_CHAIN:
                append(new ChainLoadStep(config, subTree));
                break;
            case NODE_TYPE_COMMAND:
                append(new CommandStep(config, subTree));
                break;
            case NODE_TYPE_FOR:
                append(new ForStep(config, subTree));
                break;
            case NODE_TYPE_LOAD:
                append(new LoadStep(config, subTree, false));
                break;
            case NODE_TYPE_PARALLEL:
                append(new ParallelStep(config, subTree));
                break;
            case NODE_TYPE_PRECONDITION:
                // Preconditions are load steps flagged as such.
                append(new LoadStep(config, subTree, true));
                break;
            case NODE_TYPE_SEQUENTIAL:
                append(new SequentialStep(config, subTree));
                break;
            case NODE_TYPE_MIXED:
                append(new MixedLoadStep(config, subTree));
                break;
            default:
                throw new ScenarioParseException(
                    "\"" + this.toString() + "\": unexpected job type value: " + jobType
                );
        }
    }
}
@Override
protected final void doInterrupt() throws IllegalStateException {
    // Decrease the shared I/O executor's reference count and, when it
    // reaches zero, shut the executor down gracefully. All logging is
    // tagged with the step id and class name.
    try (
        final Instance ctx = CloseableThreadContext
            .put(KEY_TEST_STEP_ID, stepId)
            .put(KEY_CLASS_NAME, CLS_NAME)
    ) {
        try {
            // Bounded wait for the executor lock to avoid hanging forever.
            if (IO_EXECUTOR_LOCK.tryLock(Coroutine.TIMEOUT_NANOS, TimeUnit.NANOSECONDS)) {
                try {
                    IO_EXECUTOR_REF_COUNT--;
                    Loggers.MSG.debug(
                        "{}: decreased the I/O executor ref count to {}", toString(),
                        IO_EXECUTOR_REF_COUNT
                    );
                    if (IO_EXECUTOR_REF_COUNT == 0) {
                        // Last user: shut the shared executor down and wait
                        // briefly for the workers to stop.
                        Loggers.MSG.info("{}: shutdown the I/O executor", toString());
                        if (
                            IO_EXECUTOR
                                .shutdownGracefully(0, 1, TimeUnit.MILLISECONDS)
                                .await(10)
                        ) {
                            Loggers.MSG.debug("{}: I/O workers stopped in time", toString());
                        } else {
                            Loggers.ERR.debug("{}: I/O workers stopping timeout", toString());
                        }
                        IO_EXECUTOR = null;
                    }
                } finally {
                    IO_EXECUTOR_LOCK.unlock();
                }
            } else {
                Loggers.ERR.error(
                    "Failed to obtain the I/O executor lock in time, thread dump:\n{}",
                    new ThreadDump().toString()
                );
            }
        } catch (final InterruptedException e) {
            // FIX: restore the interrupt status so callers can observe the
            // interruption — the original swallowed the flag.
            Thread.currentThread().interrupt();
            LogUtil.exception(Level.WARN, e, "Graceful I/O workers shutdown was interrupted");
        }
    }
}