@RequestMapping("/all-links") public String allLinks(Model model) { Timer timer = metricRegistry.timer("all-links"); Context context = timer.time(); try { List<Link> asdex = linkRepo.findByNameContainingOrderByNameAsc("ASDE-X"); model.addAttribute("asdex", asdex); List<Link> assc = linkRepo.findByNameContainingOrderByNameAsc("ASSC"); model.addAttribute("assc", assc); List<Link> tdls = linkRepo.findByNameContainingOrderByNameAsc("TDLS"); model.addAttribute("tdls", tdls); List<Link> efsts = linkRepo.findByNameContainingOrderByNameAsc("EFSTS"); model.addAttribute("efsts", efsts); List<Link> stars = linkRepo.findByNameContainingOrderByNameAsc("STARS"); model.addAttribute("stars", stars); List<Link> rvr = linkRepo.findByNameContainingOrderByNameAsc("RVR"); model.addAttribute("rvr", rvr); return "all-links"; } finally { context.stop(); } }
@Override
public OptionalLong serialOf(UUID l) {
    try (Context time = seqLookupLatency.time()) {
        List<Long> res = jdbcTemplate.query(PGConstants.SELECT_SER_BY_ID,
                new Object[] { "{\"id\":\"" + l + "\"}" }, this::extractSerFromResultSet);
        if (res.size() > 1) {
            throw new IllegalStateException("Event ID appeared twice!?");
        } else if (res.isEmpty()) {
            return OptionalLong.empty();
        }
        Long ser = res.get(0);
        if (ser != null && ser.longValue() > 0) {
            return OptionalLong.of(ser.longValue());
        } else {
            return OptionalLong.empty();
        }
    }
}
@Override
public void run() {
    try (final Context context = metrics.getDownloadTimer().time()) {
        download();
    } catch (Throwable t) {
        metrics.getServerErrorsMeter().mark();
        LOG.error("While handling {}", artifactDownloadRequest.getTargetDirectory(), t);
        exceptionNotifier.notify(t, ImmutableMap.of(
                "s3Bucket", artifactDownloadRequest.getS3Artifact().getS3Bucket(),
                "s3Key", artifactDownloadRequest.getS3Artifact().getS3ObjectKey(),
                "targetDirectory", artifactDownloadRequest.getTargetDirectory()));
        try {
            getResponse().sendError(500);
        } catch (Throwable t2) {
            LOG.error("While sending error for {}", artifactDownloadRequest.getTargetDirectory(), t2);
        }
    } finally {
        continuation.complete();
    }
}
/**
 * Will block until there is space for the query to be submitted.
 */
public Observable<QueryResponse> add(Query<?> query, Keyspace keyspace, boolean keepErrors) {
    QueryRequest queryRequest = new QueryRequest(query);
    queryRequest.acquirePermit();
    Context context = addTimer.time();
    Observable<QueryResponse> observable = new QueriesObservableCollapser(queryRequest, keyspace)
            .observe()
            .doOnError((error) -> failureMeter.mark())
            .doOnEach(a -> {
                if (a.getThrowable() != null) {
                    LOG.error("Error while executing statement", a.getThrowable());
                } else if (a.isOnNext()) {
                    LOG.trace("Executed {}", a.getValue());
                }
            })
            .subscribeOn(scheduler)
            .doOnTerminate(context::close);
    return keepErrors ? observable : ignoreErrors(observable);
}
@Override
protected List<QueryResponse> run() throws GraknClientException {
    List<Query<?>> queryList = queries.stream().map(QueryRequest::getQuery)
            .collect(Collectors.toList());
    try {
        return retryer.call(() -> {
            try (Context c = graqlExecuteTimer.time()) {
                return graknClient.graqlExecute(queryList, keyspace);
            }
        });
    } catch (RetryException | ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof GraknClientException) {
            throw (GraknClientException) cause;
        } else {
            throw new RuntimeException("Unexpected exception while retrying, "
                    + queryList.size() + " queries failed.", e);
        }
    } finally {
        queries.forEach(QueryRequest::releasePermit);
    }
}
/**
 * Apply {@link ai.grakn.concept.Attribute} post processing jobs to the concept ids in the provided configuration.
 *
 * @return True if successful.
 */
@Override
public boolean start() {
    try (Context context = metricRegistry()
            .timer(name(PostProcessingTask.class, "execution")).time()) {
        CommitLog commitLog = getPostProcessingCommitLog(configuration());
        commitLog.attributes().forEach((conceptIndex, conceptIds) -> {
            Context contextSingle = metricRegistry()
                    .timer(name(PostProcessingTask.class, "execution-single")).time();
            try {
                Keyspace keyspace = commitLog.keyspace();
                int maxRetry = engineConfiguration().getProperty(GraknConfigKey.LOADER_REPEAT_COMMITS);
                GraknTxMutators.runMutationWithRetry(factory(), keyspace, maxRetry,
                        (graph) -> postProcessor().mergeDuplicateConcepts(graph, conceptIndex, conceptIds));
            } finally {
                contextSingle.stop();
            }
        });
        LOG.debug(JOB_FINISHED, Schema.BaseType.ATTRIBUTE.name(), commitLog.attributes());
        return true;
    }
}
public JsonArray mget(String... keys) {
    try (Context context = mgetTimer.time()) {
        JsonArray results = array();
        try (Jedis resource = jedisPool.getResource()) {
            byte[][] byteKeys = Arrays.stream(keys).map(key -> key(key)).toArray(size -> new byte[size][]);
            List<byte[]> redisResults = resource.mget(byteKeys);
            if (redisResults != null) {
                for (byte[] blob : redisResults) {
                    if (blob != null) { // some results will be null
                        results.add(parser.parseObject(new String(CompressionUtils.decompress(blob), utf8)));
                    }
                }
            }
        } catch (JedisException e) {
            // make sure we can find back jedis related stuff in kibana
            throw new IllegalStateException("problem connecting to jedis", e);
        }
        notFoundMeter.mark();
        return results;
    }
}
@Override
public JsonObject create(JsonObject o, String parentId, boolean replace) {
    try (Context time = createTimer.time()) {
        JsonObject object;
        if (defaultTransformation != null) {
            object = defaultTransformation.apply(o);
        } else {
            object = o;
        }
        JsonObject esResponse = client.createObject(index.writeAlias(), type, object.getString("id"),
                parentId, object, replace);
        JsonObject response = object.deepClone();
        // make sure the id aligns with the actual id in elasticsearch
        String id = esResponse.getString("_id");
        response.put("id", id);
        markModifiedInRedis(id, parentId);
        return response;
    }
}
@Override
public JsonObject create(JsonObject o, boolean replace) {
    try (Context time = createTimer.time()) {
        JsonObject object;
        if (defaultTransformation != null) {
            object = defaultTransformation.apply(o);
        } else {
            object = o;
        }
        JsonObject esResponse = client.createObject(index.writeAlias(), type, object.getString("id"),
                null, object, replace);
        if (esResponse.get("_shards", "successful").asInt() != 1) {
            throw new EsOperationFailedException("Elasticsearch create did not succeed " + esResponse);
        }
        // Elasticsearch does not return us the full object
        JsonObject response = object.deepClone();
        // make sure the id aligns with the actual id in elasticsearch
        response.put("id", esResponse.getString("_id"));
        return response;
    }
}
@Override
public void onEvent(final TSDBMetricMeta meta, final long sequence, final boolean endOfBatch) throws Exception {
    final Context ctx = dispatchHandlerTimer.time();
    try {
        if (outQueueTextFormat) {
            outQueue.acquireAppender().writeDocument(w -> w.write(MessageType.METRICMETA.shortName).marshallable(meta));
        } else {
            outQueue.acquireAppender().writeBytes(meta);
        }
        cacheDb.add(meta.getTsuid());
        meta.recordTimer(endToEndTimer);
    } finally {
        meta.reset();
        ctx.stop();
    }
}
@Test
public void testRedisRateLimit() throws InterruptedException {
    reporter.start(3, TimeUnit.SECONDS);
    ApplicationContext ac = new ClassPathXmlApplicationContext("root-context.xml");
    JedisPool pool = (JedisPool) ac.getBean("jedisPool");
    RedisRateLimiter limiter = new RedisRateLimiter(pool, TimeUnit.MINUTES, 300);
    while (true) {
        boolean flag = false;
        Context context = timer.time();
        if (limiter.acquire("testMKey1")) {
            flag = true;
        }
        context.stop();
        if (flag) {
            requests.mark();
        }
        Thread.sleep(1);
    }
}
private void runCompaction(File commitLogFile) {
    if (runCompactions) {
        compactionExecutor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    Context time = compactionTime.time();
                    Compactor.run(storage, directory, commitLogFile, configuration.getBufferSize(),
                            configuration.getMaxDataFileSize(), configuration.getMaxFileGenerations());
                    openDataFiles();
                    deleteCommitLogFiles(commitLogFile);
                    compactionCounter.inc();
                    time.stop();
                } catch (Exception e) {
                    logger.error("Could not run compaction.", e);
                }
            }
        });
    }
}
/**
 * Return an expected session for the user, falling back to database retrieval
 * should the session not yet exist in cache.
 *
 * @param userName
 *          the user name
 * @return the user session
 * @throws GameBootRuntimeException
 *           the game boot runtime exception
 */
public UserSession expected(String userName) throws GameBootRuntimeException {
    Optional<Context> ctx = helper.startTimer(CACHED_SESSION_TIMER);
    try {
        String noSession = "No session for " + userName;
        check(NO_USERNAME, isEmpty(userName), "No username specified");
        check(NO_USER_SESSION, !activeSessions.hasSession(userName), noSession);
        List<UserSession> sessions = assist.activeSessions();
        Optional<UserSession> o = find(sessions, us -> us.getUser().getUserName().equals(userName));
        // may not yet be in the cached list
        return o.isPresent() ? o.get() : sessionCheck(repository.findOpenSession(userName));
    } finally {
        helper.stopTimer(ctx);
    }
}
/**
 * Return an expected session for the given id, falling back to database
 * retrieval should the session not yet exist in cache.
 *
 * @param id
 *          the id
 * @return the user session
 * @throws GameBootRuntimeException
 *           the game boot runtime exception
 */
public UserSession expected(Long id) throws GameBootRuntimeException {
    Optional<Context> ctx = helper.startTimer(CACHED_SESSION_TIMER);
    try {
        String noSession = "No session for " + id;
        check(INVALID_SESSION_ID, id == null, "No session id specified");
        check(NO_USER_SESSION, !activeSessions.hasSession(id), noSession);
        List<UserSession> sessions = assist.activeSessions();
        Optional<UserSession> o = find(sessions, us -> us.getId().equals(id));
        // may not yet be in the cached list
        return o.isPresent() ? o.get() : sessionCheck(repository.findOpenSession(id));
    } finally {
        helper.stopTimer(ctx);
    }
}
/**
 * Will encode the message if decoded, decode the message if encoded.
 *
 * @param key
 *          the key
 * @param message
 *          the message byte array
 * @return the converted byte array
 * @throws Exception
 *           the exception
 */
public byte[] convert(byte[] key, byte[] message) throws Exception {
    Optional<Context> ctx = helper.startTimer(OTP_CONVERSION);
    try {
        check(key, message);
        byte[] converted = new byte[message.length];
        for (int i = 0; i < message.length; i++) {
            converted[i] = (byte) (message[i] ^ key[i]);
        }
        return converted;
    } finally {
        helper.stopTimer(ctx);
    }
}
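The conversion above is a one-time-pad style XOR, so running it twice with the same key restores the original bytes. A minimal, self-contained sketch of that round-trip property; the xor helper below is an illustrative stand-in for convert(key, message), with the timer and check plumbing from the class above omitted:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class XorRoundTrip {

    // Illustrative stand-in for convert(key, message): XOR each byte with the key.
    static byte[] xor(byte[] key, byte[] message) {
        byte[] out = new byte[message.length];
        for (int i = 0; i < message.length; i++) {
            out[i] = (byte) (message[i] ^ key[i]);
        }
        return out;
    }

    public static void main(String[] args) {
        byte[] message = "hello".getBytes(StandardCharsets.UTF_8);
        byte[] key = { 0x1f, 0x2e, 0x3d, 0x4c, 0x5b }; // must be at least as long as the message
        byte[] encoded = xor(key, message);
        byte[] decoded = xor(key, encoded);
        // XOR with the same key twice is the identity, so decoded equals the original message.
        System.out.println(Arrays.equals(message, decoded)); // true
    }
}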
@SuppressWarnings("unchecked") @Override public Map<JMXNodeLocation, ObjectName> fetchNodes(JMXNodeLocation query) throws ProtocolException { try { Set<ObjectName> searchResponse; try (Context timerContext = getDiagnostics().getRequestTimer().time()) { searchResponse = this.mbs.queryNames(query.getObjectName(), null); } Map<JMXNodeLocation, ObjectName> result = new HashMap<>(); for (ObjectName objectName : searchResponse) { JMXNodeLocation location = new JMXNodeLocation(objectName); result.put(location, objectName); } return Collections.unmodifiableMap(result); } catch (Exception e) { getDiagnostics().getErrorRate().mark(1); throw new ProtocolException(e); } }
public LinkRelevance nextURL(boolean asyncLoad) throws FrontierPersistentException, DataNotFoundException {
    Context timerContext = selectTimer.time();
    try {
        LinkRelevance link = scheduler.nextLink(asyncLoad);
        if (link == null) {
            if (scheduler.hasPendingLinks()) {
                throw new DataNotFoundException(false, "No links available for selection right now.");
            } else {
                throw new DataNotFoundException(true, "Frontier ran out of links.");
            }
        }
        frontier.delete(link);
        schedulerLog.printf("%d\t%.5f\t%s\n", System.currentTimeMillis(), link.getRelevance(), link.getURL().toString());
        return link;
    } finally {
        timerContext.stop();
    }
}
private void startTimer(final MetricGroup metricGroup, final String infixValue, final Exchange exchange) {
    Map<String, Context> timerContextMap = getTimerContextMap(exchange);
    if (timerContextMap != null) {
        String fullTimerName;
        if (infixValue != null) {
            fullTimerName = MetricRegistry.name(this.endpoint.getName(), infixValue, this.endpoint.getTimingName());
        } else {
            fullTimerName = MetricRegistry.name(this.endpoint.getName(), this.endpoint.getTimingName());
        }
        // stop previous context if it exists
        Context timerContext = timerContextMap.get(fullTimerName);
        if (timerContext != null) {
            timerContext.stop();
        }
        // start new context
        timerContext = metricGroup.getTimer().time();
        timerContextMap.put(fullTimerName, timerContext);
    } else {
        LOGGER.warn(MARKER, "timerContextMap is null, timing will not be recorded correctly");
    }
}
/**
 * @param infixValue
 * @param exchange
 */
private void stopTimer(final String infixValue, final Exchange exchange) {
    Map<String, Context> timerContextMap = getTimerContextMap(exchange);
    if (timerContextMap != null) {
        String fullTimerName;
        if (infixValue != null) {
            fullTimerName = MetricRegistry.name(this.endpoint.getName(), infixValue, this.endpoint.getTimingName());
        } else {
            fullTimerName = MetricRegistry.name(this.endpoint.getName(), this.endpoint.getTimingName());
        }
        // stop previous context if it exists
        Context timerContext = timerContextMap.get(fullTimerName);
        if (timerContext != null) {
            timerContext.stop();
        }
    } else {
        LOGGER.warn(MARKER, "timerContextMap is null, timing will not be recorded correctly");
    }
}
@Test
public void testTraversalPerformance() {
    TinkerGraph t = TinkerGraphFactory.createTinkerGraph();
    FramedGraph f = new FramedGraph(t);
    Timer timer = metrics.timer("gremlin");
    Context time = timer.time();
    for (int count = 0; count < iterations; count++) {
        GremlinPipeline g = new GremlinPipeline(t);
        g.V().both().both().both().toList();
    }
    long nanoseconds = time.stop();
    System.out.println("Iterate over all GremlinPipeline " + nanoseconds / 1000000);
    time = timer.time();
    for (int count = 0; count < iterations; count++) {
        f.V().both().both().both().toList();
    }
    nanoseconds = time.stop();
    System.out.println("Iterate over all Totorom " + nanoseconds / 1000000);
}
private Observable<Boolean> directPoster(Observable<List<Sample>> samples, MetricRegistry metrics) {
    final SampleRepository repository = repository();
    final Timer timer = metrics.timer("writes");
    final Meter completions = metrics.meter("samples-completed");
    Func1<List<Sample>, Boolean> insert = new Func1<List<Sample>, Boolean>() {
        @Override
        public Boolean call(List<Sample> s) {
            int sz = s.size();
            try (Context timerCtx = timer.time()) {
                repository.insert(s);
                return true;
            } finally {
                completions.mark(sz);
            }
        }
    };
    return (m_threadCount == 1 ? samples.map(insert) : parMap(samples, metrics, insert))
            .all(Functions.<Boolean>identity());
}
@Override
public boolean deallocate(final AllocationRequest request) {
    if (!supports(request)) {
        return false;
    }
    try {
        return lockManager.lock(new AllocateResourceLock(request), new LockCallback<Boolean>() {
            @Override
            public Boolean doWithLock() {
                Context c = deallocateTimer.time();
                try {
                    return acquireLockAndDeallocate(request);
                } finally {
                    c.stop();
                }
            }
        });
    } catch (UnsupportedAllocation e) {
        log.info("Unsupported allocation for [{}] : {}", this, e.getMessage());
        return false;
    }
}
@Override
public void run() {
    boolean success = false;
    try (final Context context = metrics.getDownloadTimer().time()) {
        success = download();
        if (!success) {
            metrics.getServerErrorsMeter().mark();
            getResponse().sendError(500, "Hit client timeout");
        }
    } catch (Throwable t) {
        metrics.getServerErrorsMeter().mark();
        LOG.error("While handling {}", artifactDownloadRequest.getTargetDirectory(), t);
        exceptionNotifier.notify(String.format("Error handling download (%s)", t.getMessage()), t, ImmutableMap.of(
                "s3Bucket", artifactDownloadRequest.getS3Artifact().getS3Bucket(),
                "s3Key", artifactDownloadRequest.getS3Artifact().getS3ObjectKey(),
                "targetDirectory", artifactDownloadRequest.getTargetDirectory()));
        try {
            getResponse().sendError(500);
        } catch (Throwable t2) {
            LOG.error("While sending error for {}", artifactDownloadRequest.getTargetDirectory(), t2);
        }
    } finally {
        continuation.complete();
    }
}
@Test
public void testOpenCloseConnections() throws SQLException {
    for (int i = 0; i < MAX_ITERATIONS; i++) {
        Context context = timer.time();
        Connection conn = dataSource.getConnection();
        Statement stmt = conn.createStatement();
        stmt.executeQuery("select * from city");
        conn.close();
        context.stop();
    }
    logReporter.report();
}
@Test
public void testTimerTime() throws Exception {
    Timer a = registry.timer("B");
    Context context = a.time();
    context.stop();
    assertEquals(a, updatedMetric);
}
@RequestMapping("/index") public String index(Model model) { Timer timer = metricRegistry.timer("index"); Context context = timer.time(); try { List<Tracon> tracons = traconRepo.findAllByOrderByNameAsc(); model.addAttribute("tracons", tracons); return "index"; } finally { context.stop(); } }
@RequestMapping("/all-builds") public String allBuilds(Model model) { Timer timer = metricRegistry.timer("all-builds"); Context context = timer.time(); try { List<SiteStatus> statuses = statusRepo.findAllByOrderByTraconAsc(); model.addAttribute("statuses", statuses); return "all-builds"; } finally { context.stop(); } }
@Override
@Transactional
public void publish(@NonNull List<? extends Fact> factsToPublish) {
    try (Context time = publishLatency.time()) {
        List<Fact> copiedListOfFacts = Lists.newArrayList(factsToPublish);
        final int numberOfFactsToPublish = factsToPublish.size();
        log.trace("Inserting {} fact(s) in batches of {}", numberOfFactsToPublish, BATCH_SIZE);
        jdbcTemplate.batchUpdate(PGConstants.INSERT_FACT, copiedListOfFacts, BATCH_SIZE, (statement, fact) -> {
            statement.setString(1, fact.jsonHeader());
            statement.setString(2, fact.jsonPayload());
        });
        // add serials to headers
        jdbcTemplate.batchUpdate(PGConstants.UPDATE_FACT_SERIALS, copiedListOfFacts, BATCH_SIZE, (statement, fact) -> {
            final String idMatch = "{\"id\":\"" + fact.id() + "\"}";
            statement.setString(1, idMatch);
        });
        publishMeter.mark(numberOfFactsToPublish);
    } catch (DataAccessException sql) {
        publishFailedCounter.inc();
        // yikes
        if (sql instanceof DuplicateKeyException) {
            throw new IllegalArgumentException(sql.getMessage());
        } else {
            throw sql;
        }
    }
}
@Override
public Optional<Fact> fetchById(@NonNull UUID id) {
    try (Context time = fetchLatency.time()) {
        return jdbcTemplate.query(PGConstants.SELECT_BY_ID,
                new Object[] { "{\"id\":\"" + id + "\"}" }, this::extractFactFromResultSet)
                .stream().findFirst();
    }
}
@Override
public CompletableFuture<MastershipRole> requestRoleFor(DeviceId deviceId) {
    checkPermission(CLUSTER_WRITE);
    checkNotNull(deviceId, DEVICE_ID_NULL);
    final Context timer = startTimer(requestRoleTimer);
    return store.requestRole(deviceId).whenComplete((result, error) -> stopTimer(timer));
}
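startTimer and stopTimer above are the class's own helpers; they let the timing survive into the asynchronous whenComplete callback. Their implementations are not shown here, and the sketch below is only a plausible, hypothetical shape for such helpers (null-safe so the callback never fails if metrics are disabled), not the original code:

// Hypothetical helpers (not the originals): tolerate a missing timer/context so the
// async completion callback can always call stopTimer safely.
private Context startTimer(Timer timer) {
    return timer != null ? timer.time() : null;
}

private void stopTimer(Context context) {
    if (context != null) {
        context.stop();
    }
}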
@Override
public void handleRequest(HttpServerExchange exchange) throws Exception {
    Context context = timer.time();
    try {
        handler.handleRequest(exchange);
    } finally {
        context.close();
    }
}
@Override
protected <T> T call(Supplier<T> callable) {
    try (Context context = requestTimer.time()) {
        return callable.get();
    } catch (AmazonServiceException e) {
        if (!is403or404(e)) {
            exceptionMeter.mark();
        }
        throw e;
    }
}
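is403or404 is a helper on the surrounding class that keeps expected "access denied" and "not found" responses from inflating the exception meter. A plausible sketch of it, assuming only the standard AmazonServiceException status-code accessor (an illustration, not the original implementation):

// Illustrative sketch: treat 403 and 404 as expected responses rather than errors.
private static boolean is403or404(AmazonServiceException e) {
    int status = e.getStatusCode();
    return status == 403 || status == 404;
}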
private void uploadBatch(List<Path> toUpload) {
    final long start = System.currentTimeMillis();
    LOG.info("{} Uploading {} item(s)", logIdentifier, toUpload.size());
    int success = 0;
    for (int i = 0; i < toUpload.size(); i++) {
        final Context context = metrics.getUploadTimer().time();
        final Path file = toUpload.get(i);
        if (!configuration.isCheckForOpenFiles() || !fileOpen(file)) {
            try {
                uploadSingle(i, file);
                metrics.upload();
                success++;
                Files.delete(file);
            } catch (S3ServiceException se) {
                metrics.error();
                LOG.warn("{} Couldn't upload {} due to {} ({}) - {}", logIdentifier, file, se.getErrorCode(),
                        se.getResponseCode(), se.getErrorMessage(), se);
                exceptionNotifier.notify(se, ImmutableMap.of("logIdentifier", logIdentifier, "file", file.toString(),
                        "errorCode", se.getErrorCode(), "responseCode", Integer.toString(se.getResponseCode()),
                        "errorMessage", se.getErrorMessage()));
            } catch (RetryException re) {
                metrics.error();
                LOG.warn("{} Couldn't upload or delete {}", logIdentifier, file, re);
                exceptionNotifier.notify(re.getCause(), ImmutableMap.of("logIdentifier", logIdentifier,
                        "file", file.toString(), "failedAttempts", Integer.toString(re.getNumberOfFailedAttempts())));
            } catch (Exception e) {
                metrics.error();
                LOG.warn("{} Couldn't upload or delete {}", logIdentifier, file, e);
                exceptionNotifier.notify(e, ImmutableMap.of("logIdentifier", logIdentifier, "file", file.toString()));
            } finally {
                context.stop();
            }
        } else {
            LOG.info("{} is in use by another process, will retry upload later", file);
        }
    }
    LOG.info("{} Uploaded {} out of {} item(s) in {}", logIdentifier, success, toUpload.size(), JavaUtils.duration(start));
}
/**
 * A timer measures both the rate that a particular piece of code is called and the distribution
 * of its duration. For example, we want to measure the rate and handling duration of incoming
 * requests.
 */
private static void reportTimer() {
    // Create or fetch (if it is already created) the metric.
    final Timer timer = registry.timer(
            APP_PREFIX.tagged("what", "incoming-request-time").tagged("endpoint", "/v1/get_stuff"));

    // Do this before starting to do the thing. This creates a measurement context object
    // that you can pass around.
    final Context context = timer.time();

    // Do stuff that takes time (e.g., process the request)
    try {
        Thread.sleep(100);
    } catch (final InterruptedException e) {
        e.printStackTrace();
    }

    // Tell the context that it's done. This will register the duration and count one
    // occurrence.
    context.stop();

    // That's it! The rest will be done automatically inside the semantic metrics library. The
    // reported measurements will be kept in the registry.
    // Every time the reporter wants to report, different stats and aggregations (all the
    // stats that you would get from a meter and a histogram are included) will be calculated
    // and datapoints will be created and reported.
}
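The example above stops the context by hand. Since Timer.Context implements Closeable, the same measurement can usually be written with try-with-resources, and Timer.time(Callable) covers value-returning work; several of the snippets above use exactly these patterns. A brief sketch, assuming the same static registry and APP_PREFIX as in the example:

private static String reportTimerWithTryWithResources() throws Exception {
    final Timer timer = registry.timer(
            APP_PREFIX.tagged("what", "incoming-request-time").tagged("endpoint", "/v1/get_stuff"));

    // Context implements Closeable, so try-with-resources stops it even if the timed block throws.
    try (final Context context = timer.time()) {
        Thread.sleep(100); // the work being timed
    }

    // Timer.time(Callable) runs the callable, records its duration, and returns its result.
    return timer.time(() -> "stuff");
}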
/**
 * A helper function which acquires a connection to redis from the pool and then uses it for some operations.
 * This function ensures the connection is closed properly.
 *
 * @param function The function which contacts redis and returns some result
 * @param <X> The type of the result returned.
 * @return The result of contacting redis.
 */
private <X> X contactRedis(Function<Jedis, X> function) {
    try (Jedis jedis = jedisPool.getResource(); Context ignored = contactRedisTimer.time()) {
        return function.apply(jedis);
    } catch (JedisException e) {
        LOG.error("Could not contact redis. Active: {}. Idle: {}", jedisPool.getNumActive(), jedisPool.getNumIdle(), e);
        throw e;
    }
}
@Override
public void accept(Task task) {
    checkPreconditions();
    Timer executeTimer = metricRegistry.timer(name(RedisTaskQueueConsumer.class, "execute"));
    Context context = executeTimer.time();
    TaskState taskState = task.getTaskState();
    TaskConfiguration taskConfiguration = task.getTaskConfiguration();
    BackgroundTask runningTask;
    try {
        runningTask = taskState.taskClass().newInstance();
        runningTask.initialize(taskConfiguration, config, factory, metricRegistry, postProcessor);
        metricRegistry.meter(name(RedisTaskQueueConsumer.class, "initialized")).mark();
        if (taskShouldResume(task)) {
            // Not implemented
            throw new NotImplementedException();
        } else {
            runningTask.start();
            metricRegistry.meter(name(RedisTaskQueueConsumer.class, "run")).mark();
        }
    } catch (IllegalAccessException | InstantiationException e) {
        metricRegistry.meter(name(RedisTaskQueueConsumer.class, "failed")).mark();
        LOG.error("{} had an instantiation exception", task.getTaskState().getId(), e);
        throw new RuntimeException(e);
    } catch (RuntimeException throwable) {
        metricRegistry.meter(name(RedisTaskQueueConsumer.class, "failed")).mark();
        LOG.error("{} could not be completed successfully", task.getTaskState().getId(), throwable);
        throw new RuntimeException(throwable);
    } finally {
        context.stop();
    }
}
public void put(JsonObject value) {
    try (Context context = putTimer.time()) {
        String id = value.getString("id");
        Validate.notEmpty(id);
        try (Jedis resource = jedisPool.getResource()) {
            resource.setex(key(id), expirationInSeconds, CompressionUtils.compress(value.toString().getBytes(utf8)));
        } catch (JedisException e) {
            // make sure we can find back jedis related stuff in kibana
            throw new IllegalStateException("problem connecting to jedis", e);
        }
    }
}
public void delete(String key) {
    try (Context context = delTimer.time()) {
        try (Jedis resource = jedisPool.getResource()) {
            resource.del(key(key));
        } catch (JedisException e) {
            // make sure we can find back jedis related stuff in kibana
            throw new IllegalStateException("problem connecting to jedis", e);
        }
    }
}