/**
 * Add a metric event to be aggregated.
 * Events with the same name, unit, and dimensions will have their values
 * aggregated into {@link StatisticSet}s, with the aggregated data
 * available via {@link #flush}.
 *
 * @param context Metric context to use for dimension information
 * @param name    Metric name
 * @param value   Recorded value for the metric event
 * @param unit    Unit for interpreting the value
 */
public void add(
        final TypedMap context,
        final Metric name,
        final double value,
        final StandardUnit unit) {
    //TODO: avoid recomputing the dimensions on every add for the same context - caching?
    List<Dimension> dimensions = dimensionMapper.getDimensions(name, context);

    DatumKey key = new DatumKey(name.toString(), unit, dimensions);
    statisticsMap.merge(
            key,
            new StatisticSet()
                    .withMaximum(value)
                    .withMinimum(value)
                    .withSampleCount(1D)
                    .withSum(value),
            MetricDataAggregator::sum);
}
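// A minimal sketch of the merge function referenced above; the real
// MetricDataAggregator.sum is not shown here, so treat this as an
// assumption. Combining two StatisticSets is element-wise: take the
// extremes of minimum/maximum and add the sample counts and sums.
private static StatisticSet sum(final StatisticSet a, final StatisticSet b) {
    return new StatisticSet()
            .withMaximum(Math.max(a.getMaximum(), b.getMaximum()))
            .withMinimum(Math.min(a.getMinimum(), b.getMinimum()))
            .withSampleCount(a.getSampleCount() + b.getSampleCount())
            .withSum(a.getSum() + b.getSum());
}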
private MetricDatum makeDatum(
        final String id,
        final String name,
        final double sum,
        final double min,
        final double max,
        final int count,
        final StandardUnit unit) {
    MetricDatum md = new MetricDatum().withMetricName(name).withUnit(unit);

    final StatisticSet statSet = new StatisticSet()
            .withSampleCount(Double.valueOf(count))
            .withSum(sum)
            .withMinimum(min)
            .withMaximum(max);
    md.setStatisticValues(statSet);

    List<Dimension> dimensions = new ArrayList<>(1);
    Dimension trace = new Dimension().withName(ContextData.ID.name).withValue(id);
    dimensions.add(trace);
    md.setDimensions(dimensions);

    return md;
}
@Override
public List<Measurement> measure() {
    return eventQueues.entrySet().stream()
            .map((Map.Entry<String, CircularFifoQueue<Event>> e) -> {
                List<Event> events = e.getValue().stream().collect(toList());
                int all = events.size();
                long success = events.stream().filter(a -> a == Event.SUCCESS).count();
                long failure = events.stream().filter(a -> a == Event.FAILURE).count();
                double percentFailureDouble = all > 0 ? (double) failure / (double) all : 0;
                boolean enoughDataToAlert = all == numberToKeep;
                String display = String.format("Last %s calls: %s success, %s failure (%.2f%% failure)%s",
                        Integer.min(all, numberToKeep),
                        success,
                        failure,
                        percentFailureDouble * 100,
                        enoughDataToAlert ? "" : " - not enough calls to report status yet"
                );
                return new Measurement(e.getKey(),
                        getStatusFromPercentage(enoughDataToAlert, percentFailureDouble),
                        display,
                        new Measurement.CloudwatchValue(percentFailureDouble * 100, StandardUnit.Percent));
            })
            .collect(toList());
}
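// A hypothetical sketch of the status helper used above; the real
// getStatusFromPercentage is not shown, and both the return type and
// the thresholds here are assumptions. Until the circular queue is
// full there is not enough data to alert, so stay informational.
private String getStatusFromPercentage(boolean enoughDataToAlert, double percentFailure) {
    if (!enoughDataToAlert) {
        return "INFO";
    }
    if (percentFailure >= 0.5) {   // assumed threshold: half the window failed
        return "ERROR";
    }
    if (percentFailure >= 0.1) {   // assumed threshold: a tenth of the window failed
        return "WARN";
    }
    return "OK";
}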
@Override
public List<Measurement> measure() {
    SystemStatus s = new SystemStatus();
    List<Measurement> measurements = new ArrayList<>();

    double load = s.load();
    String formatted = new DecimalFormat("#.##").format(load);

    String status;
    if (load > 10) {
        status = "ERROR";
    } else if (load > 5) {
        status = "WARN";
    } else {
        status = "INFO";
    }

    measurements.add(new Measurement("load.avg", status, formatted,
            new Measurement.CloudwatchValue(load, StandardUnit.None)));
    return measurements;
}
/**
 * The {@link Snapshot} values of a {@link Histogram} are reported as a raw {@link StatisticSet}.
 * In other words, the conversion using the duration factor does NOT apply.
 * <p>
 * Please note, the reported values are submitted only if they show some data (greater than zero), in order to:
 * <p>
 * 1. save some money
 * 2. prevent com.amazonaws.services.cloudwatch.model.InvalidParameterValueException if an empty {@link Snapshot}
 *    is submitted
 * <p>
 * If {@link Builder#withZeroValuesSubmission()} is {@code true}, then all values will be submitted.
 *
 * @see Histogram#getSnapshot
 */
private void processHistogram(final String metricName, final Histogram histogram, final List<MetricDatum> metricData) {
    final Snapshot snapshot = histogram.getSnapshot();
    if (builder.withZeroValuesSubmission || snapshot.size() > 0) {
        for (final Percentile percentile : builder.percentiles) {
            final double value = snapshot.getValue(percentile.getQuantile());
            stageMetricDatum(true, metricName, value, StandardUnit.None, percentile.getDesc(), metricData);
        }
    }

    // prevent an empty snapshot from causing an InvalidParameterValueException
    if (snapshot.size() > 0) {
        stageMetricDatum(builder.withArithmeticMean, metricName, snapshot.getMean(), StandardUnit.None, DIMENSION_SNAPSHOT_MEAN, metricData);
        stageMetricDatum(builder.withStdDev, metricName, snapshot.getStdDev(), StandardUnit.None, DIMENSION_SNAPSHOT_STD_DEV, metricData);
        stageMetricDatumWithRawSnapshot(builder.withStatisticSet, metricName, snapshot, StandardUnit.None, metricData);
    }
}
/**
 * Please note, the reported values are submitted only if they show some data (greater than zero), in order to:
 * <p>
 * 1. save some money
 * 2. prevent com.amazonaws.services.cloudwatch.model.InvalidParameterValueException if an empty {@link Snapshot}
 *    is submitted
 * <p>
 * If {@link Builder#withZeroValuesSubmission()} is {@code true}, then all values will be submitted.
 */
private void stageMetricDatum(final boolean metricConfigured,
                              final String metricName,
                              final double metricValue,
                              final StandardUnit standardUnit,
                              final String dimensionValue,
                              final List<MetricDatum> metricData) {
    // Only submit metrics that show some data, so let's save some money
    if (metricConfigured && (builder.withZeroValuesSubmission || metricValue > 0)) {
        final Set<Dimension> dimensions = new LinkedHashSet<>(builder.globalDimensions);
        dimensions.add(new Dimension().withName(DIMENSION_NAME_TYPE).withValue(dimensionValue));

        metricData.add(new MetricDatum()
                .withTimestamp(new Date(builder.clock.getTime()))
                .withValue(cleanMetricValue(metricValue))
                .withMetricName(metricName)
                .withDimensions(dimensions)
                .withUnit(standardUnit));
    }
}
private void stageMetricDatumWithConvertedSnapshot(final boolean metricConfigured,
                                                   final String metricName,
                                                   final Snapshot snapshot,
                                                   final StandardUnit standardUnit,
                                                   final List<MetricDatum> metricData) {
    if (metricConfigured) {
        double scaledSum = convertDuration(LongStream.of(snapshot.getValues()).sum());
        final StatisticSet statisticSet = new StatisticSet()
                .withSum(scaledSum)
                .withSampleCount((double) snapshot.size())
                .withMinimum(convertDuration(snapshot.getMin()))
                .withMaximum(convertDuration(snapshot.getMax()));

        final Set<Dimension> dimensions = new LinkedHashSet<>(builder.globalDimensions);
        dimensions.add(new Dimension().withName(DIMENSION_NAME_TYPE).withValue(DIMENSION_SNAPSHOT_SUMMARY));

        metricData.add(new MetricDatum()
                .withTimestamp(new Date(builder.clock.getTime()))
                .withMetricName(metricName)
                .withDimensions(dimensions)
                .withStatisticValues(statisticSet)
                .withUnit(standardUnit));
    }
}
private void stageMetricDatumWithRawSnapshot(final boolean metricConfigured,
                                             final String metricName,
                                             final Snapshot snapshot,
                                             final StandardUnit standardUnit,
                                             final List<MetricDatum> metricData) {
    if (metricConfigured) {
        double total = LongStream.of(snapshot.getValues()).sum();
        final StatisticSet statisticSet = new StatisticSet()
                .withSum(total)
                .withSampleCount((double) snapshot.size())
                .withMinimum((double) snapshot.getMin())
                .withMaximum((double) snapshot.getMax());

        final Set<Dimension> dimensions = new LinkedHashSet<>(builder.globalDimensions);
        dimensions.add(new Dimension().withName(DIMENSION_NAME_TYPE).withValue(DIMENSION_SNAPSHOT_SUMMARY));

        metricData.add(new MetricDatum()
                .withTimestamp(new Date(builder.clock.getTime()))
                .withMetricName(metricName)
                .withDimensions(dimensions)
                .withStatisticValues(statisticSet)
                .withUnit(standardUnit));
    }
}
@Override
protected void runOneIteration() throws Exception {
    try {
        _cloudWatch.putMetricData(
                new PutMetricDataRequest()
                        .withNamespace(NAMESPACE)
                        .withMetricData(
                                new MetricDatum()
                                        .withTimestamp(new Date())
                                        .withMetricName(ACTIVE_AND_PENDING_SCANS)
                                        .withValue((double) (_activeScanCount + _pendingScanCount))
                                        .withUnit(StandardUnit.Count)
                                        .withDimensions(_dimensions)));
    } catch (AmazonClientException e) {
        _log.error("Failed to publish active and pending scans metric", e);
    }
}
@Test
public void sendMetricFromHeaderValues() throws Exception {
    template.send("direct:start", ExchangePattern.InOnly, new Processor() {
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(CwConstants.METRIC_NAMESPACE, "camel.apache.org/overriden");
            exchange.getIn().setHeader(CwConstants.METRIC_NAME, "OverridenMetric");
            exchange.getIn().setHeader(CwConstants.METRIC_VALUE, Double.valueOf(3));
            exchange.getIn().setHeader(CwConstants.METRIC_UNIT, StandardUnit.Bytes.toString());
            exchange.getIn().setHeader(CwConstants.METRIC_TIMESTAMP, LATER);
        }
    });

    ArgumentCaptor<PutMetricDataRequest> argument = ArgumentCaptor.forClass(PutMetricDataRequest.class);
    verify(cloudWatchClient).putMetricData(argument.capture());

    assertEquals("camel.apache.org/overriden", argument.getValue().getNamespace());
    assertEquals("OverridenMetric", argument.getValue().getMetricData().get(0).getMetricName());
    assertEquals(Double.valueOf(3), argument.getValue().getMetricData().get(0).getValue());
    assertEquals(StandardUnit.Bytes.toString(), argument.getValue().getMetricData().get(0).getUnit());
    assertEquals(LATER, argument.getValue().getMetricData().get(0).getTimestamp());
}
@Test
public void useDefaultValuesForMetricUnitAndMetricValue() throws Exception {
    template.send("direct:start", ExchangePattern.InOnly, new Processor() {
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(CwConstants.METRIC_NAME, "errorCount");
        }
    });

    ArgumentCaptor<PutMetricDataRequest> argument = ArgumentCaptor.forClass(PutMetricDataRequest.class);
    verify(cloudWatchClient).putMetricData(argument.capture());

    assertEquals("errorCount", argument.getValue().getMetricData().get(0).getMetricName());
    assertEquals(Double.valueOf(1), argument.getValue().getMetricData().get(0).getValue());
    assertEquals(StandardUnit.Count.toString(), argument.getValue().getMetricData().get(0).getUnit());
}
@Override
public void run() {
    try {
        List<MetricDatum> metricData = new ArrayList<>();
        Date now = new Date();
        metricData.addAll(heartbeats.entrySet().stream()
                .map(entry -> new MetricDatum().withMetricName("Heartbeats")
                        .withDimensions(new Dimension().withName("Client").withValue(entry.getKey()))
                        .withTimestamp(now)
                        .withUnit(StandardUnit.Count)
                        .withValue(entry.getValue().doubleValue()))
                .collect(Collectors.toList()));
        heartbeats.clear();

        // CloudWatch caps the number of datums per PutMetricData call, so send in chunks.
        for (List<MetricDatum> chunk : partitionList(metricData, 20)) {
            awsClient.putMetricData(new PutMetricDataRequest().withNamespace(namespace).withMetricData(chunk));
        }
    } catch (Throwable e) {
        log.error("Failed to publish CloudWatch metrics: {}", e.toString());
    }
}
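// A minimal sketch of the partitionList helper used above; its real
// implementation is not shown, so this is an assumption. It splits a
// list into consecutive chunks of at most `size` elements (the chunks
// are subList views, which is fine here because the backing list is
// local and not modified afterwards).
private static <T> List<List<T>> partitionList(List<T> list, int size) {
    List<List<T>> chunks = new ArrayList<>();
    for (int i = 0; i < list.size(); i += size) {
        chunks.add(list.subList(i, Math.min(i + size, list.size())));
    }
    return chunks;
}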
void reportCounter(Map.Entry<String, ? extends Counting> entry, String typeDimValue, List<MetricDatum> data) {
    Counting metric = entry.getValue();

    final long diff = diffLast(metric);
    if (diff == 0) {
        // Don't submit metrics that have not changed. No reason to keep these alive. Also saves on CloudWatch
        // costs.
        return;
    }

    DemuxedKey key = new DemuxedKey(appendGlobalDimensions(entry.getKey()));
    Iterables.addAll(data, key.newDatums(typeDimName, typeDimValue, new Function<MetricDatum, MetricDatum>() {
        @Override
        public MetricDatum apply(MetricDatum datum) {
            return datum.withValue((double) diff).withUnit(StandardUnit.Count);
        }
    }));
}
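// A plausible sketch of the diffLast helper referenced above; the real
// implementation is not shown, so this is an assumption. It remembers
// the last observed count per metric and reports only the change since
// the previous poll. The lastPolledCounts field is also assumed, e.g. a
// ConcurrentHashMap<Counting, Long> keyed by the Counting instance.
private long diffLast(Counting metric) {
    long currentCount = metric.getCount();
    Long lastCount = lastPolledCounts.put(metric, currentCount);
    return currentCount - (lastCount == null ? 0L : lastCount);
}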
public DatumKey(
        final String name,
        final StandardUnit unit,
        final List<Dimension> dimensions) {
    this.name = name;
    this.unit = unit;
    this.dimensions = dimensions;
}
@Override
public void count(final Metric label, final long delta, final TypedMap context) {
    if (!running.get()) {
        log.debug("count called on shutdown recorder");
        //TODO: do something besides silently ignoring this - perhaps throw IllegalStateException?
        return;
    }
    aggregator.add(context, label, Long.valueOf(delta).doubleValue(), StandardUnit.Count);
}
@Test
public void record_and_shutdown() throws Exception {
    final DimensionMapper mapper = new DimensionMapper.Builder()
            .addGlobalDimension(ContextData.ID)
            .build();

    final String namespace = "spacename";
    final String id = UUID.randomUUID().toString();
    final TypedMap data = ContextData.withId(id).build();
    final double time = 123.45;

    final AmazonCloudWatch client = mock(AmazonCloudWatch.class);

    final CloudWatchRecorder recorder = new CloudWatchRecorder(client, namespace, 60, 120, mapper);

    final MetricRecorder.Context context = recorder.context(data);
    context.record(M_TIME, time, Unit.MILLISECOND, Instant.now());
    context.close();

    // shutdown right away, before first scheduled publish
    recorder.shutdown();

    final List<MetricDatum> expected = new ArrayList<>(1);
    expected.add(makeDatum(id, M_TIME.toString(), time, time, time, 1, StandardUnit.Milliseconds));

    verify(client).putMetricData(argThat(new RequestMatcher(namespace, expected)));
}
@Test
public void single() throws Exception {
    final DimensionMapper mapper = new DimensionMapper.Builder()
            .addGlobalDimension(ContextData.ID)
            .build();

    final Metric name = Metric.define("SomeMetric");
    final double value = 3.14;
    final StandardUnit unit = StandardUnit.Terabits;
    final TypedMap context = ContextData.withId(UUID.randomUUID().toString()).build();

    MetricDataAggregator aggregator = new MetricDataAggregator(mapper);
    aggregator.add(context, name, value, unit);

    List<MetricDatum> ags = aggregator.flush();

    assertEquals("One metric datum should aggregate to one entry", 1, ags.size());
    assertEquals("Metric datum has wrong name", name.toString(), ags.get(0).getMetricName());
    assertEquals("Metric datum has wrong unit", unit.toString(), ags.get(0).getUnit());

    StatisticSet stats = ags.get(0).getStatisticValues();
    assertEquals("Metric datum has wrong stats value", Double.valueOf(value), stats.getSum());
    assertEquals("Metric datum has wrong stats value", Double.valueOf(value), stats.getMinimum());
    assertEquals("Metric datum has wrong stats value", Double.valueOf(value), stats.getMaximum());
    assertEquals("Metric datum has wrong stats count", Double.valueOf(1), stats.getSampleCount());

    assertTrue("Flush with no data was non-empty", aggregator.flush().isEmpty());
}
private void putMemoryStatus(List<Measurement> measurements, String key, long used, long max) {
    if (max == 0 || used == -1) {
        return;
    }
    // Multiply before dividing so a small max cannot truncate to zero (the
    // original used / (max / 100) divides by zero whenever max < 100).
    long percentUsed = (used * 100) / max;
    long percentLeft = 100 - percentUsed;
    String displayValue = toMB(used) + " of " + toMB(max) + " MB (" + percentUsed + "%)";
    measurements.add(new Measurement(key, status(percentLeft), displayValue,
            new Measurement.CloudwatchValue(percentUsed, StandardUnit.Percent)));
}
@Override
public List<Measurement> measure() {
    SystemStatus s = new SystemStatus();
    List<Measurement> measurements = new ArrayList<>();

    long open = s.openFileHandles();
    long max = s.maxFileHandles();
    double percent = ((double) open / (double) max) * 100;

    String displayValue = String.format("%s of %s filehandles used (%.2f%%)", open, max, percent);
    measurements.add(new Measurement("filehandles", statusFromOpenFileHandles(open), displayValue,
            new Measurement.CloudwatchValue(percent, StandardUnit.Percent)));
    return measurements;
}
private void processCounter(final String metricName, final Counting counter, final List<MetricDatum> metricData) {
    long currentCount = counter.getCount();
    Long lastCount = lastPolledCounts.get(counter);
    lastPolledCounts.put(counter, currentCount);

    if (lastCount == null) {
        lastCount = 0L;
    }

    // Only submit metrics that have changed - let's save some money!
    final long delta = currentCount - lastCount;
    stageMetricDatum(true, metricName, delta, StandardUnit.Count, DIMENSION_COUNT, metricData);
}
private StandardUnit toStandardUnit(final TimeUnit timeUnit) {
    switch (timeUnit) {
        case SECONDS:
            return StandardUnit.Seconds;
        case MILLISECONDS:
            return StandardUnit.Milliseconds;
        case MICROSECONDS:
            return StandardUnit.Microseconds;
        default:
            throw new IllegalArgumentException("Unsupported TimeUnit: " + timeUnit);
    }
}
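// A small usage sketch (the reporter context is assumed): a caller would
// typically resolve its configured duration unit once and tag every
// duration datum with the result. Note that TimeUnit.NANOSECONDS falls
// through to the IllegalArgumentException above, since CloudWatch's
// StandardUnit enum has no nanosecond value.
final StandardUnit durationUnit = toStandardUnit(TimeUnit.MILLISECONDS); // StandardUnit.Milliseconds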
public static void main(String[] args) {
    final String USAGE =
            "To run this example, supply a data point:\n" +
            "Ex: PutMetricData <data_point>\n";

    if (args.length != 1) {
        System.out.println(USAGE);
        System.exit(1);
    }

    Double dataPoint = Double.parseDouble(args[0]);

    final AmazonCloudWatch cw = AmazonCloudWatchClientBuilder.defaultClient();

    Dimension dimension = new Dimension()
            .withName("UNIQUE_PAGES")
            .withValue("URLS");

    MetricDatum datum = new MetricDatum()
            .withMetricName("PAGES_VISITED")
            .withUnit(StandardUnit.None)
            .withValue(dataPoint)
            .withDimensions(dimension);

    PutMetricDataRequest request = new PutMetricDataRequest()
            .withNamespace("SITE/TRAFFIC")
            .withMetricData(datum);

    PutMetricDataResult response = cw.putMetricData(request);

    System.out.printf("Successfully put data point %f%n", dataPoint);
}
private void reportTimer(String key, Collection<MetricDatum> data, Map.Entry<String, Timer> met) {
    Timer timer = met.getValue();
    Snapshot snapshot = timer.getSnapshot();

    if (reportAggregates) {
        reportAggregate(key, data, "count", null, timer.getCount());
        reportAggregate(key, data, "rate", "1minute", timer.getOneMinuteRate());
        reportAggregate(key, data, "rate", "5minute", timer.getFiveMinuteRate());
        reportAggregate(key, data, "rate", "15minute", timer.getFifteenMinuteRate());
        reportAggregate(key, data, "rate", "mean", timer.getMeanRate());
        reportSnapshot(data, snapshot, key);
    } else {
        // if no data, don't bother Amazon with it.
        if (snapshot.size() == 0) {
            return;
        }
        double sum = 0;
        for (double val : snapshot.getValues()) {
            sum += val;
        }

        // Metrics works in Nanoseconds, which is not one of Amazon's favorites.
        double max = (double) TimeUnit.NANOSECONDS.toMicros(snapshot.getMax());
        double min = (double) TimeUnit.NANOSECONDS.toMicros(snapshot.getMin());
        double sumMicros = TimeUnit.NANOSECONDS.toMicros((long) sum);

        StatisticSet stats = new StatisticSet()
                .withMaximum(max)
                .withMinimum(min)
                .withSum(sumMicros)
                .withSampleCount((double) snapshot.getValues().length);

        if (LOG.isDebugEnabled()) {
            LOG.debug("timer {}: {}", met.getKey(), stats);
        }

        data.add(new MetricDatum().withMetricName(met.getKey())
                .withDimensions(dimensions)
                .withStatisticValues(stats)
                .withUnit(StandardUnit.Microseconds));
    }
}
private StandardUnit determineUnit(Exchange exchange) {
    String unit = exchange.getIn().getHeader(CwConstants.METRIC_UNIT, String.class);
    if (unit == null) {
        unit = getConfiguration().getUnit();
    }
    return unit != null ? StandardUnit.valueOf(unit) : StandardUnit.Count;
}
public static List<Datum> getMetric(String metricName, Dimension dim, String namespace,
        Date startTime, int period, Statistic statistic, StandardUnit unit) {
    connect();

    GetMetricStatisticsRequest req = new GetMetricStatisticsRequest();
    req.setMetricName(metricName);

    List<Dimension> dimensions = new ArrayList<Dimension>();
    dimensions.add(dim);
    req.setDimensions(dimensions);

    req.setNamespace(namespace);
    req.setStartTime(startTime);
    req.setEndTime(new Date());

    // Round the period down to a multiple of 60 seconds, as CloudWatch expects.
    if (period % 60 != 0)
        period = (period / 60) * 60;
    req.setPeriod(period);

    if (unit != null)
        req.setUnit(unit);

    if (statistic == null)
        statistic = Statistic.Average;
    List<String> statistics = new ArrayList<String>();
    statistics.add(statistic.name());
    req.setStatistics(statistics);

    GetMetricStatisticsResult res = client.getMetricStatistics(req);
    return Datum.getAllData(res.getDatapoints(), metricName, statistic);
}
public static void retrieveMetrics(List<String> ids, VirtualMachine vm, String[] metricsToBeGet,
        String localPath, Date date, int period, Statistic statistic, StandardUnit unit) throws Exception {
    if (ids.size() == 0)
        return;

    int count = 1;
    if (metricsToBeGet != null && metricsToBeGet.length > 0)
        for (String id : ids) {
            retrieveMetrics(id, vm, count, metricsToBeGet, localPath, date, period, statistic, unit);
            ++count;
        }
}
public void retrieveMetrics(String[] metricsToBeGet, String localPath, Date date,
        int period, Statistic statistic, StandardUnit unit) throws Exception {
    int count = 1;
    if (metricsToBeGet != null && metricsToBeGet.length > 0)
        for (it.cloud.Instance i : instancesSet) {
            retrieveMetrics(i.id, this, count, metricsToBeGet, localPath, date, period, statistic, unit);
            ++count;
        }
}
public static void retrieveMetrics(String id, VirtualMachine vm, int count, String[] metricsToBeGet,
        String localPath, Date date, int period, Statistic statistic, StandardUnit unit) throws Exception {
    if (metricsToBeGet != null && metricsToBeGet.length > 0)
        for (String s : metricsToBeGet) {
            Path file = Paths.get(localPath, vm.name + count, s + ".csv");
            file.toFile().getParentFile().mkdirs();
            CloudWatch.writeInstanceMetricToFile(file, s, id, date, period, statistic, unit);
        }
}
/**
 * Convert the unit from the metric into one that CloudWatch likes.
 */
private StandardUnit convertUnit(String unitString) {
    if (unitString == null) {
        return StandardUnit.None;
    }
    String lowerUnitString = unitString.toLowerCase();
    StandardUnit unit = AWS_UNIT_MAP.get(lowerUnitString);
    if (unit == null) {
        return DEFAULT_AWS_UNIT;
    } else {
        return unit;
    }
}
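// A plausible sketch of the lookup table assumed above; the real
// AWS_UNIT_MAP and DEFAULT_AWS_UNIT are not shown, so both the entries
// and the default are assumptions. Keys are lower-cased to match the
// toLowerCase() call in convertUnit.
private static final StandardUnit DEFAULT_AWS_UNIT = StandardUnit.None;
private static final Map<String, StandardUnit> AWS_UNIT_MAP = new HashMap<>();
static {
    AWS_UNIT_MAP.put("seconds", StandardUnit.Seconds);
    AWS_UNIT_MAP.put("milliseconds", StandardUnit.Milliseconds);
    AWS_UNIT_MAP.put("bytes", StandardUnit.Bytes);
    AWS_UNIT_MAP.put("count", StandardUnit.Count);
    AWS_UNIT_MAP.put("percent", StandardUnit.Percent);
}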
protected void emitMetrics(String service, String operation, long startTime, Throwable exception) {
    final MetricDatum latency = newDatum(service, operation, startTime)
            .withMetricName("Latency")
            .withUnit(StandardUnit.Milliseconds)
            .withValue((double) (System.currentTimeMillis() - startTime));
    metricBatcher.addDatum("AMM", latency);

    final MetricDatum success = newDatum(service, operation, startTime)
            .withMetricName("Success")
            .withValue(exception == null ? 1.0 : 0.0)
            .withUnit(StandardUnit.Count);
    metricBatcher.addDatum("AMM", success);
}
@Override
public Collection<MetricDatum> getMetricDatumCollection() {
    Collection<MetricDatum> cmd = new HashSet<MetricDatum>(2);

    MetricDatum md2 = new MetricDatum();
    md2.setUnit(StandardUnit.Count);
    md2.setMetricName("qm-servers-functional");

    try {
        Client bc = new ClientImpl(beanstalkdip, beanstalkdport, true);
        List<String> lt = bc.listTubes();
        double tot = 0.0;
        for (String tube : lt) {
            Map<String, String> st = bc.statsTube(tube);
            tot += Integer.parseInt(st.get("current-jobs-ready"));
        }

        MetricDatum md1 = new MetricDatum();
        md1.setUnit(StandardUnit.Count);
        md1.setValue(tot);
        md1.setMetricName("queues-all-jobs-ready");

        md2.setValue(1.0);
        cmd.add(md1);
    } catch (Exception e) {
        // The beanstalkd server could not be reached; report it as non-functional.
        md2.setValue(0.0);
    }
    cmd.add(md2);
    return cmd;
}
@Override
public Collection<MetricDatum> getMetricDatumCollection() {
    Collection<MetricDatum> cmd = new HashSet<MetricDatum>(2);

    MetricDatum md = new MetricDatum();
    md.setUnit(StandardUnit.Count);
    md.setMetricName(metricname);
    if (ping()) {
        md.setValue(1.0);
    } else {
        md.setValue(0.0);
    }
    cmd.add(md);
    return cmd;
}
private void addDatum(String name, double value, LinkedList<PutMetricDataRequest> requests, Date timestamp) {
    if (logger.isDebugEnabled()) {
        logger.debug("Adding Datum {} with value {} at {}", name, value, timestamp);
    }
    // Start a new request once the current one has reached the per-request datum cap.
    if (requests.isEmpty() || requests.getLast().getMetricData().size() == MAX_CLOUDWATCH_DATUM_PER_REQUEST) {
        requests.add(createRequest());
    }
    PutMetricDataRequest request = requests.getLast();
    MetricDatum datum = new MetricDatum()
            .withTimestamp(timestamp)
            .withValue(value)
            .withMetricName(name)
            .withUnit(StandardUnit.None)
            .withDimensions(createDimensions());
    request.withMetricData(datum);
}
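// A minimal sketch of how the queued requests might be sent once a
// reporting cycle has finished; the sendRequests method and the
// cloudWatchClient field are assumptions, not part of the original code.
private void sendRequests(LinkedList<PutMetricDataRequest> requests) {
    for (PutMetricDataRequest request : requests) {
        cloudWatchClient.putMetricData(request);
    }
    requests.clear();
}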
private void sendCloudWatchConsistencyAlert() {
    MetricDatum datum = new MetricDatum();
    datum.setMetricName(cloudWatchConsistencyMetric);
    datum.setUnit(StandardUnit.Count);
    datum.setValue(1.0);

    PutMetricDataRequest request = new PutMetricDataRequest();
    request.setNamespace(namespace);
    request.setMetricData(Collections.singleton(datum));

    cloudWatch.putMetricData(request);
}
private void sendCloudWatchTimeoutAlert() {
    MetricDatum datum = new MetricDatum();
    datum.setMetricName(cloudWatchTimeoutMetric);
    datum.setUnit(StandardUnit.Count);
    datum.setValue(1.0);

    PutMetricDataRequest request = new PutMetricDataRequest();
    request.setNamespace(namespace);
    request.setMetricData(Collections.singleton(datum));

    cloudWatch.putMetricData(request);
}
@Test
public void no_aggregation() throws Exception {
    final DimensionMapper mapper = new DimensionMapper.Builder()
            .addGlobalDimension(ContextData.ID)
            .build();

    final String namespace = "testytesttest";
    final String id = UUID.randomUUID().toString();

    final Integer time = Integer.valueOf(23);
    final Integer load = Integer.valueOf(87);

    final AmazonCloudWatch client = mock(AmazonCloudWatch.class);

    CloudWatchRecorder recorder = null;
    try {
        // no jitter, publish soon
        recorder = new CloudWatchRecorder(client, namespace, 0, 1, mapper);

        final TypedMap data = ContextData.withId(id).build();
        final MetricRecorder.Context context = recorder.context(data);

        final Instant timestamp = Instant.now();
        context.record(M_TIME, time, Unit.MILLISECOND, timestamp);
        context.count(M_FAIL, 1);
        context.record(M_PERC, load, Unit.PERCENT, timestamp);
        context.close();

        // allow time for one publish
        Thread.sleep(1024);
    } finally {
        recorder.shutdown();
    }

    final List<MetricDatum> expected = new ArrayList<>(3);
    expected.add(makeDatum(id, M_TIME.toString(), time, time, time, 1, StandardUnit.Milliseconds));
    expected.add(makeDatum(id, M_PERC.toString(), load, load, load, 1, StandardUnit.Percent));
    expected.add(makeDatum(id, M_FAIL.toString(), 1, 1, 1, 1, StandardUnit.Count));

    verify(client).putMetricData(argThat(new RequestMatcher(namespace, expected)));
}
@Test
public void aggregation() throws Exception {
    final DimensionMapper mapper = new DimensionMapper.Builder()
            .addGlobalDimension(ContextData.ID)
            .build();

    final String namespace = "testytesttest";
    final String id = UUID.randomUUID().toString();

    final double[] timeVals = {867, 5309};
    final double[] percVals = {0.01, 97.3, 3.1415};
    final int[] failCnts = {1, 3, 0, 42};

    final List<MetricDatum> expected = new ArrayList<>(3);
    expected.add(makeDatum(id,
                           M_TIME.toString(),
                           Arrays.stream(timeVals).sum(),
                           Arrays.stream(timeVals).min().getAsDouble(),
                           Arrays.stream(timeVals).max().getAsDouble(),
                           timeVals.length,
                           StandardUnit.Milliseconds));
    expected.add(makeDatum(id,
                           M_PERC.toString(),
                           Arrays.stream(percVals).sum(),
                           Arrays.stream(percVals).min().getAsDouble(),
                           Arrays.stream(percVals).max().getAsDouble(),
                           percVals.length,
                           StandardUnit.Percent));
    expected.add(makeDatum(id,
                           M_FAIL.toString(),
                           Arrays.stream(failCnts).sum(),
                           Arrays.stream(failCnts).min().getAsInt(),
                           Arrays.stream(failCnts).max().getAsInt(),
                           failCnts.length,
                           StandardUnit.Count));

    final AmazonCloudWatch client = mock(AmazonCloudWatch.class);

    CloudWatchRecorder recorder = null;
    try {
        // no jitter, publish soon
        recorder = new CloudWatchRecorder(client, namespace, 0, 1, mapper);

        final TypedMap data = ContextData.withId(id).build();
        final MetricRecorder.Context context = recorder.context(data);

        context.count(M_FAIL, failCnts[0]);
        context.record(M_PERC, percVals[0], Unit.PERCENT, Instant.now());
        context.record(M_TIME, timeVals[0], Unit.MILLISECOND, Instant.now());
        context.count(M_FAIL, failCnts[1]);
        context.record(M_PERC, percVals[1], Unit.PERCENT, Instant.now());
        context.record(M_TIME, timeVals[1], Unit.MILLISECOND, Instant.now());
        context.count(M_FAIL, failCnts[2]);
        context.record(M_PERC, percVals[2], Unit.PERCENT, Instant.now());
        context.count(M_FAIL, failCnts[3]);
        context.close();

        // allow time for one publish
        Thread.sleep(1024);
    } finally {
        recorder.shutdown();
    }

    verify(client).putMetricData(argThat(new RequestMatcher(namespace, expected)));
}
public CloudwatchValue(double value, StandardUnit unit) {
    this.value = value;
    this.unit = unit;
}
public CloudwatchStatistic(String key, Double value, StandardUnit unit) {
    this.key = key;
    this.value = value;
    this.unit = unit;
}
public CloudwatchStatistic(String key, Double value) {
    this(key, value, StandardUnit.Count);
}