public JobRequestStats(JobRequest jobRequest, final int latencyNumBuckets, final int latencyGranularityFactor,
    final double latencyWarmUpSecs) {
  this.jobRequest = jobRequest;
  for (PerfStatType s : PerfStatType.values()) {
    _stats.put(s, new MutableLong(0));
  }
  _latencyBuckets = new ArrayList<MutableLong>(latencyNumBuckets);
  for (int i = 0; i < latencyNumBuckets; i++) {
    _latencyBuckets.add(new MutableLong(0));
  }
  _latencyGranularity = latencyGranularityFactor;
  _latencyWarmupInSecs = latencyWarmUpSecs;
  _toUs = 1000; // nanoseconds to microseconds
  _totalLatency = 0;
  this.resetStats();
}
public synchronized void resetStats() {
  for (MutableLong ml : _stats.values()) {
    ml.setValue(0);
  }
  _receiveEndTimeInNanos = 0;
  _receiveStartTimeInNanos = 0;
  _totalLatency = 0;
  for (MutableLong bucket : _latencyBuckets) {
    bucket.setValue(0);
  }
  _stats.get(PerfStatType.LATENCY_USEC_MIN).setValue(Long.MAX_VALUE);
  _stats.get(PerfStatType.LATENCY_USEC_MAX).setValue(0);
  expectedAcksSet = new HashSet<>();
  expectedResponseSet = new HashSet<>();
}
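// A minimal standalone sketch (not from the original source; assumes the commons-lang3
// dependency) of why resetStats() seeds LATENCY_USEC_MIN with Long.MAX_VALUE and
// LATENCY_USEC_MAX with 0: the first recorded sample then initializes both bounds
// without needing a special "first sample" flag.
import org.apache.commons.lang3.mutable.MutableLong;

public class MinMaxSeedSketch {
  public static void main(String[] args) {
    MutableLong min = new MutableLong(Long.MAX_VALUE);
    MutableLong max = new MutableLong(0);
    for (long sample : new long[] {42, 7, 99}) {
      if (min.longValue() > sample) { min.setValue(sample); }
      if (max.longValue() < sample) { max.setValue(sample); }
    }
    System.out.println("min=" + min + " max=" + max); // min=7 max=99
  }
}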
@Override
public void setup(OperatorContext context) {
  try {
    fs = getHDFSInstance();
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  this.context = context;
  lastTimeStamp = System.currentTimeMillis();
  fileCounters.setCounter(Counters.TOTAL_BYTES_WRITTEN, new MutableLong());
  fileCounters.setCounter(Counters.TOTAL_TIME_ELAPSED, new MutableLong());
  super.setup(context);
}
@Override
public void populateDAG(DAG dag, Configuration conf) {
  String filePath = "HDFSOutputOperatorBenchmarkingApp/" + System.currentTimeMillis();
  dag.setAttribute(DAG.STREAMING_WINDOW_SIZE_MILLIS, 1000);
  RandomWordGenerator wordGenerator = dag.addOperator("wordGenerator", RandomWordGenerator.class);
  dag.getOperatorMeta("wordGenerator").getMeta(wordGenerator.output)
      .getAttributes().put(PortContext.QUEUE_CAPACITY, 10000);
  dag.getOperatorMeta("wordGenerator").getAttributes()
      .put(OperatorContext.APPLICATION_WINDOW_COUNT, 1);
  FSByteOutputOperator hdfsOutputOperator = dag.addOperator("hdfsOutputOperator", new FSByteOutputOperator());
  hdfsOutputOperator.setFilePath(filePath);
  dag.getOperatorMeta("hdfsOutputOperator").getAttributes()
      .put(OperatorContext.COUNTERS_AGGREGATOR, new BasicCounters.LongAggregator<MutableLong>());
  dag.addStream("Generator2HDFSOutput", wordGenerator.output, hdfsOutputOperator.input);
}
@Override
@SuppressWarnings("unchecked")
public Response processStats(BatchedOperatorStats batchedOperatorStats) {
  BasicCounters<MutableLong> fileCounters = null;
  for (OperatorStats operatorStats : batchedOperatorStats.getLastWindowedStats()) {
    if (operatorStats.counters != null) {
      fileCounters = (BasicCounters<MutableLong>)operatorStats.counters;
    }
  }
  Response response = new Response();
  // Skip repartitioning while files are still pending or the repartition interval
  // has not yet elapsed. Parentheses make the original && / || precedence explicit.
  if ((fileCounters != null && fileCounters.getCounter(FileCounters.PENDING_FILES).longValue() > 0L)
      || System.currentTimeMillis() - repartitionInterval <= lastRepartition) {
    response.repartitionRequired = false;
    return response;
  }
  response.repartitionRequired = true;
  return response;
}
@Override
public void setup(OperatorContext context) {
  this.context = context;
  this.currentWindow = context.getValue(Context.OperatorContext.ACTIVATION_WINDOW_ID);
  sleepTimeMillis = context.getValue(OperatorContext.SPIN_MILLIS);
  bucketManager.setBucketCounters(counters);
  counters.setCounter(CounterKeys.DUPLICATE_EVENTS, new MutableLong());
  bucketManager.startService(this);
  logger.debug("bucket keys at startup {}", waitingEvents.keySet());
  for (long bucketKey : waitingEvents.keySet()) {
    bucketManager.loadBucketData(bucketKey);
  }
  if (orderedOutput) {
    decisions = Maps.newLinkedHashMap();
  }
}
@Override
public Response processStats(BatchedOperatorStats batchedOperatorStats) {
  List<Stats.OperatorStats> lastWindowedStats = batchedOperatorStats.getLastWindowedStats();
  if (lastWindowedStats != null) {
    for (Stats.OperatorStats os : lastWindowedStats) {
      if (os.counters instanceof BasicCounters) { // instanceof is false for null
        @SuppressWarnings("unchecked")
        BasicCounters<MutableLong> cs = (BasicCounters<MutableLong>)os.counters;
        logger.debug("operatorId:{} buckets:[in-memory:{} deleted:{} evicted:{}] events:[in-memory:{} "
            + "committed-last-window:{} duplicates:{}]",
            batchedOperatorStats.getOperatorId(),
            cs.getCounter(BucketManager.CounterKeys.BUCKETS_IN_MEMORY),
            cs.getCounter(BucketManager.CounterKeys.DELETED_BUCKETS),
            cs.getCounter(BucketManager.CounterKeys.EVICTED_BUCKETS),
            cs.getCounter(BucketManager.CounterKeys.EVENTS_IN_MEMORY),
            cs.getCounter(BucketManager.CounterKeys.EVENTS_COMMITTED_LAST_WINDOW),
            cs.getCounter(CounterKeys.DUPLICATE_EVENTS));
      }
    }
  }
  return null;
}
protected AppDataSingleSchemaDimensionStoreHDHT createStore(DAG dag, Configuration conf, String eventSchema) {
  AppDataSingleSchemaDimensionStoreHDHT store = dag.addOperator("Store", ProcessTimeAwareStore.class);
  store.setUpdateEnumValues(true);
  String basePath = Preconditions.checkNotNull(conf.get(PROP_STORE_PATH),
      "base path should be specified in the properties.xml");
  TFileImpl hdsFile = new TFileImpl.DTFileImpl();
  basePath += System.currentTimeMillis();
  hdsFile.setBasePath(basePath);
  store.setFileStore(hdsFile);
  dag.setAttribute(store, Context.OperatorContext.COUNTERS_AGGREGATOR,
      new BasicCounters.LongAggregator<MutableLong>());
  store.setConfigurationSchemaJSON(eventSchema);
  store.setPartitionCount(storePartitionCount);
  if (storePartitionCount > 1) {
    store.setQueryResultUnifier(new DimensionStoreHDHTNonEmptyQueryResultUnifier());
  }
  return store;
}
public MySession(SessionManager sessionManager, String id, ThreadLocalManager threadLocalManager,
    IdManager idManager, SessionStore sessionStore, SessionAttributeListener sessionListener,
    int inactiveInterval, NonPortableSession nonPortableSession,
    MutableLong expirationTimeSuggestion, RebuildBreakdownService rebuildBreakdownService) {
  this.sessionManager = sessionManager;
  m_id = id;
  this.threadLocalManager = threadLocalManager;
  this.idManager = idManager;
  this.sessionStore = sessionStore;
  this.sessionListener = sessionListener;
  m_inactiveInterval = inactiveInterval;
  m_nonPortalSession = nonPortableSession;
  m_created = System.currentTimeMillis();
  m_accessed = m_created;
  this.expirationTimeSuggestion = expirationTimeSuggestion;
  resetExpirationTimeSuggestion();
  // set the TERRACOTTA_CLUSTER flag
  resolveTerracottaClusterProperty();
  this.rebuildBreakdownService = rebuildBreakdownService;
}
protected Session newSessionWithBlockableMutableLong(final CountDownLatch opStarted,
    final CountDownLatch opBlocker, final CountDownLatch opCompleted) {
  // Unfortunately, the Maintenance implementation compels us to use MySession
  // rather than an interface.
  String uuid = nextUuid();
  final MutableLong expirationTimeSuggestion = new MutableLong(System.currentTimeMillis()) {
    @Override
    public long longValue() {
      Callable<Long> callback = new Callable<Long>() {
        public Long call() throws Exception {
          return superLongValue();
        }
      };
      return execBlockableSessionOp(opStarted, opBlocker, opCompleted, callback);
    }

    private long superLongValue() {
      return super.longValue();
    }
  };
  return new MySession(sessionComponent, uuid, threadLocalManager, idManager,
      sessionComponent, sessionListener, sessionComponent.getInactiveInterval(), new MyNonPortableSession(),
      expirationTimeSuggestion, null);
}
protected Session newSessionWithBlockableGetLastAccessedTimeImpl(final CountDownLatch opStarted,
    final CountDownLatch opBlocker, final CountDownLatch opCompleted) {
  // Unfortunately, the getActiveUserCount() implementation compels us to use MySession
  // rather than an interface.
  String uuid = nextUuid();
  return new MySession(sessionComponent, uuid, threadLocalManager, idManager,
      sessionComponent, sessionListener, sessionComponent.getInactiveInterval(), new MyNonPortableSession(),
      new MutableLong(System.currentTimeMillis()), null) {
    private long superGetLastAccessedTime() {
      return super.getLastAccessedTime();
    }

    @Override
    public long getLastAccessedTime() {
      Callable<Long> callback = new Callable<Long>() {
        public Long call() throws Exception {
          return superGetLastAccessedTime();
        }
      };
      return execBlockableSessionOp(opStarted, opBlocker, opCompleted, callback);
    }
  };
}
@Override
public void streamTermIdsForField(String name, int fieldId, List<KeyRange> ranges,
    final TermIdStream termIdStream, StackBuffer stackBuffer) throws Exception {
  MutableLong bytes = new MutableLong();
  indexes[fieldId].streamKeys(ranges, rawKey -> {
    try {
      bytes.add(rawKey.length);
      return termIdStream.stream(termInterner.intern(rawKey));
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }, stackBuffer);
  LOG.inc("count>streamTermIdsForField>total");
  LOG.inc("count>streamTermIdsForField>" + name + ">total");
  LOG.inc("count>streamTermIdsForField>" + name + ">" + fieldId);
  LOG.inc("bytes>streamTermIdsForField>total", bytes.longValue());
  LOG.inc("bytes>streamTermIdsForField>" + name + ">total", bytes.longValue());
  LOG.inc("bytes>streamTermIdsForField>" + name + ">" + fieldId, bytes.longValue());
}
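// A minimal, self-contained sketch (assumed names, not from the original source) of the
// pattern above: a Java lambda can only capture effectively-final locals, so a MutableLong
// gives the callback a mutable accumulator without resorting to a one-element array.
import java.util.List;
import org.apache.commons.lang3.mutable.MutableLong;

public class LambdaAccumulatorSketch {
  public static void main(String[] args) {
    List<byte[]> rawKeys = List.of(new byte[3], new byte[5], new byte[8]);
    MutableLong bytes = new MutableLong(); // starts at 0
    rawKeys.forEach(rawKey -> bytes.add(rawKey.length)); // mutate through the final reference
    System.out.println("streamed bytes: " + bytes.longValue()); // 16
  }
}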
@Override
public void multiGetLastIds(String name, int fieldId, MiruTermId[] termIds, int[] results,
    StackBuffer stackBuffer) throws Exception {
  byte[][] termIdBytes = new byte[termIds.length][];
  for (int i = 0; i < termIds.length; i++) {
    if (termIds[i] != null) {
      termIdBytes[i] = termIds[i].getBytes();
    }
  }
  MutableLong bytes = new MutableLong();
  indexes[fieldId].readEach(termIdBytes, null, (monkey, filer, _stackBuffer, lock, index) -> {
    if (filer != null) {
      bytes.add(4);
      results[index] = MiruFilerInvertedIndex.deserLastId(filer);
    }
    return null;
  }, new Void[results.length], stackBuffer);
  LOG.inc("count>multiGetLastIds>total");
  LOG.inc("count>multiGetLastIds>" + name + ">total");
  LOG.inc("count>multiGetLastIds>" + name + ">" + fieldId);
  LOG.inc("bytes>multiGetLastIds>total", bytes.longValue());
  LOG.inc("bytes>multiGetLastIds>" + name + ">total", bytes.longValue());
  LOG.inc("bytes>multiGetLastIds>" + name + ">" + fieldId, bytes.longValue());
}
@Override
public int lastId(StackBuffer stackBuffer) throws Exception {
  if (lastId == Integer.MIN_VALUE) {
    MutableLong bytes = new MutableLong();
    synchronized (mutationLock) {
      lastId = keyedFilerStore.read(indexKeyBytes, null, (monkey, filer, stackBuffer1, lock) -> {
        if (filer != null) {
          bytes.add(filer.length());
          return getLastId(lock, filer, stackBuffer1);
        } else {
          return -1;
        }
      }, stackBuffer);
    }
    LOG.inc("count>lastId>total");
    LOG.inc("count>lastId>" + name + ">total");
    LOG.inc("count>lastId>" + name + ">" + fieldId);
    LOG.inc("bytes>lastId>total", bytes.longValue());
    LOG.inc("bytes>lastId>" + name + ">total", bytes.longValue());
    LOG.inc("bytes>lastId>" + name + ">" + fieldId, bytes.longValue());
  }
  return lastId;
}
public StreamBatch<MiruWALEntry, AmzaCursor> getActivity(MiruTenantId tenantId,
    MiruPartitionId partitionId, AmzaCursor cursor, int batchSize, long stopAtTimestamp,
    MutableLong bytesCount) throws Exception {
  getActivityLatency.startTimer();
  try {
    List<MiruWALEntry> activities = new ArrayList<>();
    AmzaCursor nextCursor = activityWALReader.stream(tenantId, partitionId, cursor, batchSize,
        stopAtTimestamp,
        (collisionId, partitionedActivity, timestamp) -> {
          activities.add(new MiruWALEntry(collisionId, timestamp, partitionedActivity));
          return activities.size() < batchSize;
        });
    return new StreamBatch<>(activities, nextCursor, false, null);
  } finally {
    getActivityLatency.stopTimer("Get activity latency", "Check partition health");
  }
}
public StreamBatch<MiruWALEntry, RCVSCursor> getActivity(MiruTenantId tenantId,
    MiruPartitionId partitionId, RCVSCursor cursor, int batchSize, long stopAtTimestamp,
    MutableLong bytesCount) throws Exception {
  List<MiruWALEntry> activities = new ArrayList<>();
  RCVSCursor nextCursor = activityWALReader.stream(tenantId, partitionId, cursor, batchSize,
      stopAtTimestamp,
      (collisionId, partitionedActivity, timestamp) -> {
        activities.add(new MiruWALEntry(collisionId, timestamp, partitionedActivity));
        return activities.size() < batchSize;
      });
  return new StreamBatch<>(activities, nextCursor, false, null);
}
@Override
public long oldestActivityClockTimestamp(MiruTenantId tenantId, MiruPartitionId partitionId) throws Exception {
  if (blacklist != null && blacklist.contains(new TenantAndPartition(tenantId, partitionId))) {
    return -1;
  }
  final MutableLong oldestClockTimestamp = new MutableLong(-1); // sentinel: no activity found
  activityWAL.getValues(tenantId,
      new MiruActivityWALRow(partitionId.getId()),
      new MiruActivityWALColumnKey(MiruPartitionedActivity.Type.ACTIVITY.getSort(), Long.MIN_VALUE),
      null, 1, false, null, null,
      miruPartitionedActivity -> {
        if (miruPartitionedActivity != null && miruPartitionedActivity.type.isActivityType()) {
          oldestClockTimestamp.setValue(miruPartitionedActivity.clockTimestamp);
        }
        return null; // one and done
      });
  return oldestClockTimestamp.longValue();
}
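// A small sketch (assumed names, not from the original source) of the sentinel
// out-parameter pattern above: seed a MutableLong with -1 so the caller can tell
// "the callback never fired" apart from a real timestamp written inside the callback.
import java.util.function.LongConsumer;
import org.apache.commons.lang3.mutable.MutableLong;

public class SentinelOutParamSketch {
  static void scanOldest(long[] walTimestamps, LongConsumer onFirst) {
    if (walTimestamps.length > 0) {
      onFirst.accept(walTimestamps[0]); // deliver only the first entry, then stop
    }
  }

  public static void main(String[] args) {
    MutableLong oldest = new MutableLong(-1); // -1 means "nothing found"
    scanOldest(new long[] {1700000000000L, 1700000001000L}, oldest::setValue);
    System.out.println(oldest.longValue()); // 1700000000000
  }
}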
public TakeRowStream(AmzaStats amzaStats,
    VersionedPartitionName versionedPartitionName,
    CommitTo commitTo,
    RingMember ringMember,
    long lastHighwaterMark,
    BinaryPrimaryRowMarshaller primaryRowMarshaller,
    BinaryHighwaterRowMarshaller binaryHighwaterRowMarshaller) {
  this.amzaStats = amzaStats;
  this.versionedPartitionName = versionedPartitionName;
  this.commitTo = commitTo;
  this.ringMember = ringMember;
  this.highWaterMark = new MutableLong(lastHighwaterMark);
  this.lastDeltaIndex = new MutableLong(-1);
  this.lastTxId = new MutableLong(Long.MIN_VALUE);
  this.primaryRowMarshaller = primaryRowMarshaller;
  this.binaryHighwaterRowMarshaller = binaryHighwaterRowMarshaller;
  this.flushedTxId = new MutableLong(-1);
}
private boolean streamBootstrap(long leadershipToken, DataOutputStream dos, MutableLong bytes,
    VersionedPartitionName versionedPartitionName, int stripe, LivelyEndState livelyEndState) throws Exception {
  dos.writeLong(leadershipToken);
  dos.writeLong(-1);
  dos.writeByte(0); // not online
  dos.writeByte(0); // last entry marker
  dos.writeByte(0); // last entry marker
  dos.writeByte(0); // streamedToEnd marker
  bytes.add(4); // count the four single-byte markers
  if (versionedPartitionName == null || livelyEndState == null) {
    // someone thinks we're a member for this partition
    return true;
  } else {
    // BOOTSTRAP'S BOOTSTRAPS!
    partitionCreator.get("bootstrap", versionedPartitionName, stripe);
    return false;
  }
}
private DeltaWAL createOrOpen(IoStats ioStats, long id, long prevId) throws Exception {
  WALTx deltaWALRowsTx = new BinaryWALTx(
      prevId + "_" + id,
      ioProvider,
      primaryRowMarshaller,
      Integer.MAX_VALUE,
      64);
  MutableLong rows = new MutableLong();
  deltaWALRowsTx.open(
      walDir,
      io -> {
        io.validate(ioStats, true, false,
            (rowFP, rowTxId, rowType, row) -> {
              rows.increment();
              return (rows.longValue() < corruptionParanoiaFactor) ? -1 : rowFP;
            },
            (rowFP, rowTxId, rowType, row) -> -1,
            null);
        return null;
      });
  return new DeltaWAL(id, prevId, idProvider, primaryRowMarshaller, highwaterRowMarshaller, deltaWALRowsTx);
}
private synchronized void incLatency(long timeRecvd, long latency) {
  updateTimeRecvd(timeRecvd);
  // If we're still in the warm-up period, skip this stat.
  if ((double)timeRecvd < ((double)_receiveStartTimeInNanos + _latencyWarmupInSecs * 1000000000.0)) {
    return;
  }
  // Convert latency from nanoseconds to microseconds.
  long usecLatency = (long)((double)latency / _toUs);
  MutableLong minLat = _stats.get(PerfStatType.LATENCY_USEC_MIN);
  if (minLat.longValue() > usecLatency) {
    minLat.setValue(usecLatency);
  }
  MutableLong maxLat = _stats.get(PerfStatType.LATENCY_USEC_MAX);
  if (maxLat.longValue() < usecLatency) {
    maxLat.setValue(usecLatency);
  }
  _totalLatency += usecLatency;
  _stats.get(PerfStatType.NUM_LATENCY_MSGS).increment();
  // Update the buckets: each bucket spans 2^_latencyGranularity microseconds,
  // and out-of-range latencies clamp to the last bucket.
  long bucketIndex = usecLatency >> _latencyGranularity;
  if (bucketIndex >= _latencyBuckets.size()) {
    bucketIndex = _latencyBuckets.size() - 1;
  }
  if (bucketIndex < 0) {
    trace.error("Negative latency: " + usecLatency + " BucketIndex: " + bucketIndex
        + " _latencyBuckets: " + _latencyBuckets.size());
    bucketIndex = 0;
  }
  _latencyBuckets.get((int)bucketIndex).increment();
}
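// A small worked sketch (assumed values, not from the original source) of the bucket
// index math above: with a granularity factor g, each bucket covers 2^g microseconds,
// so the shift replaces a division, and overflow clamps into the final bucket.
public class LatencyBucketSketch {
  public static void main(String[] args) {
    int granularity = 10;  // each bucket spans 2^10 = 1024 usec
    int numBuckets = 8;
    for (long usec : new long[] {500, 1500, 100000}) {
      long bucket = usec >> granularity;
      if (bucket >= numBuckets) {
        bucket = numBuckets - 1;  // clamp overflow into the final bucket
      }
      System.out.println(usec + " usec -> bucket " + bucket); // 0, 1, 7
    }
  }
}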
@Override
public void setup(OperatorContext context) {
  this.context = context;
  commandCounters.setCounter(CommandCounters.ADD, new MutableLong());
  commandCounters.setCounter(CommandCounters.ADD_RANGE, new MutableLong());
  commandCounters.setCounter(CommandCounters.DELETE, new MutableLong());
  commandCounters.setCounter(CommandCounters.CLEAR, new MutableLong());
}
protected Number convertToNumber(Object o) {
  if (o == null) {
    return null;
  } else if (o instanceof MutableDouble || o instanceof MutableLong) {
    return (Number)o;
  } else if (o instanceof Double || o instanceof Float) {
    return new MutableDouble((Number)o);
  } else if (o instanceof Number) {
    return new MutableLong((Number)o);
  } else {
    return new MutableDouble(o.toString());
  }
}
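// A hedged usage sketch (class name assumed; the conversion logic is inlined from the
// method above) illustrating the dispatch: mutable numerics pass through, floating
// point becomes MutableDouble, other numbers become MutableLong, and anything else is
// parsed from its string form.
import org.apache.commons.lang3.mutable.MutableDouble;
import org.apache.commons.lang3.mutable.MutableLong;

public class ConvertToNumberSketch {
  static Number convertToNumber(Object o) { // same logic as above, inlined for the demo
    if (o == null) return null;
    if (o instanceof MutableDouble || o instanceof MutableLong) return (Number)o;
    if (o instanceof Double || o instanceof Float) return new MutableDouble((Number)o);
    if (o instanceof Number) return new MutableLong((Number)o);
    return new MutableDouble(o.toString());
  }

  public static void main(String[] args) {
    System.out.println(convertToNumber(3.14f).getClass().getSimpleName()); // MutableDouble
    System.out.println(convertToNumber(42).getClass().getSimpleName());    // MutableLong
    System.out.println(convertToNumber("2.5").getClass().getSimpleName()); // MutableDouble
  }
}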
@Override
@SuppressWarnings("unchecked")
public Object aggregate(Collection<?> countersList) {
  if (countersList.isEmpty()) {
    return null;
  }
  BasicCounters<MutableLong> tempFileCounters = (BasicCounters<MutableLong>)countersList.iterator().next();
  MutableLong globalProcessedFiles = tempFileCounters.getCounter(FileCounters.GLOBAL_PROCESSED_FILES);
  MutableLong globalNumberOfFailures = tempFileCounters.getCounter(FileCounters.GLOBAL_NUMBER_OF_FAILURES);
  MutableLong globalNumberOfRetries = tempFileCounters.getCounter(FileCounters.GLOBAL_NUMBER_OF_RETRIES);
  totalLocalProcessedFiles.setValue(0);
  pendingFiles.setValue(0);
  totalLocalNumberOfFailures.setValue(0);
  totalLocalNumberOfRetries.setValue(0);
  for (Object fileCounters : countersList) {
    BasicCounters<MutableLong> basicFileCounters = (BasicCounters<MutableLong>)fileCounters;
    totalLocalProcessedFiles.add(basicFileCounters.getCounter(FileCounters.LOCAL_PROCESSED_FILES));
    pendingFiles.add(basicFileCounters.getCounter(FileCounters.PENDING_FILES));
    totalLocalNumberOfFailures.add(basicFileCounters.getCounter(FileCounters.LOCAL_NUMBER_OF_FAILURES));
    totalLocalNumberOfRetries.add(basicFileCounters.getCounter(FileCounters.LOCAL_NUMBER_OF_RETRIES));
  }
  globalProcessedFiles.add(totalLocalProcessedFiles);
  globalProcessedFiles.subtract(pendingFiles);
  globalNumberOfFailures.add(totalLocalNumberOfFailures);
  globalNumberOfRetries.add(totalLocalNumberOfRetries);
  BasicCounters<MutableLong> aggregatedCounters = new BasicCounters<MutableLong>(MutableLong.class);
  aggregatedCounters.setCounter(AggregatedFileCounters.PROCESSED_FILES, globalProcessedFiles);
  aggregatedCounters.setCounter(AggregatedFileCounters.PENDING_FILES, pendingFiles);
  aggregatedCounters.setCounter(AggregatedFileCounters.NUMBER_OF_ERRORS, totalLocalNumberOfFailures);
  aggregatedCounters.setCounter(AggregatedFileCounters.NUMBER_OF_RETRIES, totalLocalNumberOfRetries);
  return aggregatedCounters;
}
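// A simplified sketch (assumed structure, plain collections instead of the Malhar
// BasicCounters API) of the aggregation above: local per-partition counters are summed,
// and pending work is subtracted so the global "processed" figure only counts finished files.
import java.util.List;
import org.apache.commons.lang3.mutable.MutableLong;

public class CounterAggregationSketch {
  record PartitionCounters(long processed, long pending) {}

  public static void main(String[] args) {
    List<PartitionCounters> partitions = List.of(
        new PartitionCounters(10, 2),
        new PartitionCounters(7, 1));
    MutableLong globalProcessed = new MutableLong();
    MutableLong pending = new MutableLong();
    for (PartitionCounters pc : partitions) {
      globalProcessed.add(pc.processed());
      pending.add(pc.pending());
    }
    globalProcessed.subtract(pending.longValue()); // pending files are not yet processed
    System.out.println("processed=" + globalProcessed + " pending=" + pending); // processed=14 pending=3
  }
}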
public FileSplitter() {
  currentWindowRecoveryState = Lists.newLinkedList();
  fileCounters = new BasicCounters<MutableLong>(MutableLong.class);
  windowDataManager = new WindowDataManager.NoopWindowDataManager();
  scanner = new TimeBasedDirectoryScanner();
  blocksThreshold = Integer.MAX_VALUE;
}
@Override
public void setup(Context.OperatorContext context) {
  Preconditions.checkArgument(!scanner.files.isEmpty(), "empty files");
  Preconditions.checkArgument(blockSize == null || blockSize > 0, "invalid block size");
  operatorId = context.getId();
  this.context = context;
  fileCounters.setCounter(Counters.PROCESSED_FILES, new MutableLong());
  windowDataManager.setup(context);
  try {
    fs = scanner.getFSInstance();
  } catch (IOException e) {
    throw new RuntimeException("creating fs", e);
  }
  if (blockSize == null) {
    blockSize = fs.getDefaultBlockSize(new Path(scanner.files.iterator().next()));
  }
  if (context.getValue(Context.OperatorContext.ACTIVATION_WINDOW_ID) < windowDataManager.getLargestCompletedWindow()) {
    blockMetadataIterator = null;
  } else {
    // don't set up the scanner while recovering
    scanner.setup(context);
  }
}
public AbstractBlockReader() {
  maxReaders = 16;
  minReaders = 1;
  intervalMillis = 2 * 60 * 1000L;
  response = new StatsListener.Response();
  backlogPerOperator = Maps.newHashMap();
  partitionCount = 1;
  counters = new BasicCounters<>(MutableLong.class);
  collectStats = true;
  lastBlockOpenTime = -1;
}
@Override
public void setup(Context.OperatorContext context) {
  operatorId = context.getId();
  LOG.debug("{}: partition keys {} mask {}", operatorId, partitionKeys, partitionMask);
  this.context = context;
  counters.setCounter(ReaderCounterKeys.BLOCKS, new MutableLong());
  counters.setCounter(ReaderCounterKeys.RECORDS, new MutableLong());
  counters.setCounter(ReaderCounterKeys.BYTES, new MutableLong());
  counters.setCounter(ReaderCounterKeys.TIME, new MutableLong());
  sleepTimeMillis = context.getValue(Context.OperatorContext.SPIN_MILLIS);
}
public AbstractJMSInputOperator() {
  counters = new BasicCounters<MutableLong>(MutableLong.class);
  throwable = new AtomicReference<Throwable>();
  pendingAck = Sets.newHashSet();
  windowDataManager = new FSWindowDataManager();
  lock = new Lock();
  // Recovery state is a linked hash map to maintain the order of tuples.
  currentWindowRecoveryState = Maps.newLinkedHashMap();
  holdingBuffer = new ArrayBlockingQueue<Message>(bufferSize) {
    private static final long serialVersionUID = 201411151139L;

    @SuppressWarnings("Contract")
    @Override
    public boolean add(Message message) {
      synchronized (lock) {
        try {
          return messageConsumed(message) && super.add(message);
        } catch (JMSException e) {
          LOG.error("message consumption", e);
          throwable.set(e);
          throw new RuntimeException(e);
        }
      }
    }
  };
}
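// A minimal sketch (assumed names, not from the original source) of the interception
// pattern above: overriding add() on a bounded queue runs a bookkeeping hook under a
// shared lock before the element is actually enqueued, so accounting and enqueueing
// stay atomic with respect to the thread draining the queue.
import java.util.concurrent.ArrayBlockingQueue;

public class InterceptingQueueSketch {
  public static void main(String[] args) {
    final Object lock = new Object();
    ArrayBlockingQueue<String> buffer = new ArrayBlockingQueue<String>(16) {
      @Override
      public boolean add(String message) {
        synchronized (lock) {
          System.out.println("consumed hook for: " + message); // bookkeeping before enqueue
          return super.add(message);
        }
      }
    };
    buffer.add("m-1");
    System.out.println("size: " + buffer.size()); // 1
  }
}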
@Override
public void setup(OperatorContext context) {
  this.context = context;
  spinMillis = context.getValue(OperatorContext.SPIN_MILLIS);
  counters.setCounter(CounterKeys.RECEIVED, new MutableLong());
  counters.setCounter(CounterKeys.REDELIVERED, new MutableLong());
  windowDataManager.setup(context);
}
/**
 * Called when a message is added to {@link #holdingBuffer}; subclasses may override this
 * if required. Note that this runs on the JMS thread, not the operator thread.
 *
 * @param message the consumed JMS message
 * @return true if the message is accepted
 * @throws javax.jms.JMSException
 */
protected boolean messageConsumed(Message message) throws JMSException {
  if (message.getJMSRedelivered() && pendingAck.contains(message.getJMSMessageID())) {
    counters.getCounter(CounterKeys.REDELIVERED).increment();
    LOG.warn("IGNORING: Redelivered Message {}", message.getJMSMessageID());
    return false;
  }
  pendingAck.add(message.getJMSMessageID());
  MutableLong receivedCt = counters.getCounter(CounterKeys.RECEIVED);
  receivedCt.increment();
  LOG.debug("message id: {} buffer size: {} received: {}",
      message.getJMSMessageID(), holdingBuffer.size(), receivedCt.longValue());
  return true;
}
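// A minimal sketch (plain strings stand in for JMS messages; names assumed) of the
// redelivery guard above: a message id seen again before its ack completes is counted
// as a redelivery and dropped; otherwise it is tracked as pending and counted as received.
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.lang3.mutable.MutableLong;

public class RedeliveryGuardSketch {
  private final Set<String> pendingAck = new HashSet<>();
  private final MutableLong received = new MutableLong();
  private final MutableLong redelivered = new MutableLong();

  boolean consume(String messageId, boolean isRedelivered) {
    if (isRedelivered && pendingAck.contains(messageId)) {
      redelivered.increment();
      return false; // duplicate delivery, ignore
    }
    pendingAck.add(messageId);
    received.increment();
    return true;
  }

  public static void main(String[] args) {
    RedeliveryGuardSketch guard = new RedeliveryGuardSketch();
    System.out.println(guard.consume("m-1", false)); // true, first delivery
    System.out.println(guard.consume("m-1", true));  // false, redelivered before ack
  }
}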
ReaderStats(int backlog, long readBlocks, long bytes, long time) {
  BasicCounters<MutableLong> bc = new BasicCounters<>(MutableLong.class);
  bc.setCounter(AbstractBlockReader.ReaderCounterKeys.BLOCKS, new MutableLong(readBlocks));
  bc.setCounter(AbstractBlockReader.ReaderCounterKeys.BYTES, new MutableLong(bytes));
  bc.setCounter(AbstractBlockReader.ReaderCounterKeys.TIME, new MutableLong(time));
  counters = bc;
  PortStats portStats = new PortStats("blocks");
  portStats.queueSize = backlog;
  inputPorts = Lists.newArrayList(portStats);
}
@Override
public void setBucketCounters(@Nonnull BasicCounters<MutableLong> bucketCounters) {
  this.bucketCounters = bucketCounters;
  bucketCounters.setCounter(CounterKeys.BUCKETS_IN_MEMORY, new MutableLong());
  bucketCounters.setCounter(CounterKeys.EVICTED_BUCKETS, new MutableLong());
  bucketCounters.setCounter(CounterKeys.DELETED_BUCKETS, new MutableLong());
  bucketCounters.setCounter(CounterKeys.EVENTS_COMMITTED_LAST_WINDOW, new MutableLong());
  bucketCounters.setCounter(CounterKeys.EVENTS_IN_MEMORY, new MutableLong());
  recordStats = true;
}
public AbstractDeduper() {
  waitingEvents = Maps.newHashMap();
  partitionKeys = Sets.newHashSet(0);
  partitionMask = 0;
  fetchedBuckets = new LinkedBlockingQueue<AbstractBucket<INPUT>>();
  counters = new BasicCounters<MutableLong>(MutableLong.class);
}
protected PubSubWebSocketAppDataResult createQueryResult(DAG dag, Configuration conf,
    AppDataSingleSchemaDimensionStoreHDHT store) {
  PubSubWebSocketAppDataResult wsOut = new PubSubWebSocketAppDataResult();
  URI queryUri = getQueryUri(dag, conf);
  wsOut.setUri(queryUri);
  dag.addOperator("QueryResult", wsOut);
  // Set remaining DAG options.
  dag.setAttribute(store, Context.OperatorContext.COUNTERS_AGGREGATOR,
      new BasicCounters.LongAggregator<MutableLong>());
  return wsOut;
}
/**
 * {@inheritDoc}
 */
public Session startSession(String id) {
  if (isClosing()) {
    throw new ClosingException();
  }
  // create a non-portable session object in case this is a clustered environment
  NonPortableSession nPS = new MyNonPortableSession();
  // create a new MutableLong holding the current time that both the Session and
  // the SessionManager can see
  MutableLong currentTime = currentTimeMutableLong();
  // create a new session
  Session s = new MySession(this, id, threadLocalManager(), idManager(), this, sessionListener,
      m_defaultInactiveInterval, nPS, currentTime, rebuildBreakdownService());
  // place the session into the main session storage, capturing any old entry under this id
  Session old = m_sessions.put(s.getId(), s);
  // place an entry in expirationTimeSuggestionMap that corresponds to the entry in m_sessions
  expirationTimeSuggestionMap.put(id, currentTime);
  // check for an id conflict
  if (old != null) {
    log.warn("startSession: duplicate id: " + s.getId());
  }
  return s;
}
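// A minimal sketch (assumed names, not from the original source) of the sharing pattern
// above: because the session and the manager's map hold the same MutableLong instance,
// an update on the session side is immediately visible to the manager's expiration sweep
// without any extra lookup or copy.
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.mutable.MutableLong;

public class SharedExpirationSketch {
  public static void main(String[] args) {
    Map<String, MutableLong> expirationBySessionId = new HashMap<>();
    MutableLong expiration = new MutableLong(1000L);
    expirationBySessionId.put("session-1", expiration);

    expiration.setValue(2000L); // the "session" side updates its suggestion
    System.out.println(expirationBySessionId.get("session-1")); // 2000, seen by the manager
  }
}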
protected Session newSessionWithBlockableInvalidate(final CountDownLatch opStarted,
    final CountDownLatch opBlocker, final CountDownLatch opCompleted) {
  // Unfortunately, the Maintenance implementation compels us to use MySession
  // rather than an interface.
  String uuid = nextUuid();
  return new MySession(sessionComponent, uuid, threadLocalManager, idManager,
      sessionComponent, sessionListener, sessionComponent.getInactiveInterval(), new MyNonPortableSession(),
      new MutableLong(System.currentTimeMillis()), null) {
    // Defined to silence Eclipse serialization warnings.
    private static final long serialVersionUID = 1L;

    @Override
    public void invalidate() {
      Callable<Boolean> callback = new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return superInvalidate();
        }
      };
      execBlockableSessionOp(opStarted, opBlocker, opCompleted, callback);
    }

    private boolean superInvalidate() {
      log.debug("**cris** invalidate");
      super.invalidate();
      return true;
    }
  };
}
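// A hedged sketch of the test-support pattern shared by the three newSessionWithBlockable*
// helpers above. execBlockableSessionOp is not shown in this collection; it is assumed to
// look roughly like this: signal that the operation started, park on a latch the test
// controls, run the wrapped work, then signal completion so the test can make assertions
// around a mid-flight session operation.
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;

public class BlockableOpSketch {
  static <V> V execBlockableSessionOp(CountDownLatch opStarted, CountDownLatch opBlocker,
      CountDownLatch opCompleted, Callable<V> callback) {
    try {
      opStarted.countDown();   // tell the test the operation is in flight
      opBlocker.await();       // park until the test releases us
      return callback.call();  // run the wrapped session operation
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      opCompleted.countDown(); // tell the test the operation finished
    }
  }
}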