/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
    @Override
    public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
        Entry<BlockCacheKey, BucketEntry> entry2) {
      return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
    }
  }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
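For reference, a minimal self-contained sketch of the builder chain used above. The class and values here are illustrative, not from the snippet; note that expectedSize is only a sizing hint, not a bound.

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Comparator;

public class BuilderDemo {
  public static void main(String[] args) {
    // Ordered ascending by string length.
    MinMaxPriorityQueue<String> queue = MinMaxPriorityQueue
        .orderedBy(Comparator.comparingInt(String::length))
        .expectedSize(4)
        .create();
    queue.add("bb");
    queue.add("a");
    queue.add("cccc");
    System.out.println(queue.peekFirst()); // "a"    (least element)
    System.out.println(queue.peekLast());  // "cccc" (greatest element)
  }
}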
public CachedPostingListCounter rebuildCache() {
  MinMaxPriorityQueue<Entry> mostExpensive = MinMaxPriorityQueue
      .maximumSize(32).expectedSize(32).create();
  synchronized (this) {
    for (ObjectLongPair<int[]> p : frequency.keyValuesView()) {
      mostExpensive.add(new Entry(p.getOne(), p.getTwo()));
    }
  }
  ObjectIntHashMap<int[]> postingListMapping = new ObjectIntHashMap<>();
  int[] bitVector = new int[nDocuments];
  int length = mostExpensive.size();
  for (int i = 0; i < length; i++) {
    Entry e = mostExpensive.removeFirst();
    int[] docIds = e.docIds;
    postingListMapping.put(docIds, i);
    for (int docId : docIds) {
      bitVector[docId] |= (1 << i);
    }
  }
  return new CachedPostingListCounter(postingListMapping, bitVector);
}
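The maximumSize(32) cap lines up with the 32 bits of a Java int: each cached posting list is assigned one bit position i, and bitVector[docId] has bit i set exactly when that list contains docId. A hedged sketch of the membership test this layout enables (the method name is illustrative, not from the snippet):

// Illustrative: does cached posting list i contain docId, given the
// bit vector built above (one int per document)?
boolean contains(int[] bitVector, int docId, int i) {
  return (bitVector[docId] & (1 << i)) != 0;
}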
public Queue<QueryMatch> queryKNN(double lat, double lon, int n) throws IOException {
  DistanceComparator comp = new DistanceComparator(lon, lat);
  Queue<QueryMatch> ret = MinMaxPriorityQueue.orderedBy(comp)
      .maximumSize(n)
      .create();

  GeoHash target = GeoHash.withCharacterPrecision(lat, lon, precision);
  ret.addAll(takeN(comp, target.toBase32(), n));
  for (GeoHash h : target.getAdjacent()) {
    ret.addAll(takeN(comp, h.toBase32(), n));
  }
  return ret;
}
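Worth noting for the maximumSize(n) call above: per Guava's documented behavior, when an insertion pushes the queue past its maximum size, the element that is greatest according to the comparator is evicted. With a distance comparator, the queue therefore always retains the n nearest matches seen so far. A minimal standalone sketch of that eviction behavior (the integers stand in for distances):

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Comparator;

public class BoundedTopNDemo {
  public static void main(String[] args) {
    // Keep only the 3 smallest values; on overflow the greatest is dropped.
    MinMaxPriorityQueue<Integer> nearest = MinMaxPriorityQueue
        .orderedBy(Comparator.<Integer>naturalOrder())
        .maximumSize(3)
        .create();
    for (int d : new int[] {7, 2, 9, 4, 1}) {
      nearest.add(d);
    }
    System.out.println(nearest.peekFirst()); // 1
    System.out.println(nearest.peekLast());  // 4 (7 and 9 were evicted)
  }
}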
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        @Override
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }
      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
@Override
public List<LinkRelevance> getSelectedLinks() {
  List<LinkRelevance> links = new ArrayList<>();
  while (links.size() < numberOfLinks && !topkLinksPerDomain.isEmpty()) {
    // Adds the URL with the max score of each domain.
    MinMaxPriorityQueue<LinkRelevance> topk = newPriorityQueue(numberOfLinks);
    Iterator<Entry<String, MinMaxPriorityQueue<LinkRelevance>>> it =
        topkLinksPerDomain.entrySet().iterator();
    while (it.hasNext()) {
      MinMaxPriorityQueue<LinkRelevance> domain = it.next().getValue();
      topk.add(domain.poll());
      if (domain.isEmpty()) {
        it.remove();
      }
    }
    for (LinkRelevance link : topk) {
      links.add(link);
    }
  }
  this.topkLinksPerDomain = null; // clean up reference
  return links;
}
@Override
public List<MiruPartition> getPartitionsForTenant(MiruTenantId tenantId) throws Exception {
  NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest =
      tenantLatestTopologies(tenantId);
  List<MiruPartition> partitions = new ArrayList<>();
  for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
    MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
    for (HostAndTimestamp hat : got) {
      EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
      byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null,
          toTopologyKey(tenantId, partitionId));
      MiruPartitionCoordInfo info;
      if (rawInfo == null) {
        info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
      } else {
        MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
        info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
      }
      partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
    }
  }
  return partitions;
}
private NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> tenantPartitionsLatestTopologies(
    MiruTenantId tenantId, Collection<MiruPartitionId> partitionIds) throws Exception {
  final NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest =
      new TreeMap<>();
  for (HostHeartbeat hostHeartbeat : getAllHosts()) {
    EmbeddedClient registryClient = registryClient(hostHeartbeat.host);
    for (MiruPartitionId partitionId : partitionIds) {
      byte[] got = registryClient.getValue(Consistency.quorum, null,
          toTopologyKey(tenantId, partitionId));
      if (got != null) {
        MinMaxPriorityQueue<HostAndTimestamp> latest = partitionIdToLatest.get(partitionId);
        if (latest == null) {
          // TODO defaultNumberOfReplicas should come from config?
          latest = MinMaxPriorityQueue.maximumSize(defaultNumberOfReplicas)
              .expectedSize(defaultNumberOfReplicas)
              .<HostAndTimestamp>create();
          partitionIdToLatest.put(partitionId, latest);
        }
        latest.add(new HostAndTimestamp(hostHeartbeat.host, FilerIO.bytesLong(got)));
      }
    }
  }
  return partitionIdToLatest;
}
@Override
public List<MiruPartition> getPartitionsForTenantHost(MiruTenantId tenantId, MiruHost host) throws Exception {
  NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest =
      tenantLatestTopologies(tenantId);
  List<MiruPartition> partitions = new ArrayList<>();
  for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
    MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
    for (HostAndTimestamp hat : got) {
      if (hat.host.equals(host)) {
        EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
        byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null,
            toTopologyKey(tenantId, partitionId));
        MiruPartitionCoordInfo info;
        if (rawInfo == null) {
          info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
        } else {
          MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
          info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
        }
        partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
      }
    }
  }
  return partitions;
}
@Override
public MiruReplicaSet getReplicaSet(MiruTenantId tenantId, MiruPartitionId partitionId) throws Exception {
  MinMaxPriorityQueue<HostAndTimestamp> latest = tenantLatestTopology(tenantId, partitionId);
  List<MiruPartition> partitions = Lists.newArrayList();
  Set<MiruHost> replicaHosts = Sets.newHashSet();
  for (HostAndTimestamp hat : latest) {
    EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
    byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null,
        toTopologyKey(tenantId, partitionId));
    MiruPartitionCoordInfo info;
    if (rawInfo == null) {
      info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
    } else {
      MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
      info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
    }
    partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
    replicaHosts.add(hat.host);
  }
  int missing = defaultNumberOfReplicas - replicaHosts.size(); // TODO expose to config?
  return new MiruReplicaSet(extractPartitionsByState(partitions), replicaHosts, missing, defaultNumberOfReplicas);
}
private <BM extends IBM, IBM> RecoAnswer composeAnswer(MiruRequestContext<BM, IBM, ?> requestContext,
    MiruRequest<RecoQuery> request,
    MiruFieldDefinition fieldDefinition,
    MinMaxPriorityQueue<MiruTermCount> heap,
    StackBuffer stackBuffer) throws Exception {

  MiruSchema schema = requestContext.getSchema();
  MiruTermComposer termComposer = requestContext.getTermComposer();
  List<Recommendation> results = new ArrayList<>();
  for (MiruTermCount result : heap) {
    MiruValue term = new MiruValue(termComposer.decompose(schema, fieldDefinition, stackBuffer, result.termId));
    results.add(new Recommendation(term, result.count));
  }
  log.debug("score: results.size={}", results.size());
  boolean resultsExhausted = request.query.timeRange.smallestTimestamp
      > requestContext.getTimeIndex().getLargestTimestamp();
  return new RecoAnswer(results, 1, resultsExhausted);
}
public void completeCommittingSegments(String realtimeTableName, List<String> segmentIds) {
  Comparator<LLCSegmentName> comparator = new Comparator<LLCSegmentName>() {
    @Override
    public int compare(LLCSegmentName o1, LLCSegmentName o2) {
      return o2.compareTo(o1);
    }
  };
  Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments = new HashMap<>();
  for (String segmentId : segmentIds) {
    LLCSegmentName segmentName = new LLCSegmentName(segmentId);
    final int partitionId = segmentName.getPartitionId();
    MinMaxPriorityQueue<LLCSegmentName> latestSegments = partitionToLatestSegments.get(partitionId);
    if (latestSegments == null) {
      latestSegments = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(2).create();
      partitionToLatestSegments.put(partitionId, latestSegments);
    }
    latestSegments.offer(segmentName);
  }
  completeCommittingSegmentsInternal(realtimeTableName, partitionToLatestSegments);
}
@Test
public void comparatorTest() throws Exception {
  MinMaxPriorityQueue<DimensionValueMetricPair> testQueue =
      MinMaxPriorityQueue.maximumSize(2).create();

  DimensionValueMetricPair d1 = new DimensionValueMetricPair("d1", 1);
  DimensionValueMetricPair d2 = new DimensionValueMetricPair("d2", 2);
  DimensionValueMetricPair d3 = new DimensionValueMetricPair("d3", 3);
  DimensionValueMetricPair d4 = new DimensionValueMetricPair("d4", 4);

  testQueue.add(d1);
  testQueue.add(d2);
  testQueue.add(d3);
  testQueue.add(d4);

  for (DimensionValueMetricPair pair : testQueue) {
    Assert.assertTrue(pair.getMetricValue().intValue() > 2,
        "Incorrect comparator for DimensionValueMetricPair; queue must retain the highest metric values");
  }
}
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp;
  if (!fetchFromTail) {
    rp = regionsToMove.remove();
  } else {
    rp = regionsToMove.removeLast();
  }
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
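This is where MinMaxPriorityQueue earns its name: remove() takes the least element and removeLast() the greatest, so a caller can drain either end of the same heap. A standalone sketch of that double-ended behavior (values are illustrative):

import com.google.common.collect.MinMaxPriorityQueue;

public class DoubleEndedDemo {
  public static void main(String[] args) {
    MinMaxPriorityQueue<Integer> q = MinMaxPriorityQueue.create();
    q.add(5);
    q.add(1);
    q.add(9);
    System.out.println(q.removeFirst()); // 1 (least; remove() is equivalent)
    System.out.println(q.removeLast());  // 9 (greatest)
    System.out.println(q.removeFirst()); // 5
  }
}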
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public LruCachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
public ReservoirSampler(final int k, final Random random, final ToDoubleFunction<Integer> computeWeight) {
  this.minQueue = MinMaxPriorityQueue
      .<Pair<Double, T>>orderedBy((x, y) -> Double.compare(x.first(), y.first()))
      .maximumSize(k)
      .create();
  this.computeWeight = computeWeight;
  this.random = random;
  this.count = new AtomicInteger(0);
}
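A bounded queue keyed by a double is the usual backbone of weighted reservoir sampling in the Efraimidis-Spirakis style: each item gets an exponential key -ln(u)/w, and keeping the k smallest keys samples items with probability proportional to their weights. This is one plausible reading of the constructor above, not necessarily this class's exact algorithm; the sketch below is self-contained under that assumption:

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Random;

public class WeightedReservoirDemo {
  public static void main(String[] args) {
    int k = 3;
    Random random = new Random(42);
    double[] weights = {1.0, 5.0, 2.0, 0.5, 8.0, 3.0};

    // Ascending by key; with maximumSize(k) the queue evicts its GREATEST
    // element on overflow, so the k smallest keys survive.
    MinMaxPriorityQueue<double[]> reservoir = MinMaxPriorityQueue
        .orderedBy((double[] a, double[] b) -> Double.compare(a[0], b[0]))
        .maximumSize(k)
        .create();

    for (int i = 0; i < weights.length; i++) {
      // Exponential key: item i survives with probability proportional to
      // weights[i]. (1 - nextDouble() avoids log(0).)
      double key = -Math.log(1.0 - random.nextDouble()) / weights[i];
      reservoir.add(new double[] {key, i});
    }

    for (double[] entry : reservoir) {
      System.out.println("sampled item " + (int) entry[1]);
    }
  }
}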
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp;
  if (!fetchFromTail) {
    rp = regionsToMove.remove();
  } else {
    rp = regionsToMove.removeLast();
  }
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
public MonoReilSolver(final IInstructionGraph instructionGraph,
    final AnalysisDirection analysisDirection, final ILattice<LatticeElementType> lattice) {
  m_graph = Preconditions.checkNotNull(instructionGraph,
      "Error: instruction graph argument can not be null");
  m_direction = Preconditions.checkNotNull(analysisDirection,
      "Error: analysis direction argument can not be null");
  m_lattice = Preconditions.checkNotNull(lattice, "Error: lattice argument can not be null");
  m_workList = MinMaxPriorityQueue.expectedSize(m_graph.size()).create();
}
/**
 * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
 * using the worst-fit-decreasing algorithm.
 *
 * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
 */
protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {

  // Sort work unit groups by data size, descending.
  Collections.sort(groups, LOAD_DESC_COMPARATOR);

  MinMaxPriorityQueue<MultiWorkUnit> pQueue =
      MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR).expectedSize(numOfMultiWorkUnits).create();
  for (int i = 0; i < numOfMultiWorkUnits; i++) {
    MultiWorkUnit multiWorkUnit = new MultiWorkUnit();
    setWorkUnitEstSize(multiWorkUnit, 0);
    pQueue.add(multiWorkUnit);
  }

  for (WorkUnit group : groups) {
    MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
    addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
    pQueue.add(lightestMultiWorkUnit);
  }

  logMultiWorkUnitInfo(pQueue);

  double minLoad = getWorkUnitEstLoad(pQueue.peekFirst());
  double maxLoad = getWorkUnitEstLoad(pQueue.peekLast());
  LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%",
      minLoad, maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

  this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
  this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

  List<WorkUnit> multiWorkUnits = Lists.newArrayList();
  multiWorkUnits.addAll(pQueue);
  return multiWorkUnits;
}
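The javadoc above describes worst-fit decreasing: sort items by size descending, then repeatedly hand the next item to the currently lightest bin. A minimal self-contained sketch of that algorithm over plain longs (the Bin class and sizes are illustrative, not from the snippet):

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class WorstFitDecreasingDemo {
  static class Bin {
    final List<Long> items = new ArrayList<>();
    long load;
  }

  public static void main(String[] args) {
    long[] sizes = {9, 7, 6, 4, 3, 1}; // already sorted descending
    int numBins = 3;

    // Min-queue of bins by load: poll() always yields the lightest bin.
    MinMaxPriorityQueue<Bin> bins = MinMaxPriorityQueue
        .orderedBy(Comparator.comparingLong((Bin b) -> b.load))
        .expectedSize(numBins)
        .create();
    for (int i = 0; i < numBins; i++) {
      bins.add(new Bin());
    }

    for (long size : sizes) {
      Bin lightest = bins.poll(); // worst fit: bin with the most remaining room
      lightest.items.add(size);
      lightest.load += size;
      bins.add(lightest); // re-insert so ordering reflects the new load
    }

    System.out.println("min load: " + bins.peekFirst().load);
    System.out.println("max load: " + bins.peekLast().load);
  }
}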
Queue<QueryMatch> takeN(Comparator<QueryMatch> comp, String prefix, int n) throws IOException {
  Queue<QueryMatch> candidates = MinMaxPriorityQueue.orderedBy(comp)
      .maximumSize(n)
      .create();

  Scan scan = new Scan(prefix.getBytes());
  scan.setFilter(new PrefixFilter(prefix.getBytes()));
  scan.addFamily(FAMILY);
  scan.setMaxVersions(1);
  scan.setCaching(50);

  HTableInterface table = pool.getTable(TABLE);
  int cnt = 0;
  ResultScanner scanner = table.getScanner(scan);
  for (Result r : scanner) {
    String hash = new String(r.getRow());
    String id = new String(r.getValue(FAMILY, ID));
    String lon = new String(r.getValue(FAMILY, X_COL));
    String lat = new String(r.getValue(FAMILY, Y_COL));
    candidates.add(new QueryMatch(id, hash,
        Double.parseDouble(lon),
        Double.parseDouble(lat)));
    cnt++;
  }
  table.close();

  System.out.println(String.format("Scan over '%s' returned %s candidates.", prefix, cnt));
  return candidates;
}
public AbstractBucketManager() {
  eventQueue = new LinkedBlockingQueue<Long>();
  evictionCandidates = Sets.newHashSet();
  dirtyBuckets = Maps.newConcurrentMap();
  bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<AbstractBucket<T>>() {
    @Override
    public int compare(AbstractBucket<T> bucket1, AbstractBucket<T> bucket2) {
      return Long.compare(bucket1.lastUpdateTime(), bucket2.lastUpdateTime());
    }
  }).create();
  lock = new Lock();
  committedWindow = -1;

  noOfBuckets = DEF_NUM_BUCKETS;
  noOfBucketsInMemory = DEF_NUM_BUCKETS_MEM;
  maxNoOfBucketsInMemory = DEF_NUM_BUCKETS_MEM + 100;
  millisPreventingBucketEviction = DEF_MILLIS_PREVENTING_EVICTION;
  writeEventKeysOnly = true;
  bucketsToDelete = Sets.newHashSet();
}
@Override
protected List<ConfigIssue> init() {
  // Validate configuration values and open any required resources.
  List<ConfigIssue> issues = gcsOriginConfig.init(getContext(), super.init());
  minMaxPriorityQueue = MinMaxPriorityQueue.orderedBy((Blob o1, Blob o2) -> {
    int result = o1.getUpdateTime().compareTo(o2.getUpdateTime());
    if (result != 0) {
      return result;
    }
    // Same modified time; fall back to the name to break the tie.
    return o1.getName().compareTo(o2.getName());
  }).maximumSize(gcsOriginConfig.maxResultQueueSize).create();
  antPathMatcher = new AntPathMatcher();
  gcsOriginConfig.credentials.getCredentialsProvider(getContext(), issues)
      .ifPresent(p -> credentialsProvider = p);
  try {
    storage = StorageOptions.newBuilder()
        .setCredentials(credentialsProvider.getCredentials())
        .build()
        .getService();
  } catch (IOException e) {
    LOG.error("Error when initializing storage. Reason : {}", e);
    issues.add(getContext().createConfigIssue(
        Groups.CREDENTIALS.name(),
        "gcsOriginConfig.credentials.credentialsProvider",
        Errors.GCS_01,
        e
    ));
  }
  rateLimitElEval = FileRefUtil.createElEvalForRateLimit(getContext());
  rateLimitElVars = getContext().createELVars();
  errorBlobHandler = new GcsObjectPostProcessingHandler(storage, gcsOriginConfig.gcsOriginErrorConfig);
  return issues;
}
@Override
public void startSelection(int numberOfLinks) {
  this.topkLinks = MinMaxPriorityQueue
      .orderedBy(LinkRelevance.DESC_ORDER_COMPARATOR)
      .maximumSize(numberOfLinks) // keep only the top-k items
      .create();
}
@Override
public void startSelection(int numberOfLinks) {
  links = MinMaxPriorityQueue
      .orderedBy(new Comparator<RandomLink>() {
        @Override
        public int compare(RandomLink o1, RandomLink o2) {
          return Double.compare(o1.relevance, o2.relevance);
        }
      })
      .maximumSize(numberOfLinks) // keep only the top-k items
      .create();
}
@Override
public void evaluateLink(LinkRelevance link) {
  if (link.getRelevance() > 0) {
    String domainName = link.getTopLevelDomainName();
    MinMaxPriorityQueue<LinkRelevance> domainQueue = topkLinksPerDomain.get(domainName);
    if (domainQueue == null) {
      domainQueue = newPriorityQueue(MAX_LINKS_PER_DOMAIN);
      topkLinksPerDomain.put(domainName, domainQueue);
    }
    domainQueue.add(link);
  }
}
public List<TranslationCandidate> alignDistributional(TermService sourceTerm, int nbCandidates,
    int minCandidateFrequency) {
  Queue<TranslationCandidate> alignedCandidateQueue =
      MinMaxPriorityQueue.maximumSize(nbCandidates).create();
  ContextVector sourceVector = sourceTerm.getContext();
  if (sourceVector == null) {
    return new ArrayList<>();
  }
  ContextVector translatedSourceVector = translateVector(
      sourceVector,
      dico,
      TRANSLATION_STRATEGY_MOST_SPECIFIC,
      targetTermino);
  ExplainedValue v;
  int nbVectorsNotComputed = 0;
  int nbVectorsComputed = 0;
  for (TermService targetTerm : targetTermino.terms()
      .filter(TermService::isSingleWord)
      .collect(Collectors.toList())) {
    if (targetTerm.getFrequency() < minCandidateFrequency) {
      continue;
    }
    if (targetTerm.getContext() != null) {
      nbVectorsComputed++;
      v = distance.getExplainedValue(translatedSourceVector, targetTerm.getContext());
      TranslationCandidate candidate = new TranslationCandidate(
          AlignmentMethod.DISTRIBUTIONAL,
          targetTerm,
          v.getValue(),
          sourceTerm,
          v.getExplanation());
      alignedCandidateQueue.add(candidate);
    } else {
      nbVectorsNotComputed++;
    }
  }
  if (nbVectorsNotComputed > 0) {
    LOGGER.warn(MSG_SEVERAL_VECTORS_NOT_COMPUTED, nbVectorsComputed, nbVectorsNotComputed);
  }

  // Sort the aligned candidates and normalize their scores.
  List<TranslationCandidate> alignedCandidates =
      Lists.newArrayListWithCapacity(alignedCandidateQueue.size());
  alignedCandidates.addAll(alignedCandidateQueue);
  Collections.sort(alignedCandidates);
  normalizeCandidateScores(alignedCandidates);
  return alignedCandidates;
}
/**
 * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
 * using the worst-fit-decreasing algorithm.
 *
 * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
 */
protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {

  // Sort work unit groups by data size, descending.
  Collections.sort(groups, LOAD_DESC_COMPARATOR);

  MinMaxPriorityQueue<MultiWorkUnit> pQueue =
      MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR).expectedSize(numOfMultiWorkUnits).create();
  for (int i = 0; i < numOfMultiWorkUnits; i++) {
    MultiWorkUnit multiWorkUnit = MultiWorkUnit.createEmpty();
    setWorkUnitEstSize(multiWorkUnit, 0);
    pQueue.add(multiWorkUnit);
  }

  for (WorkUnit group : groups) {
    MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
    addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
    pQueue.add(lightestMultiWorkUnit);
  }

  logMultiWorkUnitInfo(pQueue);

  double minLoad = getWorkUnitEstLoad(pQueue.peekFirst());
  double maxLoad = getWorkUnitEstLoad(pQueue.peekLast());
  LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%",
      minLoad, maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

  this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
  this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

  List<WorkUnit> multiWorkUnits = Lists.newArrayList();
  multiWorkUnits.addAll(pQueue);
  return multiWorkUnits;
}