public NSImageIconCache() {
    if(0 == PreferencesFactory.get().getInteger("icon.cache.size")) {
        cache = new HashMap<String, NSImage>() {
            @Override
            public NSImage put(String key, NSImage value) {
                return value;
            }
        };
    }
    else {
        cache = new LRUMap<String, NSImage>(PreferencesFactory.get().getInteger("icon.cache.size")) {
            @Override
            protected boolean removeLRU(LinkEntry entry) {
                if(log.isDebugEnabled()) {
                    log.debug("Removing from cache:" + entry);
                }
                return true;
            }
        };
    }
}
IpBlocker(long timeWindowMs, int count, BlockingStatus status, long blockingTime, int maxIpSize, int initSize) {
    this.timeWindowMs = timeWindowMs;
    this.blockingTime = blockingTime;
    this.count = count;
    this.status = status;
    log.trace("timeWindowMs={}, blockingTime={}, count={}, maxIpSize={}", timeWindowMs, blockingTime, count, maxIpSize);
    this.ipMap = Collections.synchronizedMap(new LRUMap<String, IpBlockData>(maxIpSize, initSize) {
        @Override
        protected boolean removeLRU(final LinkEntry<String, IpBlockData> entry) {
            long time = currentTimeMs();
            // Evict only entries that have not been touched within the tracking window;
            // otherwise veto the removal and let the map grow temporarily.
            if ((time - entry.getValue().lastAccessTime) > timeWindowMs) {
                log.trace("remove block {}", entry.getKey());
                return true;
            }
            log.debug("no remove {}", entry.getKey());
            return false;
        }
    });
}
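/*
 * The two constructors above hinge on LRUMap's removeLRU() hook: returning true accepts eviction
 * of the least recently used entry, returning false vetoes it and lets the map grow past its
 * maximum size until a later eviction succeeds. Below is a minimal, self-contained sketch of that
 * pattern; the names TimedLRUCache, Timestamped and maxAgeMs are illustrative and not taken from
 * the snippets above.
 */
import org.apache.commons.collections4.map.LRUMap;

public class TimedLRUCache<K, V> {

    static final class Timestamped<T> {
        final T value;
        final long createdAtMs = System.currentTimeMillis();
        Timestamped(T value) { this.value = value; }
    }

    private final long maxAgeMs;
    private final LRUMap<K, Timestamped<V>> map;

    public TimedLRUCache(int maxSize, long maxAgeMs) {
        this.maxAgeMs = maxAgeMs;
        this.map = new LRUMap<K, Timestamped<V>>(maxSize) {
            @Override
            protected boolean removeLRU(LinkEntry<K, Timestamped<V>> entry) {
                // true  -> accept eviction of the least recently used entry
                // false -> veto it; the map temporarily grows past maxSize
                return System.currentTimeMillis() - entry.getValue().createdAtMs > TimedLRUCache.this.maxAgeMs;
            }
        };
    }

    public synchronized void put(K key, V value) {
        map.put(key, new Timestamped<>(value));
    }

    public synchronized V get(K key) {
        Timestamped<V> entry = map.get(key);   // get() also promotes the entry to most recently used
        return entry == null ? null : entry.value;
    }
}

// Note: LRUMap promotes entries on get(), so even reads mutate internal state; that is why the
// snippets in this listing wrap it in Collections.synchronizedMap (this sketch synchronizes its
// accessor methods instead).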
@Override
public void onScanEnd(LRUMap<ID, Beacon> beacons, int reqState, long interval, int fresh, int dead) {
    if(mListener != null) {
        mListener.onScanEnd(beacons, reqState, interval, fresh, dead);
    }
    int mode = mScanManager.getMode();
    if(Build.VERSION.SDK_INT <= Build.VERSION_CODES.LOLLIPOP) {
        // On API 21 and below we should stop the service,
        // except when the app is visible or interval+duration is extremely short (below 5s)
        //if(mode != SCAN_FOREGROUND && )
    } else {
    }
    //if(mode == SCAN_BACKGROUND || mode == SCAN_LOCKED && interval+duration < 60 000) {
    //    stopSelfAndCleanup();
    //}
}
public ROTransaction(StorageAccessIF access) {
    super("TX" + access.getId(), access);

    // initialize shared data cache
    StorageCacheIF scache = access.getStorage().getStorageCache();
    this.txncache = new ROLocalCache(this, scache);

    // initialize identity map
    this.lrusize = PropertyUtils.getInt(access.getProperty("net.ontopia.topicmaps.impl.rdbms.Cache.shared.identitymap.lru"), 5000);
    this.lru = new LRUMap(this.lrusize);

    // instrument transaction cache
    int dinterval = PropertyUtils.getInt(access.getStorage().getProperty("net.ontopia.topicmaps.impl.rdbms.Cache.local.debug"), -1);
    if (dinterval > 0) {
        log.info("Instrumenting local cache.");
        this.txncache = new StatisticsCache("lcache", this.txncache, dinterval);
    }

    // Get access registrar from transaction cache (currently the local data cache)
    this.registrar = txncache.getRegistrar();

    // Use IdentityIF object access
    this.oaccess = new PersistentObjectAccess(this);
}
public RWTransaction(StorageAccessIF access) {
    super("TX" + access.getId(), access);

    // initialize shared data cache
    StorageCacheIF scache = access.getStorage().getStorageCache();
    if (scache != null) trackall = true;
    this.txncache = new RWLocalCache(this, scache);

    // initialize identity map
    this.lrusize = PropertyUtils.getInt(access.getProperty("net.ontopia.topicmaps.impl.rdbms.Cache.identitymap.lru"), 300);
    this.lru = new LRUMap(lrusize);

    // instrument transaction cache
    int dinterval = PropertyUtils.getInt(access.getStorage().getProperty("net.ontopia.topicmaps.impl.rdbms.Cache.local.debug"), -1);
    if (dinterval > 0) {
        log.info("Instrumenting local cache.");
        this.txncache = new StatisticsCache("lcache", this.txncache, dinterval);
    }

    // Get access registrar from transaction cache (currently the local data cache)
    this.registrar = txncache.getRegistrar();

    // Use IdentityIF object access
    this.oaccess = new PersistentObjectAccess(this);
}
public void initSettings(Path stateDir, LuceneSettings settings) throws Exception {
    if (this.settings != null)
        throw new Exception("Init settings is only allowed once");
    this.settings = settings;

    MMapDirectory indexDirectory = new MMapDirectory(stateDir.resolve("index"));
    indexDirectory.setUseUnmap(false);

    MMapDirectory taxoDirectory = new MMapDirectory(stateDir.resolve("taxo"));
    taxoDirectory.setUseUnmap(false);

    IndexWriterConfig config = new IndexWriterConfig(settings.analyzer);
    config.setSimilarity(settings.similarity);
    config.setMergePolicy(settings.getMergePolicy());

    this.indexWriter = new IndexWriter(indexDirectory, config);
    this.indexWriter.commit();
    this.taxoWriter = new DirectoryTaxonomyWriter(taxoDirectory, IndexWriterConfig.OpenMode.CREATE_OR_APPEND,
            new LruTaxonomyWriterCache(settings.lruTaxonomyWriterCacheSize));
    this.taxoWriter.commit();

    this.scoreCollectorCache = Collections.synchronizedMap(new LRUMap<KeyNameQuery, ScoreSuperCollector>(50));
    this.keyCollectorCache = Collections.synchronizedMap(new LRUMap<KeyNameQuery, FixedBitSet>(50));

    this.manager = new SearcherTaxonomyManager(indexDirectory, taxoDirectory,
            new MerescoSearchFactory(indexDirectory, taxoDirectory, settings));
    this.manager.addListener(refreshListener);
}
/**
 * Sets the max number of entries to cache.
 */
public ReadCache<Key, Value> withMaxCapacity(int maxCapacity) {
    return withCache(new LRUMap<Key, Value>(maxCapacity) {
        @Override
        protected boolean removeLRU(LinkEntry<Key, Value> entry) {
            cacheRemoved(entry.getKey(), entry.getValue());
            return super.removeLRU(entry);
        }
    });
}
public ReadCache.BytesKey<V> withMaxCapacity(int maxCapacity) {
    withCache(new ByteArrayMap<V>(new LRUMap<ByteArrayWrapper, V>(maxCapacity) {
        @Override
        protected boolean removeLRU(LinkEntry<ByteArrayWrapper, V> entry) {
            cacheRemoved(entry.getKey().getData(), entry.getValue());
            return super.removeLRU(entry);
        }
    }));
    return this;
}
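/*
 * Both withMaxCapacity variants above follow the same pattern: override removeLRU() to fire an
 * eviction callback (cacheRemoved) before delegating to super.removeLRU(), which accepts the
 * eviction. A standalone, hedged sketch of that listener pattern follows; EvictionListeningLRUMap
 * and the BiConsumer listener are illustrative names, not part of the ReadCache API shown above.
 */
import java.util.function.BiConsumer;
import org.apache.commons.collections4.map.LRUMap;

public class EvictionListeningLRUMap<K, V> extends LRUMap<K, V> {

    private final BiConsumer<K, V> evictionListener;

    public EvictionListeningLRUMap(int maxSize, BiConsumer<K, V> evictionListener) {
        super(maxSize);
        this.evictionListener = evictionListener;
    }

    @Override
    protected boolean removeLRU(LinkEntry<K, V> entry) {
        // Notify the listener, then let the default implementation accept the eviction.
        evictionListener.accept(entry.getKey(), entry.getValue());
        return super.removeLRU(entry);
    }
}

// Usage: log every entry that falls out of the cache.
// Map<String, byte[]> cache = new EvictionListeningLRUMap<>(1024,
//         (key, value) -> System.out.println("evicted " + key));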
public AbstractCache(int size) {
    if(size == Integer.MAX_VALUE) {
        // Unlimited
        impl = Collections.synchronizedMap(new LinkedHashMap<T, AttributedList<T>>());
    } else if(size == 0) {
        impl = Collections.emptyMap();
    } else {
        // Will inflate to the given size
        impl = Collections.synchronizedMap(new LRUMap<T, AttributedList<T>>(size));
    }
}
/**
 * Sets the new size of the parsing cache.
 * Note that this will also wipe the existing cache.
 *
 * @param newCacheSize The size of the new LRU cache. A size of 0 will disable caching.
 */
public void setCacheSize(int newCacheSize) {
    cacheSize = newCacheSize > 0 ? newCacheSize : 0;
    if (cacheSize >= 1) {
        parseCache = new LRUMap<>(cacheSize);
    } else {
        parseCache = null;
    }
}
private int getAllocatedCacheSize(UserAgentAnalyzer uaa) throws IllegalAccessException {
    LRUMap<?, ?> cache = getCache(uaa);
    if (cache == null) {
        return 0;
    }
    return cache.maxSize();
}
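/*
 * The two methods above manage a parse cache backed by an LRUMap (created in setCacheSize and
 * inspected via maxSize()). A hedged sketch of how such a cache is typically consulted follows;
 * CachingParser, parse() and doParse() are hypothetical names used only for illustration and are
 * not taken from the snippets above.
 */
import org.apache.commons.collections4.map.LRUMap;

public class CachingParser {

    // Mirrors the setCacheSize contract above: size >= 1 enables the LRU cache, otherwise caching is off.
    private final LRUMap<String, String> parseCache;

    public CachingParser(int cacheSize) {
        this.parseCache = cacheSize >= 1 ? new LRUMap<String, String>(cacheSize) : null;
    }

    public String parse(String input) {
        if (parseCache == null) {
            return doParse(input);                 // caching disabled
        }
        String cached = parseCache.get(input);     // get() also promotes the entry to most recently used
        if (cached == null) {
            cached = doParse(input);
            parseCache.put(input, cached);
        }
        return cached;
    }

    // Placeholder for the real (expensive) parsing work.
    private String doParse(String input) {
        return input.trim().toLowerCase();
    }
}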
protected JWNLLemmatizer(String modelFile, int cacheSize) {
    super(modelFile);
    this.mCache = Collections.synchronizedMap(new LRUMap<String, String>(cacheSize));
    this.mLegalPattern = Pattern.compile("^[a-z\\-_ \\.\\/']*$");
    this.mMultiWordsPattern = Pattern.compile("[\\- _]");
    this.mEfficentPattern = Pattern.compile("(^[\\- _]|[\\- _]{2}|[\\- _]$)");
}
/**
 * Sets how many groups you want to keep in memory. If the value is zero or negative then all groups will be kept in memory.
 *
 * Default value is {@value #DEFAULT_BufferSize}.
 *
 * @param bufferSize
 */
public void setGroupsBufferSize(final int bufferSize) {
    assertConfigurable();

    //
    // Case: infinite memory
    if (bufferSize <= 0) {
        this._itemHashKey2group = new HashMap<>();
    }
    //
    // Case: finite buffer size
    else {
        this._itemHashKey2group = new LRUMap<Object, GroupType>(bufferSize) {

            private static final long serialVersionUID = -1342388742697440633L;

            /**
             * When a group is removed from the LRU map because it no longer fits in the buffer,
             * we need to close that group.
             */
            @Override
            protected boolean removeLRU(final org.apache.commons.collections4.map.AbstractLinkedMap.LinkEntry<Object, GroupType> entry) {
                final GroupType group = entry.getValue();
                if (group != null) {
                    closeGroup(group);
                }
                return true; // accept the removal
            }
        };
    }
}
public GoogleDriveClient() {
    this.cacheNameId = new LRUMap(2000);
    this.cacheIdName = new LRUMap(2000);
    authenticated = false;
    httpTransport = new NetHttpTransport();
    jsonFactory = new JacksonFactory();
}
protected synchronized boolean switchToLRUMap() {
    if (!usingLRUMap) {
        if (size() > maxEntries) {
            lruMap = Collections.synchronizedMap(new LRUMap<K, V>(maxEntries));
            lruMap.putAll(concurrentMap);
            usingLRUMap = true;
            concurrentMap.clear();
        }
    }
    return usingLRUMap; // this could be set by another thread
}
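/*
 * switchToLRUMap() above is the core of a hybrid map: entries live in a lock-free
 * ConcurrentHashMap until the configured limit is exceeded, then they are copied into a
 * synchronized, bounded LRUMap. The sketch below shows how get/put might delegate around that
 * switch; it is an illustration of the idea under those assumptions, not the project's actual
 * EfficientLRUMap code.
 */
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.collections4.map.LRUMap;

public class HybridLRUMapSketch<K, V> {

    private final int maxEntries;
    private final Map<K, V> concurrentMap = new ConcurrentHashMap<>();
    private volatile Map<K, V> lruMap;
    private volatile boolean usingLRUMap = false;

    public HybridLRUMapSketch(int maxEntries) {
        this.maxEntries = maxEntries;
    }

    public V put(K key, V value) {
        if (usingLRUMap) {
            return lruMap.put(key, value);
        }
        V previous = concurrentMap.put(key, value);
        switchToLRUMap(); // switch once the unbounded map grows past maxEntries
        return previous;
    }

    public V get(K key) {
        return usingLRUMap ? lruMap.get(key) : concurrentMap.get(key);
    }

    public int size() {
        return usingLRUMap ? lruMap.size() : concurrentMap.size();
    }

    private synchronized void switchToLRUMap() {
        if (!usingLRUMap && concurrentMap.size() > maxEntries) {
            Map<K, V> bounded = Collections.synchronizedMap(new LRUMap<K, V>(maxEntries));
            bounded.putAll(concurrentMap);
            lruMap = bounded;
            usingLRUMap = true;
            concurrentMap.clear();
        }
    }
}

// As in the original snippet, a put racing with the switch can be tolerated for a cache: a lost
// entry only costs a later cache miss.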
public void testMapSwitch() {
    EfficientLRUMap<String, String> testMap = new EfficientLRUMap<String, String>(5);

    // Test basics for a single name value pair
    testMap.put("key1", "value1");
    assertEquals("The value for key1 should be value1", "value1", testMap.get("key1"));
    assertEquals("The size() for the map should be 1", 1, testMap.size());
    assertEquals("The type of Map should be ConcurrentHashMap", testMap.getUnderlyingMapClass(), ConcurrentHashMap.class);

    // Add keys up to the limit
    testMap.put("key2", "value2");
    testMap.put("key3", "value3");
    testMap.put("key4", "value4");
    testMap.put("key5", "value5");

    // Validate last items and map type.
    assertEquals("The value for key5 should be value5", "value5", testMap.get("key5"));
    assertEquals("The size() for the map should be 5", 5, testMap.size());
    assertEquals("The type of Map should be ConcurrentHashMap", testMap.getUnderlyingMapClass(), ConcurrentHashMap.class);

    // Updating an item shouldn't change the map type
    testMap.put("key5", "value5b");
    assertEquals("The value for key5 should now be value5b", "value5b", testMap.get("key5"));
    assertEquals("The size() for the map should be 5", 5, testMap.size());
    assertEquals("The type of Map should be ConcurrentHashMap", testMap.getUnderlyingMapClass(), ConcurrentHashMap.class);

    // Add another item which should trigger a switch in the map type
    testMap.put("key6", "value6");
    assertEquals("The value for key1 should still be value1", "value1", testMap.get("key1"));
    assertEquals("The value for key6 should be value6", "value6", testMap.get("key6"));
    assertEquals("The size() for the map should be 5 since we are now LRU", 5, testMap.size());
    assertTrue("The underlying map should no longer be a raw LRUMap; it should be a synchronized wrapper around one",
            !testMap.getUnderlyingMapClass().equals(LRUMap.class));
}
/**
 * Constructs a new {@code AbstractBackendController} instance.
 */
@SuppressWarnings("unchecked")
protected AbstractBackendController() {
    unitOfWork = createUnitOfWork();
    sessionUnitOfWork = createUnitOfWork();
    sessionUnitOfWork.begin();
    moduleConnectors = new LRUMap<>(20);
    securityContextBuilder = new SecurityContextBuilder();
    throwExceptionOnBadUsage = true;
    asyncExecutors = new LinkedHashSet<>();
    setAsyncExecutorsMaxCount(10);
}
QueryCache(DetachedQueryIF query, CacheIF<K, E> cache, int lrusize, E nullObject) {
    this.query = query;
    this.cache = cache;
    this.lru = Collections.synchronizedMap(new LRUMap<K, E>(lrusize));
    this.lrusize = lrusize;
    NULLOBJECT = nullObject;
}
public LocatorLookup(String qname, TransactionIF txn, TopicMapIF tm, int lrusize, E nullObject) {
    this.qname = qname;
    this.txn = txn;
    this.tm = tm;
    this.lrusize = lrusize;
    this.cache = new ReferenceMap<LocatorIF, E>(AbstractReferenceMap.ReferenceStrength.SOFT, AbstractReferenceMap.ReferenceStrength.HARD);
    this.lru = new LRUMap<LocatorIF, E>(lrusize);
    NULLOBJECT = nullObject;
}
public QueryLookup(String qname, TransactionIF txn, int lrusize, V nullObject) {
    this.qname = qname;
    this.txn = txn;
    this.cache = new ReferenceMap(AbstractReferenceMap.ReferenceStrength.SOFT, AbstractReferenceMap.ReferenceStrength.HARD);
    this.lru = new LRUMap(lrusize);
    NULLOBJECT = nullObject;
}
@SuppressWarnings("rawtypes") public InMemoryCache(long crunchifyTimeToLive, final long crunchifyTimerInterval, int maxItems) { this.timeToLive = crunchifyTimeToLive * 1000; cacheMap = new LRUMap(maxItems); if (timeToLive > 0 && crunchifyTimerInterval > 0) { createCleanUpThread(crunchifyTimerInterval); } }
@Override
public void onScanEnd(LRUMap<ID, Beacon> beacons, int reqState, long interval, int fresh, int dead) {
    mListener.onScanEnd(beacons, mUserRequest, interval, fresh, dead);
    mScheduler.schedule(intervals[mMode], mMode, mNotify.isHighPriority(), mExact);
    mNotify.issueIfShown(mService);
}
public OneDriveClient() {
    this.cacheNameId = new LRUMap(4000);
}
public OneDriveClient(String token) {
    this.cacheNameId = new LRUMap(4000);
    accessToken = token;
    authenticated = true;
}
public TransactionalLRULookupIndex(CacheIF cache, int lrusize) {
    this.cache = cache;
    this.lru = Collections.synchronizedMap(new LRUMap(lrusize));
    this.lrusize = lrusize;
}
public FunctionMapCache( CacheTracker tracker ) {
    this.tracker = Objects.requireNonNull( tracker );
    this.createdFromWeak = new WeakReference<>( null );
    cache = new LRUMap<String, String>( 50 );
}
private void init() {
    mSafeguardCache = new LRUMap<Object, Object>(mCacheSize);
}
/**
 * Callback for Scanner bulk delivery mode.
 * Called when the actual LE scan is finished AND all beacons found in the scan cycle are processed.
 *
 * @param incoming LRUMap containing recently seen beacons.
 * @param reqState
 * @param interval
 * @param fresh    Fresh beacons count.
 * @param dead     Dead beacons count.
 */
void onScanEnd(LRUMap<ID, Beacon> incoming, int reqState, long interval, int fresh, int dead);