Example source code for the Java class com.google.common.cache.Cache

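The snippets on this page are drawn from open-source projects and show typical ways of building and using com.google.common.cache.Cache. As a point of reference, here is a minimal, hypothetical sketch of the core API (CacheBuilder configuration, put/getIfPresent, and get with a loading Callable); the sizes, expiry times and keys are illustrative only:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CacheQuickStart {
    public static void main(String[] args) throws ExecutionException {
        // Bounded cache with write-based expiry and statistics recording.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(10, TimeUnit.MINUTES)
                .recordStats()
                .build();

        // Explicit population; getIfPresent returns null on a miss.
        cache.put("greeting", "hello");
        String hit = cache.getIfPresent("greeting");

        // get(key, loader) computes and caches the value on a miss.
        String loaded = cache.get("answer", () -> "42");

        System.out.println(hit + " / " + loaded + " / " + cache.stats());
    }
}
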
Project: Reer    File: InMemoryTaskArtifactCache.java
private Cache<Object, Object> createInMemoryCache(String cacheId, String cacheName) {
    Cache<Object, Object> inMemoryCache = this.cache.getIfPresent(cacheId);
    if (inMemoryCache != null) {
        LOG.info("In-memory cache of {}: Size{{}}, {}", cacheId, inMemoryCache.size(), inMemoryCache.stats());
    } else {
        Integer maxSize = cacheCapSizer.getMaxSize(cacheName);
        assert maxSize != null : "Unknown cache.";
        LOG.debug("Creating In-memory cache of {}: MaxSize{{}}", cacheId, maxSize);
        LoggingEvictionListener evictionListener = new LoggingEvictionListener(cacheId, maxSize);
        final CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder().maximumSize(maxSize).recordStats().removalListener(evictionListener);
        inMemoryCache = cacheBuilder.build();
        evictionListener.setCache(inMemoryCache);
        this.cache.put(cacheId, inMemoryCache);
    }
    return inMemoryCache;
}
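
The LoggingEvictionListener registered above is not included in this listing. As a rough, hypothetical sketch only (the actual Gradle class differs), an eviction-logging RemovalListener wired up the same way might look like this:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.cache.Cache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

// Hypothetical sketch; not the actual Gradle LoggingEvictionListener.
class SimpleLoggingEvictionListener implements RemovalListener<Object, Object> {
    private static final Logger LOG = LoggerFactory.getLogger(SimpleLoggingEvictionListener.class);

    private final String cacheId;
    private volatile Cache<Object, Object> cache;

    SimpleLoggingEvictionListener(String cacheId) {
        this.cacheId = cacheId;
    }

    void setCache(Cache<Object, Object> cache) {
        this.cache = cache;
    }

    @Override
    public void onRemoval(RemovalNotification<Object, Object> notification) {
        // Only report size/expiry evictions, not explicit invalidations.
        if (notification.wasEvicted()) {
            LOG.debug("Cache {}: evicted key {} (cause: {}){}", cacheId, notification.getKey(),
                    notification.getCause(), cache != null ? ", stats: " + cache.stats() : "");
        }
    }
}
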
Project: Chronetic    File: ChronoRange.java
/**
 * Create a ChronoRange for the given ChronoSeries and sequence of ChronoGenes.
 *
 * @param chronoSeries ChronoSeries to create ChronoRange for
 * @param genes ChronoGene sequence containing ChronoPattern(s) to use for creating ChronoRange
 * @return ChronoRange for given ChronoSeries and ChronoGene sequence
 */
@NotNull
public static ChronoRange getChronoRange(@NotNull ChronoSeries chronoSeries, @NotNull ISeq<ChronoGene> genes) {
    ChronoRange range = new ChronoRange(requireNonNull(chronoSeries), requireNonNull(genes));
    Cache<ISeq<ChronoPattern>, ChronoRange> cacheChronoRange = cacheMap.get(chronoSeries);
    if (cacheChronoRange == null) {
        cacheChronoRange = CacheBuilder.newBuilder().build();
        cacheMap.put(chronoSeries, cacheChronoRange);
    }

    ChronoRange cacheRange = cacheChronoRange.getIfPresent(range.chronoPatternSeq);
    if (cacheRange != null) {
        return cacheRange;
    } else {
        if (range.validRange) {
            range.calculateTimestampRanges();
        }

        cacheChronoRange.put(range.chronoPatternSeq, range);
        return range;
    }
}
Project: dremio-oss    File: HandlerToPreparePlan.java
public HandlerToPreparePlan(
    QueryContext context,
    SqlNode sqlNode,
    SqlToPlanHandler handler,
    Cache<Long, PreparedPlan> planCache,
    String sql,
    AttemptObserver observer,
    SqlHandlerConfig config) {
  this.context = context;
  this.sqlNode = sqlNode;
  this.handler = handler;
  this.planCache = planCache;
  this.sql = sql;
  this.observer = observer;
  this.config = config;
}
Project: calcite-avatica    File: RemoteDriverTest.java
@Override public Cache<Integer, Object>
getRemoteStatementMap(AvaticaConnection connection) throws Exception {
  Field metaF = AvaticaConnection.class.getDeclaredField("meta");
  metaF.setAccessible(true);
  Meta clientMeta = (Meta) metaF.get(connection);
  Field remoteMetaServiceF = clientMeta.getClass().getDeclaredField("service");
  remoteMetaServiceF.setAccessible(true);
  LocalProtobufService remoteMetaService =
      (LocalProtobufService) remoteMetaServiceF.get(clientMeta);
  // Use the explicit class to avoid issues with LoggingLocalJsonService
  Field remoteMetaServiceServiceF = LocalProtobufService.class.getDeclaredField("service");
  remoteMetaServiceServiceF.setAccessible(true);
  LocalService remoteMetaServiceService =
      (LocalService) remoteMetaServiceServiceF.get(remoteMetaService);
  Field remoteMetaServiceServiceMetaF =
      remoteMetaServiceService.getClass().getDeclaredField("meta");
  remoteMetaServiceServiceMetaF.setAccessible(true);
  JdbcMeta serverMeta = (JdbcMeta) remoteMetaServiceServiceMetaF.get(remoteMetaServiceService);
  Field jdbcMetaStatementMapF = JdbcMeta.class.getDeclaredField("statementCache");
  jdbcMetaStatementMapF.setAccessible(true);
  //noinspection unchecked
  @SuppressWarnings("unchecked")
  Cache<Integer, Object> cache = (Cache<Integer, Object>) jdbcMetaStatementMapF.get(serverMeta);
  return cache;
}
Project: gold    File: SchemaDefinitionCacheTest.java
public void schemaCacheItemsGetAddedUponInstanceAccess() {
    // Initially there should be no entries on the schemaDescriptionIdCache
    Cache<SchemaKey, Set<SchemaKey>> schemaDescriptionIdCache = cacheService.getSchemaDescriptionIdCache();
    assertEquals(schemaDescriptionIdCache.size(), 0);

    // Creating auxiliary objects prior to execute queries
    Map<String, Object> variableMap = buildVariableMap(CACHE_TEST_NAMESPACE, MEDIA_DOMAIN_NAME);

    SchemaWriteAccess access = buildSchemaWriteAccessCacheTestNamespace();

    // Run a query on media that should populate the reference cache for it
    GraphQLResult addMediaInstanceResult = instanceService.executeQuery(addMediaInstance, variableMap, access, DEFAULT_MAX_RECURSE_DEPTH);
    assertTrue(addMediaInstanceResult.isSuccessful());

    // Lookup for "Media" referenced types
    Set<SchemaKey> schemaKeySets = schemaDescriptionIdCache.getIfPresent(new SchemaKey(MEDIA_DOMAIN_NAME, SCHEMA_NAME_SPACE));

    // Media should have 2 referenced types (Video and Audio)
    assertNotNull(schemaKeySets);
    assertEquals(schemaKeySets.size(), 2);

    assertTrue(schemaKeySets.contains(new SchemaKey(VIDEO_DOMAIN_NAME, SCHEMA_NAME_SPACE)));
    assertTrue(schemaKeySets.contains(new SchemaKey(AUDIO_DOMAIN_NAME, SCHEMA_NAME_SPACE)));
}
Project: calcite-avatica    File: RemoteDriverTest.java
@Override public Cache<Integer, Object>
getRemoteStatementMap(AvaticaConnection connection) throws Exception {
  Field metaF = AvaticaConnection.class.getDeclaredField("meta");
  metaF.setAccessible(true);
  Meta clientMeta = (Meta) metaF.get(connection);
  Field remoteMetaServiceF = clientMeta.getClass().getDeclaredField("service");
  remoteMetaServiceF.setAccessible(true);
  LocalJsonService remoteMetaService = (LocalJsonService) remoteMetaServiceF.get(clientMeta);
  // Use the explicit class to avoid issues with LoggingLocalJsonService
  Field remoteMetaServiceServiceF = LocalJsonService.class.getDeclaredField("service");
  remoteMetaServiceServiceF.setAccessible(true);
  LocalService remoteMetaServiceService =
      (LocalService) remoteMetaServiceServiceF.get(remoteMetaService);
  Field remoteMetaServiceServiceMetaF =
      remoteMetaServiceService.getClass().getDeclaredField("meta");
  remoteMetaServiceServiceMetaF.setAccessible(true);
  JdbcMeta serverMeta = (JdbcMeta) remoteMetaServiceServiceMetaF.get(remoteMetaServiceService);
  Field jdbcMetaStatementMapF = JdbcMeta.class.getDeclaredField("statementCache");
  jdbcMetaStatementMapF.setAccessible(true);
  //noinspection unchecked
  @SuppressWarnings("unchecked")
  Cache<Integer, Object> cache = (Cache<Integer, Object>) jdbcMetaStatementMapF.get(serverMeta);
  return cache;
}
Project: Equella    File: OAuthWebServiceImpl.java
@Override
public OAuthUserState getUserState(String tokenData, HttpServletRequest request)
{
    Cache<String, OAuthUserState> userCache = getUserCache(CurrentInstitution.get());
    OAuthUserState oauthUserState = userCache.getIfPresent(tokenData);
    if( oauthUserState == null )
    {
        // find the token and the user associated with it
        final OAuthToken token = oauthService.getToken(tokenData);
        if( token == null )
        {
            // FIXME: Need to fall back on server language since
            // LocaleEncodingFilter has not run yet...
            throw new OAuthException(403, OAuthConstants.ERROR_ACCESS_DENIED, languageService
                .getResourceBundle(Locale.getDefault(), "resource-centre").getString(KEY_TOKEN_NOT_FOUND));
        }

        final UserState userState = userService.authenticateAsUser(token.getUsername(),
            userService.getWebAuthenticationDetails(request));

        oauthUserState = new OAuthUserStateImpl(userState, token);
        userCache.put(tokenData, oauthUserState);
    }
    return (OAuthUserState) oauthUserState.clone();
}
Project: apollo-custom    File: AbstractConfig.java
private <T> T getValueAndStoreToCache(String key, Function<String, T> parser, Cache<String, T> cache, T defaultValue) {
  long currentConfigVersion = m_configVersion.get();
  String value = getProperty(key, null);

  if (value != null) {
    T result = parser.apply(value);

    if (result != null) {
      synchronized (this) {
        if (m_configVersion.get() == currentConfigVersion) {
          cache.put(key, result);
        }
      }
      return result;
    }
  }

  return defaultValue;
}
Project: dremio-oss    File: Foreman.java
protected Foreman(
        final SabotContext context,
        final Executor executor,
        final CompletionListener listener,
        final ExternalId externalId,
        final QueryObserver observer,
        final UserSession session,
        final UserRequest request,
        final OptionProvider config,
        final ReAttemptHandler attemptHandler,
        final CoordToExecTunnelCreator tunnelCreator,
        Cache<Long, PreparedPlan> plans) {
  this.attemptId = AttemptId.of(externalId);
  this.executor = executor;
  this.context = context;
  this.listener = listener;
  this.session = session;
  this.request = request;
  this.config = config;
  this.observer = observer;
  this.attemptHandler = attemptHandler;
  this.tunnelCreator = tunnelCreator;
  this.plans = plans;
}
Project: calcite-avatica    File: RemoteDriverTest.java
@Override public Cache<String, Connection>
getRemoteConnectionMap(AvaticaConnection connection) throws Exception {
  Field metaF = AvaticaConnection.class.getDeclaredField("meta");
  metaF.setAccessible(true);
  Meta clientMeta = (Meta) metaF.get(connection);
  Field remoteMetaServiceF = clientMeta.getClass().getDeclaredField("service");
  remoteMetaServiceF.setAccessible(true);
  LocalProtobufService remoteMetaService =
      (LocalProtobufService) remoteMetaServiceF.get(clientMeta);
  // Get the field explicitly off the correct class to avoid LocalLoggingJsonService.class
  Field remoteMetaServiceServiceF = LocalProtobufService.class.getDeclaredField("service");
  remoteMetaServiceServiceF.setAccessible(true);
  LocalService remoteMetaServiceService =
      (LocalService) remoteMetaServiceServiceF.get(remoteMetaService);
  Field remoteMetaServiceServiceMetaF =
      remoteMetaServiceService.getClass().getDeclaredField("meta");
  remoteMetaServiceServiceMetaF.setAccessible(true);
  JdbcMeta serverMeta = (JdbcMeta) remoteMetaServiceServiceMetaF.get(remoteMetaServiceService);
  Field jdbcMetaConnectionCacheF = JdbcMeta.class.getDeclaredField("connectionCache");
  jdbcMetaConnectionCacheF.setAccessible(true);
  //noinspection unchecked
  @SuppressWarnings("unchecked")
  Cache<String, Connection> cache =
      (Cache<String, Connection>) jdbcMetaConnectionCacheF.get(serverMeta);
  return cache;
}
Project: CustomWorldGen    File: ForgeChunkManager.java
public static Chunk fetchDormantChunk(long coords, World world)
{
    if (dormantChunkCacheSize == 0) return null; // Don't bother with maps at all if it's never going to get a response
    Cache<Long, Chunk> cache = dormantChunkCache.get(world);
    if (cache == null)
    {
        return null;
    }
    Chunk chunk = cache.getIfPresent(coords);
    if (chunk != null)
    {
        for (ClassInheritanceMultiMap<Entity> eList : chunk.getEntityLists())
        {
            Iterator<Entity> itr = eList.iterator();
            while (itr.hasNext())
            {
                (itr.next()).resetEntityId();
            }
        }
    }
    return chunk;
}
Project: calcite-avatica    File: RemoteDriverTest.java
@Test public void testStatementLifecycle() throws Exception {
  ConnectionSpec.getDatabaseLock().lock();
  try (AvaticaConnection connection = (AvaticaConnection) getLocalConnection()) {
    Map<Integer, AvaticaStatement> clientMap = connection.statementMap;
    Cache<Integer, Object> serverMap = getLocalConnectionInternals()
        .getRemoteStatementMap(connection);
    // Other tests being run might leave statements in the cache.
    // The lock guards against more statements being cached during the test.
    serverMap.invalidateAll();
    assertEquals(0, clientMap.size());
    assertEquals(0, serverMap.size());
    Statement stmt = connection.createStatement();
    assertEquals(1, clientMap.size());
    assertEquals(1, serverMap.size());
    stmt.close();
    assertEquals(0, clientMap.size());
    assertEquals(0, serverMap.size());
  } finally {
    ConnectionSpec.getDatabaseLock().unlock();
  }
}
Project: Elasticsearch    File: BitsetFilterCache.java
@Override
public void onRemoval(RemovalNotification<Object, Cache<Query, Value>> notification) {
    Object key = notification.getKey();
    if (key == null) {
        return;
    }

    Cache<Query, Value> valueCache = notification.getValue();
    if (valueCache == null) {
        return;
    }

    for (Value value : valueCache.asMap().values()) {
        listener.onRemoval(value.shardId, value.bitset);
        // if null then this means the shard has already been removed and the stats are 0 anyway for the shard this key belongs to
    }
}
Project: uavstack    File: DoTestRuleFilterFactory.java
@Test
public void guavaCache() throws InterruptedException {

    TesTicker ticker = new TesTicker();
    Cache<String, Pojo> collection = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.SECONDS).ticker(ticker)
            .<String, Pojo> build();
    Pojo p1 = new Pojo("p1name", "p1val");
    Pojo p2 = new Pojo("p2name", "p2val");
    collection.put("p1", p1);
    collection.put("p2", p2);
    ticker.advance(3, TimeUnit.SECONDS);
    Map<String, Pojo> map = collection.asMap();
    assertTrue(map.containsKey("p1"));
    // map.get("p1");
    ticker.advance(3, TimeUnit.SECONDS);
    assertEquals(2, collection.size());
    assertFalse(map.containsKey("p1"));// this access cleans up expired entries
    assertEquals(1, collection.size());
    assertNull(collection.getIfPresent("p2"));
    assertNull(collection.getIfPresent("p1"));// this access cleans up expired entries
    assertEquals(0, collection.size());
}
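
The TesTicker used above is a test helper rather than part of Guava; it plays the same role as a manually advanced clock such as com.google.common.testing.FakeTicker from guava-testlib. A minimal sketch of such a ticker, assuming only the advance behaviour the test relies on, could be:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Ticker;

// Hypothetical stand-in for TesTicker: a Ticker the test advances by hand.
class ManualTicker extends Ticker {
    private final AtomicLong nanos = new AtomicLong();

    public void advance(long duration, TimeUnit unit) {
        nanos.addAndGet(unit.toNanos(duration));
    }

    @Override
    public long read() {
        return nanos.get();
    }
}
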
Project: calcite-avatica    File: RemoteDriverTest.java
@Override public Cache<String, Connection>
getRemoteConnectionMap(AvaticaConnection connection) throws Exception {
  Field metaF = AvaticaConnection.class.getDeclaredField("meta");
  metaF.setAccessible(true);
  Meta clientMeta = (Meta) metaF.get(connection);
  Field remoteMetaServiceF = clientMeta.getClass().getDeclaredField("service");
  remoteMetaServiceF.setAccessible(true);
  LocalJsonService remoteMetaService = (LocalJsonService) remoteMetaServiceF.get(clientMeta);
  // Get the field explicitly off the correct class to avoid LocalLoggingJsonService.class
  Field remoteMetaServiceServiceF = LocalJsonService.class.getDeclaredField("service");
  remoteMetaServiceServiceF.setAccessible(true);
  LocalService remoteMetaServiceService =
      (LocalService) remoteMetaServiceServiceF.get(remoteMetaService);
  Field remoteMetaServiceServiceMetaF =
      remoteMetaServiceService.getClass().getDeclaredField("meta");
  remoteMetaServiceServiceMetaF.setAccessible(true);
  JdbcMeta serverMeta = (JdbcMeta) remoteMetaServiceServiceMetaF.get(remoteMetaServiceService);
  Field jdbcMetaConnectionCacheF = JdbcMeta.class.getDeclaredField("connectionCache");
  jdbcMetaConnectionCacheF.setAccessible(true);
  //noinspection unchecked
  @SuppressWarnings("unchecked")
  Cache<String, Connection> cache =
      (Cache<String, Connection>) jdbcMetaConnectionCacheF.get(serverMeta);
  return cache;
}
Project: dremio-oss    File: CommandCreator.java
public CommandCreator(
    SabotContext dbContext,
    QueryContext context,
    CoordToExecTunnelCreator tunnelCreator,
    UserRequest request,
    AttemptObserver observer,
    Cache<Long, PreparedPlan> plans,
    Pointer<QueryId> prepareId,
    int attemptNumber) {
  this.context = context;
  this.tunnelCreator = tunnelCreator;
  this.request = request;
  this.observer = observer;
  this.dbContext = dbContext;
  this.plans = plans;
  this.prepareId = prepareId;
  this.attemptNumber = attemptNumber;
}
Project: happylifeplat-transaction    File: SchemaCache.java
private Schema<?> get(final Class<?> cls, Cache<Class<?>, Schema<?>> cache) {
    try {
        return cache.get(cls, () -> RuntimeSchema.createFrom(cls));
    } catch (ExecutionException e) {
        return null;
    }
}
Project: Reer    File: InMemoryCachingPluginResolutionServiceClient.java
private <K, V> Response<V> getResponse(Key<K> key, Cache<Key<K>, Response<V>> cache, Factory<Response<V>> responseFactory, Transformer<Key<K>, ? super Response<V>> keyGenerator) {
    Response<V> response = key == null ? null : cache.getIfPresent(key);
    if (response != null) {
        return response;
    } else {
        response = responseFactory.create();
        if (!response.isError()) {
            Key<K> actualKey = keyGenerator.transform(response);
            cache.put(actualKey, response);
        }
        return response;
    }
}
Project: apollo-custom    File: AbstractConfig.java
@Override
public String[] getArrayProperty(String key, final String delimiter, String[] defaultValue) {
  try {
    if (!m_arrayCache.containsKey(delimiter)) {
      synchronized (this) {
        if (!m_arrayCache.containsKey(delimiter)) {
          m_arrayCache.put(delimiter, this.<String[]>newCache());
        }
      }
    }

    Cache<String, String[]> cache = m_arrayCache.get(delimiter);
    String[] result = cache.getIfPresent(key);

    if (result != null) {
      return result;
    }

    return getValueAndStoreToCache(key, new Function<String, String[]>() {
      @Override
      public String[] apply(String input) {
        return input.split(delimiter);
      }
    }, cache, defaultValue);
  } catch (Throwable ex) {
    Tracer.logError(new ApolloConfigException(
        String.format("getArrayProperty for %s failed, return default value", key), ex));
  }
  return defaultValue;
}
Project: athena    File: SimpleFlowRuleStore.java
@Modified
public void modified(ComponentContext context) {

    readComponentConfiguration(context);

    // Reset Cache and copy all.
    Cache<Integer, SettableFuture<CompletedBatchOperation>> prevFutures = pendingFutures;
    pendingFutures = CacheBuilder.newBuilder()
            .expireAfterWrite(pendingFutureTimeoutMinutes, TimeUnit.MINUTES)
            .removalListener(new TimeoutFuture())
            .build();

    pendingFutures.putAll(prevFutures.asMap());
}
Project: cas-5.1.0    File: CasPersonDirectoryConfiguration.java
private IPersonAttributeDao composeMergedAndCachedAttributeRepositories(final List<IPersonAttributeDao> list) {
    final MergingPersonAttributeDaoImpl mergingDao = new MergingPersonAttributeDaoImpl();

    final String merger = StringUtils.defaultIfBlank(casProperties.getAuthn().getAttributeRepository().getMerger(), "replace".trim());
    LOGGER.debug("Configured merging strategy for attribute sources is [{}]", merger);
    switch (merger.toLowerCase()) {
        case "merge":
            mergingDao.setMerger(new MultivaluedAttributeMerger());
            break;
        case "add":
            mergingDao.setMerger(new NoncollidingAttributeAdder());
            break;
        case "replace":
        default:
            mergingDao.setMerger(new ReplacingAttributeAdder());
            break;
    }

    final CachingPersonAttributeDaoImpl impl = new CachingPersonAttributeDaoImpl();
    impl.setCacheNullResults(false);

    final Cache graphs = CacheBuilder.newBuilder()
            .concurrencyLevel(2)
            .weakKeys()
            .maximumSize(casProperties.getAuthn().getAttributeRepository().getMaximumCacheSize())
            .expireAfterWrite(casProperties.getAuthn().getAttributeRepository().getExpireInMinutes(), TimeUnit.MINUTES)
            .build();
    impl.setUserInfoCache(graphs.asMap());
    mergingDao.setPersonAttributeDaos(list);
    impl.setCachedPersonAttributesDao(mergingDao);

    if (list.isEmpty()) {
        LOGGER.debug("No attribute repository sources are available/defined to merge together.");
    } else {
        LOGGER.debug("Configured attribute repository sources to merge together: [{}]", list);
        LOGGER.debug("Configured cache expiration policy for merging attribute sources to be [{}] minute(s)",
                casProperties.getAuthn().getAttributeRepository().getExpireInMinutes());
    }
    return impl;
}
Project: apollo-custom    File: AbstractConfig.java
private <T> Cache<String, T> newCache() {
  Cache<String, T> cache = CacheBuilder.newBuilder()
      .maximumSize(m_configUtil.getMaxConfigCacheSize())
      .expireAfterAccess(m_configUtil.getConfigCacheExpireTime(), m_configUtil.getConfigCacheExpireTimeUnit())
      .build();
  allCaches.add(cache);
  return cache;
}
Project: CustomWorldGen    File: ForgeChunkManager.java
public static void putDormantChunk(long coords, Chunk chunk)
{
    if (dormantChunkCacheSize == 0) return; // Skip if we're not dormant caching chunks
    Cache<Long, Chunk> cache = dormantChunkCache.get(chunk.getWorld());
    if (cache != null)
    {
        cache.put(coords, chunk);
    }
}
Project: alfresco-repository    File: LockStoreImpl.java
private static ConcurrentMap<NodeRef, LockState> createMap(long expiry, TimeUnit timeUnit)
{
    Cache<NodeRef, LockState> cache = CacheBuilder.newBuilder()
                .concurrencyLevel(32)
                .expireAfterWrite(expiry, timeUnit)
                .build();
    return cache.asMap();
}
Project: alfresco-repository    File: DefaultSimpleCache.java
/**
 * Construct a cache using the specified capacity and name.
 * 
 * @param maxItems The cache capacity. 0 = use {@link #DEFAULT_CAPACITY}
 * @param useMaxItems Whether the maxItems value should be applied as a size-cap for the cache.
 * @param ttlSecs Time-to-live in seconds; entries expire this long after being written (no write-based expiry if not positive).
 * @param maxIdleSecs Maximum idle time in seconds; entries expire this long after their last access (no access-based expiry if not positive).
 * @param cacheName An arbitrary cache name.
 */
@SuppressWarnings("unchecked")
public DefaultSimpleCache(int maxItems, boolean useMaxItems, int ttlSecs, int maxIdleSecs, String cacheName)
{
    if (maxItems == 0)
    {
        maxItems = DEFAULT_CAPACITY;
    }
    else if (maxItems < 0)
    {
        throw new IllegalArgumentException("maxItems may not be negative, but was " + maxItems);
    }
    this.maxItems = maxItems;
    this.useMaxItems = useMaxItems;
    this.ttlSecs = ttlSecs;
    this.maxIdleSecs = maxIdleSecs;
    setBeanName(cacheName);

    // The map will have a bounded size determined by the maxItems member variable.
    @SuppressWarnings("rawtypes")
    CacheBuilder builder = CacheBuilder.newBuilder();

    if (useMaxItems)
    {
        builder.maximumSize(maxItems);
    }
    if (ttlSecs > 0)
    {
        builder.expireAfterWrite(ttlSecs, TimeUnit.SECONDS);
    }
    if (maxIdleSecs > 0)
    {
        builder.expireAfterAccess(maxIdleSecs, TimeUnit.SECONDS);
    }
    builder.concurrencyLevel(32);

    cache = (Cache<K, AbstractMap.SimpleImmutableEntry<K, V>>) builder.build();
}
Project: micrometer    File: GuavaCacheMetrics.java
@Override
public void bindTo(MeterRegistry registry) {
    Gauge.builder(name + ".estimated.size", cache, Cache::size)
        .tags(tags)
        .description("The approximate number of entries in this cache")
        .register(registry);

    FunctionCounter.builder(name + ".requests", cache, c -> c.stats().missCount())
        .tags(tags).tags("result", "miss")
        .description("The number of times cache lookup methods have returned an uncached (newly loaded) value, or null")
        .register(registry);

    FunctionCounter.builder(name + ".requests", cache, c -> c.stats().hitCount())
        .tags(tags).tags("result", "hit")
        .description("The number of times cache lookup methods have returned a cached value")
        .register(registry);

    FunctionCounter.builder(name + ".evictions", cache, c -> c.stats().evictionCount())
        .tags(tags)
        .description("Cache evictions")
        .register(registry);

    if (cache instanceof LoadingCache) {
        // dividing these gives you a measure of load latency
        TimeGauge.builder(name + ".load.duration", cache, TimeUnit.NANOSECONDS, c -> c.stats().totalLoadTime())
            .tags(tags)
            .description("The time the cache has spent loading new values")
            .register(registry);

        FunctionCounter.builder(name + ".load", cache, c -> c.stats().loadSuccessCount())
            .tags(tags).tags("result", "success")
            .description("The number of times cache lookup methods have successfully loaded a new value")
            .register(registry);

        FunctionCounter.builder(name + ".load", cache, c -> c.stats().loadExceptionCount())
            .tags(tags).tags("result", "failure")
            .description("The number of times cache lookup methods threw an exception while loading a new value")
            .register(registry);
    }
}
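
For context, a minimal usage sketch that binds these meters to a registry; it assumes the static GuavaCacheMetrics.monitor convenience method in Micrometer, and the cache name "books" is illustrative only:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.cache.GuavaCacheMetrics;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class CacheMetricsExample {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();

        // recordStats() is required, otherwise the stat-based meters stay at zero.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(100)
                .recordStats()
                .build();

        // Registers the gauges and counters created in bindTo() above.
        GuavaCacheMetrics.monitor(registry, cache, "books");

        cache.put("k", "v");
        cache.getIfPresent("k");
    }
}
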
Project: MooProject    File: MultiCache.java
public MultiCache(long expireAfterWrite, long expireAfterAccess) {
    this.handle = CacheBuilder.<F, Cache<K, V>>newBuilder()
            .expireAfterWrite(expireAfterWrite, TimeUnit.SECONDS)
            .expireAfterAccess(expireAfterAccess, TimeUnit.SECONDS)
            .build();
    this.removalListenerMap = new HashMap<>();
}
Project: sentry    File: CacheHealthIndicator.java
private Map<String, Object> buildStats(String name, Cache<Object, Object> cache) {
    Map<String, Object> map = new LinkedHashMap<>();
    map.put("name", name);
    map.put("status", "UP");
    map.put("size", cache.size());
    CacheStats stats = cache.stats();
    map.put("hitRate", stats.hitRate());
    map.put("hitCount", stats.hitCount());
    map.put("missCount", stats.missRate());
    map.put("loadSuccessCount", stats.loadSuccessCount());
    map.put("loadExceptionCount", stats.loadExceptionCount());
    map.put("totalLoadTime", stats.totalLoadTime());
    map.put("evictionCount", stats.evictionCount());
    return map;
}
Project: ProjectAres    File: CacheUtils.java
static <K, V> V getUnchecked(Cache<K, V> cache, K key, Supplier<V> supplier) {
    try {
        return cache.get(key, supplier::get);
    } catch(ExecutionException e) {
        throw Throwables.propagate(e);
    }
}
Project: myth    File: SchemaCache.java
private Schema<?> get(final Class<?> cls, Cache<Class<?>, Schema<?>> cache) {
    try {
        return cache.get(cls, () -> RuntimeSchema.createFrom(cls));
    } catch (ExecutionException e) {
        return null;
    }
}
Project: Equella    File: PortletWebServiceImpl.java
private DefaultSectionTree getTree(String userId)
{
    long institutionId = CurrentInstitution.get().getUniqueId();
    Cache<String, DefaultSectionTree> instMap = sectionCache.getIfPresent(institutionId);
    if( instMap == null )
    {
        instMap = CacheBuilder.newBuilder().softValues().expireAfterAccess(30, TimeUnit.MINUTES).build();
        sectionCache.put(institutionId, instMap);
    }
    return instMap.getIfPresent(userId);
}
Project: Equella    File: PortletWebServiceImpl.java
private void clearTree(Institution inst, String userId)
{
    if( userId == null )
    {
        sectionCache.invalidate(inst.getUniqueId());
    }
    else
    {
        Cache<String, DefaultSectionTree> instMap = sectionCache.getIfPresent(inst.getUniqueId());
        if( instMap != null )
        {
            instMap.invalidate(userId);
        }
    }
}
Project: Equella    File: OAuthWebServiceImpl.java
@Inject
public void setInstitutionService(InstitutionService service)
{
    userStateMap = service.newInstitutionAwareCache(new CacheLoader<Institution, Cache<String, OAuthUserState>>()
    {
        @Override
        public Cache<String, OAuthUserState> load(Institution key) throws Exception
        {
            return CacheBuilder.newBuilder().concurrencyLevel(10).maximumSize(50000)
                .expireAfterWrite(30, TimeUnit.MINUTES).build();
        }
    });
}
Project: Equella    File: ItemFileServiceImpl.java
@Inject
public void setInstitutionService(InstitutionService service)
{
    cache = service.newInstitutionAwareCache(new CacheLoader<Institution, Cache<ItemId, CollectionStorage>>()
    {
        @Override
        public Cache<ItemId, CollectionStorage> load(Institution key)
        {
            return CacheBuilder.newBuilder().maximumSize(10000).expireAfterAccess(30, TimeUnit.MINUTES).build();
        }
    });
}
Project: Equella    File: ConfigurationServiceImpl.java
@Inject
public void setInstitutionService(InstitutionService service)
{
    cache = service.newInstitutionAwareCache(new CacheLoader<Institution, Cache<Object, Object>>()
    {
        @Override
        public Cache<Object, Object> load(Institution key)
        {
            return CacheBuilder.newBuilder().concurrencyLevel(12).build();
        }
    });
}
Project: dremio-oss    File: AttemptManager.java
/**
 * Constructor. Sets up the AttemptManager, but does not initiate any execution.
 *
 * @param attemptId the id for the query
 * @param queryRequest the query to execute
 */
public AttemptManager(
  final SabotContext context,
  final AttemptId attemptId,
  final UserRequest queryRequest,
  final AttemptObserver observer,
  final UserSession session,
  final OptionProvider options,
  final CoordToExecTunnelCreator tunnelCreator,
  final Cache<Long, PreparedPlan> plans,
  final FragmentsStateListener fragmentsStateListener
  ) {
  this.attemptId = attemptId;
  this.queryId = attemptId.toQueryId();
  this.queryIdString = QueryIdHelper.getQueryId(queryId);
  this.queryRequest = queryRequest;
  this.sabotContext = context;
  this.tunnelCreator = tunnelCreator;
  this.plans = plans;
  this.prepareId = new Pointer<>();

  final QueryPriority priority = queryRequest.getPriority();
  final long maxAllocation = queryRequest.getMaxAllocation();
  this.queryContext = new QueryContext(session, sabotContext, queryId, priority, maxAllocation);
  this.observers = AttemptObservers.of(observer);
  this.queryManager = new QueryManager(queryId, queryContext, new CompletionListenerImpl(), prepareId,
    observers, context.getOptionManager().getOption(PlannerSettings.VERBOSE_PROFILE), queryContext.getSchemaTreeProvider(),
    fragmentsStateListener);

  final OptionManager optionManager = queryContext.getOptions();
  if(options != null){
    options.applyOptions(optionManager);
  }
  this.queuingEnabled = optionManager.getOption(ExecConstants.ENABLE_QUEUE);
  this.reflectionQueuingEnabled = optionManager.getOption(ExecConstants.REFLECTION_ENABLE_QUEUE);

  final QueryState initialState = queuingEnabled ? QueryState.ENQUEUED : QueryState.STARTING;
  recordNewState(initialState);
}
Project: Equella    File: TaxonomyServiceImpl.java
private TaxonomyDataSource getDataSource(final String uuid)
{
    synchronized( cacheLock )
    {
        Institution inst = CurrentInstitution.get();
        Cache<String, TaxonomyDataSource> instEntry = dataSourceCache.getIfPresent(inst);
        if( instEntry == null )
        {
            instEntry = CacheBuilder.newBuilder().softValues().expireAfterAccess(1, TimeUnit.HOURS).build();
            dataSourceCache.put(inst, instEntry);
        }

        TaxonomyDataSource tds = instEntry.getIfPresent(uuid);
        if( tds == null )
        {
            final Taxonomy taxonomy = getDao().getByUuid(uuid);
            if( taxonomy == null )
            {
                throw new NotFoundException("Could not find taxonomy with UUID " + uuid);
            }

            tds = getDataSourceNoCache(taxonomy);
            instEntry.put(uuid, tds);
        }
        return tds;
    }
}
Project: Equella    File: TaxonomyServiceImpl.java
@Override
public void taxonomyModifiedEvent(TaxonomyModifiedEvent event)
{
    synchronized( cacheLock )
    {
        final Cache<String, TaxonomyDataSource> instEntry = dataSourceCache.getIfPresent(CurrentInstitution.get());
        if( instEntry != null )
        {
            instEntry.invalidate(event.getTaxonomyUuid());
        }
    }
}
Project: Equella    File: UserDirectoryUtils.java
/**
 * User, group and role information is already cached in the UDC, but there
 * may be a need for caching specific information in a UD, for example,
 * TLEGroupWrapper caches group UUIDs for a user during initState to speed
 * up repeated logins.
 */
public static <T> Cache<String, T> makeCache()
{
    // Maximum size so that we don't fill up the memory. Entries expire
    // 10 minutes after being *added* (not accessed) so that the details
    // can't be stale for too long. Soft values allow for GC to throw away
    // values if it needs to in LRU order, which is really nice and
    // automatic for us!
    return CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).maximumSize(5000).softValues().build();
}
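
A hypothetical caller of makeCache() would treat it as a plain key/value store, for example to memoise per-user group lookups during login; the GroupLookupExample class and its lookupGroupsFromDirectory helper below are illustrative only and not part of the original code:

import java.util.Collections;
import java.util.List;

import com.google.common.cache.Cache;

// Hypothetical usage sketch of a makeCache()-style cache.
public final class GroupLookupExample
{
    private static final Cache<String, List<String>> GROUP_CACHE = UserDirectoryUtils.makeCache();

    public static List<String> getGroupsForUser(String userId)
    {
        List<String> groups = GROUP_CACHE.getIfPresent(userId);
        if( groups == null )
        {
            groups = lookupGroupsFromDirectory(userId);
            GROUP_CACHE.put(userId, groups);
        }
        return groups;
    }

    private static List<String> lookupGroupsFromDirectory(String userId)
    {
        // Stand-in for a real user-directory query.
        return Collections.singletonList("group-" + userId);
    }
}
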
Project: sponge    File: Deduplication.java
public synchronized Cache<Pair<String, Object>, Boolean> getCache() {
    if (cache == null) {
        cache = cacheBuilder.build();
    }

    return cache;
}