Java class com.google.common.cache.CacheBuilder example source code

Project: private-WeChat    File: AccessTokenJob.java
public AccessTokenJob() {
    logger.info("init");
    accessTokenCache = CacheBuilder.newBuilder()
            // Set the concurrency level to 200; the concurrency level is the number of threads that may write to the cache at the same time
            .concurrencyLevel(200)
            // Expire entries 90 minutes after they are written
            .expireAfterWrite(90, TimeUnit.MINUTES).initialCapacity(10).maximumSize(100)
            // Record cache statistics such as the hit rate
            .recordStats()
            // Register a removal listener so evictions are reported
            .removalListener(new RemovalListener<AppIdSecret, String>() {
                @Override
                public void onRemoval(RemovalNotification<AppIdSecret, String> notification) {
                    logger.info(notification.getKey() + " was removed, cause by " + notification.getCause());
                }
            }).build(new CacheLoader<AppIdSecret, String>() {
                // A CacheLoader can be passed to build(); when an entry is missing, the cache loads it automatically through the CacheLoader implementation
                @Override
                public String load(AppIdSecret appIdSecret) throws Exception {
                    Token token = CommonUtil.getAccessToken(appIdSecret.getAppId(), appIdSecret.getAppSecret());
                    return token.getToken();
                }
            });
}
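For reference, here is a minimal, self-contained sketch of the same LoadingCache pattern: build(CacheLoader) returns a LoadingCache whose get()/getUnchecked() transparently invoke the loader on a miss. The key/value types and the load body are illustrative stand-ins, not the project's AppIdSecret/CommonUtil classes.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

public class LoadingCacheSketch {
    public static void main(String[] args) {
        LoadingCache<String, String> tokens = CacheBuilder.newBuilder()
                .maximumSize(100)
                .expireAfterWrite(90, TimeUnit.MINUTES)
                .recordStats()
                .build(new CacheLoader<String, String>() {
                    @Override
                    public String load(String appId) {
                        // stand-in for a remote call such as fetching an access token
                        return "token-for-" + appId;
                    }
                });

        // The first call misses and runs load(); the second call within the expiry window is a hit.
        System.out.println(tokens.getUnchecked("app-1"));
        System.out.println(tokens.getUnchecked("app-1"));
        System.out.println(tokens.stats()); // hitCount=1, missCount=1
    }
}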
Project: hadoop    File: KMSAudit.java
/**
 * Create a new KMSAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
KMSAudit(long windowMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
      .removalListener(
          new RemovalListener<String, AuditEvent>() {
            @Override
            public void onRemoval(
                RemovalNotification<String, AuditEvent> entry) {
              AuditEvent event = entry.getValue();
              if (event.getAccessCount().get() > 0) {
                KMSAudit.this.logEvent(event);
                event.getAccessCount().set(0);
                KMSAudit.this.cache.put(entry.getKey(), event);
              }
            }
          }).build();
  executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
  executor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      cache.cleanUp();
    }
  }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
Project: Chronetic    File: ChronoRange.java
/**
 * Create a ChronoRange for the given ChronoSeries and sequence of ChronoGenes.
 *
 * @param chronoSeries ChronoSeries to create ChronoRange for
 * @param genes ChronoGene sequence containing ChronoPattern(s) to use for creating ChronoRange
 * @return ChronoRange for given ChronoSeries and ChronoGene sequence
 */
@NotNull
public static ChronoRange getChronoRange(@NotNull ChronoSeries chronoSeries, @NotNull ISeq<ChronoGene> genes) {
    ChronoRange range = new ChronoRange(requireNonNull(chronoSeries), requireNonNull(genes));
    Cache<ISeq<ChronoPattern>, ChronoRange> cacheChronoRange = cacheMap.get(chronoSeries);
    if (cacheChronoRange == null) {
        cacheChronoRange = CacheBuilder.newBuilder().build();
        cacheMap.put(chronoSeries, cacheChronoRange);
    }

    ChronoRange cacheRange = cacheChronoRange.getIfPresent(range.chronoPatternSeq);
    if (cacheRange != null) {
        return cacheRange;
    } else {
        if (range.validRange) {
            range.calculateTimestampRanges();
        }

        cacheChronoRange.put(range.chronoPatternSeq, range);
        return range;
    }
}
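The getIfPresent()/put() sequence above is a check-then-act pattern; under concurrent access two threads can both miss and both compute the range. Guava's Cache.get(key, Callable) performs the lookup and the load as a single atomic operation. A minimal sketch with generic key/value types, not the Chronetic classes:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ExecutionException;

public class AtomicLoadSketch {
    private static final Cache<String, String> CACHE = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .build();

    static String lookup(String key) throws ExecutionException {
        // get(key, loader) computes and inserts the value only if it is absent,
        // so concurrent callers do not race between getIfPresent() and put().
        return CACHE.get(key, () -> expensiveCompute(key));
    }

    private static String expensiveCompute(String key) {
        return "computed-" + key;
    }
}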
Project: hadoop    File: KeyProviderCache.java
public KeyProviderCache(long expiryMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterAccess(expiryMs, TimeUnit.MILLISECONDS)
      .removalListener(new RemovalListener<URI, KeyProvider>() {
        @Override
        public void onRemoval(
            RemovalNotification<URI, KeyProvider> notification) {
          try {
            notification.getValue().close();
          } catch (Throwable e) {
            LOG.error(
                "Error closing KeyProvider with uri ["
                    + notification.getKey() + "]", e);
          }
        }
      })
      .build();
}
Project: hadoop    File: DFSClientCache.java
DFSClientCache(NfsConfiguration config, int clientCache) {
  this.config = config;
  this.clientCache = CacheBuilder.newBuilder()
      .maximumSize(clientCache)
      .removalListener(clientRemovalListener())
      .build(clientLoader());

  this.inputstreamCache = CacheBuilder.newBuilder()
      .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
      .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
      .removalListener(inputStreamRemovalListener())
      .build(inputStreamLoader());

  ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
      SHUTDOWN_HOOK_PRIORITY);
}
Project: cas-5.1.0    File: CasEventsInMemoryRepositoryConfiguration.java
@Bean
public CasEventRepository casEventRepository() {
    final LoadingCache<String, CasEvent> storage = CacheBuilder.newBuilder()
            .initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(MAX_CACHE_SIZE)
            .recordStats()
            .expireAfterWrite(EXPIRATION_TIME, TimeUnit.HOURS)
            .build(new CacheLoader<String, CasEvent>() {
                @Override
                public CasEvent load(final String s) throws Exception {
                    LOGGER.error("Load operation of the cache is not supported.");
                    return null;
                }
            });
    LOGGER.debug("Created an in-memory event repository to store CAS events for [{}] hours", EXPIRATION_TIME);
    return new InMemoryCasEventRepository(storage);
}
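Note that a CacheLoader which returns null, as in the CAS example above, only works for entries added explicitly with put(); if a get ever triggers the loader, Guava rejects the null result with a CacheLoader.InvalidCacheLoadException. A small sketch of that behaviour (illustrative types, not the CAS classes):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class NullLoaderSketch {
    public static void main(String[] args) {
        LoadingCache<String, String> cache = CacheBuilder.newBuilder().build(
                new CacheLoader<String, String>() {
                    @Override
                    public String load(String key) {
                        return null; // Guava does not allow null values from a loader
                    }
                });

        cache.put("k", "v");                          // explicit puts still work
        System.out.println(cache.getIfPresent("k"));  // prints "v"

        try {
            cache.getUnchecked("missing");            // forces the loader to run
        } catch (RuntimeException e) {
            // Guava reports the null result as CacheLoader.InvalidCacheLoadException
            System.out.println("loader rejected: " + e);
        }
    }
}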
Project: xsharing-services-router    File: RouteDataGuavaCache.java
@PostConstruct
public void initialize() {
    AppConfiguration.Cache config = AppConfiguration.CONFIG.getCache();

    cache = CacheBuilder.newBuilder()
                        .maximumSize(config.getMaxSize())
                        .expireAfterWrite(config.getLifeTime(), TimeUnit.MINUTES)
                        .recordStats() // This is costly! But we need it because of getCacheStatus().
                        .build();

    // https://github.com/google/guava/wiki/CachesExplained#when-does-cleanup-happen
    //
    // If we do not clean-up expired objects ourselves, the insertion of objects seems to get slower when
    // the size approaches the limit. This is because small clean-ups happen which block the operation.
    //
    scheduler.scheduleAtFixedRate(
            this::cleanUpCache,
            config.getCleanUpInterval(),
            config.getCleanUpInterval(),
            TimeUnit.MINUTES
    );
}
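A self-contained sketch of the two points the comments above make: recordStats() enables Cache.stats() (hit rate, eviction count, and so on), and a scheduled call to cleanUp() evicts expired entries eagerly instead of piggy-backing that work on reads and writes. The sizes and intervals here are illustrative.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheStats;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CacheMaintenanceSketch {
    public static void main(String[] args) {
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(30, TimeUnit.MINUTES)
                .recordStats()
                .build();

        // Periodically force removal of expired entries so that individual
        // reads and writes do not pay for the cleanup work.
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(cache::cleanUp, 5, 5, TimeUnit.MINUTES);

        cache.put("a", "1");
        cache.getIfPresent("a"); // hit
        cache.getIfPresent("b"); // miss

        CacheStats stats = cache.stats();
        System.out.printf("hitRate=%.2f evictions=%d%n", stats.hitRate(), stats.evictionCount());

        scheduler.shutdown();
    }
}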
Project: modName    File: GrassColours.java
public static void init() {
    ExampleMod.logger.info("ATTEMPTING TO COMMIT GREAT EVIL:");
    try {
        doImmenseEvil();
    } catch(Throwable e) {
        e.printStackTrace();
    }
    MinecraftForge.EVENT_BUS.register(new Listener());

    grassCache = CacheBuilder.newBuilder()
        .maximumSize(2048)
        .build(
            new CacheLoader<GrassCacheKey, Biome>() {
                @Override
                public Biome load(GrassCacheKey key) {
                    return DimensionManager.getWorld(key.dim).getBiome(new BlockPos(key.x, 63, key.z));
                }
            }
        );
}
Project: sponge    File: CachedScriptClassInstancePovider.java
public CachedScriptClassInstancePovider(Engine engine, Function<String, S> createScriptFunction, String format,
        BiFunction<S, Class<T>, T> createInstanceFunction) {
    this.createScriptFunction = createScriptFunction;
    this.format = format;
    this.createInstanceFunction = createInstanceFunction;

    long cacheExpireTime = engine.getDefaultParameters().getScriptClassInstancePoviderCacheExpireTime();
    if (cacheExpireTime >= 0) {
        // Turn on the cache.
        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
        if (cacheExpireTime > 0) {
            builder.expireAfterAccess(cacheExpireTime, TimeUnit.MILLISECONDS);
        }

        cache = builder.build(new CacheLoader<String, S>() {

            @Override
            public S load(String className) throws Exception {
                return createScript(className);
            }
        });
    }
}
Project: CustomWorldGen    File: B3DLoader.java
public BakedWrapper(final Node<?> node, final IModelState state, final boolean smooth, final boolean gui3d, final VertexFormat format, final ImmutableSet<String> meshes, final ImmutableMap<String, TextureAtlasSprite> textures)
{
    this(node, state, smooth, gui3d, format, meshes, textures, CacheBuilder.newBuilder()
        .maximumSize(128)
        .expireAfterAccess(2, TimeUnit.MINUTES)
        .<Integer, B3DState>build(new CacheLoader<Integer, B3DState>()
        {
            public B3DState load(Integer frame) throws Exception
            {
                IModelState parent = state;
                Animation newAnimation = node.getAnimation();
                if(parent instanceof B3DState)
                {
                    B3DState ps = (B3DState)parent;
                    parent = ps.getParent();
                }
                return new B3DState(newAnimation, frame, frame, 0, parent);
            }
        }));
}
Project: hadoop-oss    File: CachingReEncryptionKeyProvider.java
public CachingReEncryptionKeyProvider(AbstractReEncryptionKeyProvider prov, long keyTimeoutMillis,
      long eekTimeoutMillis) {
    super(prov.getConf());
    this.provider = prov;
    reEncryptionKeyCache =
        CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
            TimeUnit.MILLISECONDS)
            .build(new CacheLoader<ReEncryptionKeyCacheKey, ReEncryptionKeyInstance>() {
              @Override
              public ReEncryptionKeyInstance load(ReEncryptionKeyCacheKey key) throws Exception {
                ReEncryptionKeyInstance kv = provider.createReEncryptionKey(
                    key.getSrcKeyName(), key.getDstKeyName());
                if (kv == null) {
                  throw new KeyNotFoundException();
                }
                return kv;
              }
            });
  transformedEEKCache =
        CacheBuilder.newBuilder().expireAfterAccess(eekTimeoutMillis,
            TimeUnit.MILLISECONDS)
            .build();
}
Project: hadoop-oss    File: KMSAudit.java
/**
 * Create a new KMSAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
KMSAudit(long windowMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
      .removalListener(
          new RemovalListener<String, AuditEvent>() {
            @Override
            public void onRemoval(
                RemovalNotification<String, AuditEvent> entry) {
              AuditEvent event = entry.getValue();
              if (event.getAccessCount().get() > 0) {
                KMSAudit.this.logEvent(event);
                event.getAccessCount().set(0);
                KMSAudit.this.cache.put(entry.getKey(), event);
              }
            }
          }).build();
  executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
  executor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      cache.cleanUp();
    }
  }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
Project: hadoop-oss    File: RENAudit.java
/**
 * Create a new RENAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
RENAudit(long windowMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
      .removalListener(
          new RemovalListener<String, AuditEvent>() {
            @Override
            public void onRemoval(
                RemovalNotification<String, AuditEvent> entry) {
              AuditEvent event = entry.getValue();
              if (event.getAccessCount().get() > 0) {
                RENAudit.this.logEvent(event);
                event.getAccessCount().set(0);
                RENAudit.this.cache.put(entry.getKey(), event);
              }
            }
          }).build();
  executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat(REN_LOGGER_NAME + "_thread").build());
  executor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      cache.cleanUp();
    }
  }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
Project: ditb    File: RegionReplicaReplicationEndpoint.java
public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
    ExecutorService pool, int operationTimeout) {
  this.sink = sink;
  this.connection = connection;
  this.operationTimeout = operationTimeout;
  this.rpcRetryingCallerFactory
    = RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
  this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
  this.pool = pool;

  int nonExistentTableCacheExpiryMs = connection.getConfiguration()
    .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
  // A cache for non-existent tables, with a default expiry of 5 seconds. This means that if a
  // table is re-created with the same name, we might fail to replicate for that amount of
  // time. But this cache prevents overloading meta with requests for every edit from a deleted file.
  disabledAndDroppedTables = CacheBuilder.newBuilder()
    .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
    .initialCapacity(10)
    .maximumSize(1000)
    .build();
}
Project: waggle-dance    File: StaticDatabaseMappingService.java
public StaticDatabaseMappingService(
    MetaStoreMappingFactory metaStoreMappingFactory,
    List<AbstractMetaStore> initialMetastores) {
  this.metaStoreMappingFactory = metaStoreMappingFactory;
  primaryDatabasesCache = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).maximumSize(1).build(
      new CacheLoader<String, List<String>>() {

        @Override
        public List<String> load(String key) throws Exception {
          if (primaryDatabaseMapping != null) {
            return primaryDatabaseMapping.getClient().get_all_databases();
          } else {
            return Lists.newArrayList();
          }
        }
      });
  init(initialMetastores);
}
Project: athena    File: AtomixLeaderElector.java
public AtomixLeaderElector(CopycatClient client, Properties properties) {
    super(client, properties);
    cache = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .build(CacheLoader.from(topic -> this.client.submit(new GetLeadership(topic))));

    cacheUpdater = change -> {
        Leadership leadership = change.newValue();
        cache.put(leadership.topic(), CompletableFuture.completedFuture(leadership));
    };
    statusListener = status -> {
        if (status == Status.SUSPENDED || status == Status.INACTIVE) {
            cache.invalidateAll();
        }
    };
    addStatusChangeListener(statusListener);
}
Project: sponge    File: BaseProcessingUnit.java
/**
 * Creates a new processing unit.
 *
 * @param name name.
 * @param engine the engine.
 * @param inQueue input queue.
 * @param outQueue output queue.
 */
public BaseProcessingUnit(String name, Engine engine, EventQueue inQueue, EventQueue outQueue) {
    super(name, engine);
    this.inQueue = inQueue;
    this.outQueue = outQueue;

    long cacheExpireTime = engine.getDefaultParameters().getProcessingUnitEventProcessorCacheExpireTime();
    if (cacheExpireTime >= 0) {
        // Turn on the cache.
        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
        if (cacheExpireTime > 0) {
            builder.expireAfterAccess(cacheExpireTime, TimeUnit.MILLISECONDS);
        }

        eventNameProcessorsCache = builder.build(new CacheLoader<String, Set<AtomicReference<T>>>() {

            @Override
            public Set<AtomicReference<T>> load(String eventName) throws Exception {
                return resolveEventProcessors(eventName);
            }
        });
    }
}
Project: Equella    File: MetadataServiceImpl.java
@Override
public Map<String, Map<String, String>> getMetadata(File f)
{
    LoadingCache<String, Map<String, String>> metadata = CacheBuilder.newBuilder().build(
        CacheLoader.from(new Function<String, Map<String, String>>()
    {
        @Override
        public Map<String, String> apply(String input)
        {
            return Maps.newHashMap();
        }
    }));

    for( MetadataHandler handler : pluginTracker.getBeanList() )
    {
        handler.getMetadata(metadata, f);
    }

    return metadata.asMap();
}
Project: athena    File: CachingAsyncConsistentMap.java
/**
 * Constructor to configure cache size.
 *
 * @param backingMap a distributed, strongly consistent map for backing
 * @param cacheSize the maximum size of the cache
 */
public CachingAsyncConsistentMap(AsyncConsistentMap<K, V> backingMap, int cacheSize) {
    super(backingMap);
    cache = CacheBuilder.newBuilder()
                        .maximumSize(cacheSize)
                        .build(CacheLoader.from(CachingAsyncConsistentMap.super::get));
    cacheUpdater = event -> {
        Versioned<V> newValue = event.newValue();
        if (newValue == null) {
            cache.invalidate(event.key());
        } else {
            cache.put(event.key(), CompletableFuture.completedFuture(newValue));
        }
    };
    statusListener = status -> {
        log.debug("{} status changed to {}", this.name(), status);
        // If the status of the underlying map is SUSPENDED or INACTIVE
        // we can no longer guarantee that the cache will be in sync.
        if (status == SUSPENDED || status == INACTIVE) {
            cache.invalidateAll();
        }
    };
    super.addListener(cacheUpdater);
    super.addStatusChangeListener(statusListener);
}
Project: uavstack    File: ReliableTaildirEventReader.java
/**
 * Create a ReliableTaildirEventReader to watch the given directory. map<serverid.appid.logid, logpath>
 */
private ReliableTaildirEventReader(Map<String, CollectTask> tasks, Table<String, String, String> headerTable,
        boolean skipToEnd, boolean addByteOffset) throws IOException {
    Map<String, LogPatternInfo> filePaths = getFilePaths(tasks);

    // Sanity checks
    Preconditions.checkNotNull(filePaths);
    // get operation system info
    if (log.isDebugEnable()) {
        log.debug(this, "Initializing {" + ReliableTaildirEventReader.class.getSimpleName() + "} with directory={"
                + filePaths + "}");
    }

    // tailFile
    this.tailFileTable = CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.DAYS)
            .<String, LogPatternInfo> build();
    this.headerTable = headerTable;
    this.addByteOffset = addByteOffset;
    this.os = JVMToolHelper.isWindows() ? OS_WINDOWS : null;

    updatelog(filePaths);
    updateTailFiles(skipToEnd);

    log.info(this, "tailFileTable: " + tailFileTable.toString());
    log.info(this, "headerTable: " + headerTable.toString());
}
Project: wayf-cloud    File: WayfGuiceModule.java
@Provides
@Named("authenticatableCache")
@Singleton
public LoadingCache<AuthenticationCredentials, AuthenticatedEntity> getLoadingCache(
        @Named("authenticatableRedisDao") RedisDao<AuthenticationCredentials, AuthenticatedEntity> authenticatableRedisDao,
        AuthenticationFacade authenticationFacade,
        CacheManager cacheManager,
        @Named("authenticationCacheGroup") String authenticationCacheGroupName
) {
    LoadingCacheRedisImpl<AuthenticationCredentials, AuthenticatedEntity> l2Cache = new LoadingCacheRedisImpl<>();
    l2Cache.setRedisDao(authenticatableRedisDao);
    l2Cache.setCacheLoader((key) -> authenticationFacade.determineDao(key).authenticate(key));
    l2Cache.setName("AUTHENTICATION_REDIS_CACHE");

    LoadingCacheGuavaImpl<AuthenticationCredentials, AuthenticatedEntity> l1Cache = new LoadingCacheGuavaImpl<>();
    l1Cache.setGuavaCache(CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.DAYS).build());
    l1Cache.setCacheLoader((key) -> l2Cache.get(key));
    l2Cache.setName("AUTHENTICATION_GUAVA_CACHE");

    cacheManager.registerCacheGroup(authenticationCacheGroupName, l1Cache, l2Cache);

    return l1Cache;
}
Project: QDrill    File: DrillHiveMetaStoreClient.java
private NonCloseableHiveClientWithCaching(final HiveConf hiveConf,
    final Map<String, String> hiveConfigOverride) throws MetaException {
  super(hiveConf, hiveConfigOverride);

  databases = CacheBuilder //
      .newBuilder() //
      .expireAfterAccess(1, TimeUnit.MINUTES) //
      .build(new DatabaseLoader());

  tableNameLoader = CacheBuilder //
      .newBuilder() //
      .expireAfterAccess(1, TimeUnit.MINUTES) //
      .build(new TableNameLoader());

  tableLoaders = CacheBuilder //
      .newBuilder() //
      .expireAfterAccess(4, TimeUnit.HOURS) //
      .maximumSize(20) //
      .build(new TableLoaderLoader());
}
Project: Backmemed    File: SkinManager.java
public SkinManager(TextureManager textureManagerInstance, File skinCacheDirectory, MinecraftSessionService sessionService)
{
    this.textureManager = textureManagerInstance;
    this.skinCacheDir = skinCacheDirectory;
    this.sessionService = sessionService;
    this.skinCacheLoader = CacheBuilder.newBuilder().expireAfterAccess(15L, TimeUnit.SECONDS).<GameProfile, Map<Type, MinecraftProfileTexture>>build(new CacheLoader<GameProfile, Map<Type, MinecraftProfileTexture>>()
    {
        public Map<Type, MinecraftProfileTexture> load(GameProfile p_load_1_) throws Exception
        {
            try
            {
                return Minecraft.getMinecraft().getSessionService().getTextures(p_load_1_, false);
            }
            catch (Throwable var3)
            {
                return Maps.<Type, MinecraftProfileTexture>newHashMap();
            }
        }
    });
}
Project: wayf-cloud    File: WayfGuiceModule.java
@Provides
@Named("passwordSaltCache")
@Singleton
public Cache<String, String> getAdminSaltLoadingCache(
        CacheManager cacheManager,
        PasswordCredentialsFacade passwordCredentialsFacade,
        @Named("passwordSaltRedisDao") RedisDao<String, String> passwordSaltRedisDao,
        @Named("passwordSaltCacheGroup") String passwordSaltCacheGroup) {

    LoadingCacheRedisImpl<String, String> l2Cache = new LoadingCacheRedisImpl<>();
    l2Cache.setRedisDao(passwordSaltRedisDao);
    l2Cache.setCacheLoader((email) -> passwordCredentialsFacade.getSaltForEmail(email).toMaybe());

    LoadingCacheGuavaImpl<String, String> l1Cache = new LoadingCacheGuavaImpl<>();
    l1Cache.setGuavaCache(CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.DAYS).build());
    l1Cache.setCacheLoader((key) -> l2Cache.get(key));

    cacheManager.registerCacheGroup(passwordSaltCacheGroup, l1Cache, l2Cache);

    return l1Cache;
}
Project: hadoop    File: CodecPool.java
private static <T> LoadingCache<Class<T>, AtomicInteger> createCache(
    Class<T> klass) {
  return CacheBuilder.newBuilder().build(
      new CacheLoader<Class<T>, AtomicInteger>() {
        @Override
        public AtomicInteger load(Class<T> key) throws Exception {
          return new AtomicInteger();
        }
      });
}
Project: app-auth-example    File: AuthenticationService.java
@Autowired
public AuthenticationService(SymphonyClientFactory symphonyClientFactory, TokenGenerator tokenGenerator) {
    this.symphonyClientFactory = symphonyClientFactory;
    this.tokenGenerator = tokenGenerator;

    // Tokens are short lived.  Max is just protection from DDoS attacks
    tokenCache = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .expireAfterWrite(5, TimeUnit.MINUTES)
            .build();
}
Project: athena    File: AbstractCorsaPipeline.java
@Override
public void init(DeviceId deviceId, PipelinerContext context) {
    this.serviceDirectory = context.directory();
    this.deviceId = deviceId;

    pendingGroups = CacheBuilder.newBuilder()
            .expireAfterWrite(20, TimeUnit.SECONDS)
            .removalListener((RemovalNotification<GroupKey, NextObjective> notification) -> {
                if (notification.getCause() == RemovalCause.EXPIRED) {
                    fail(notification.getValue(), ObjectiveError.GROUPINSTALLATIONFAILED);
                }
            }).build();

    groupChecker.scheduleAtFixedRate(new GroupChecker(), 0, 500, TimeUnit.MILLISECONDS);

    coreService = serviceDirectory.get(CoreService.class);
    flowRuleService = serviceDirectory.get(FlowRuleService.class);
    groupService = serviceDirectory.get(GroupService.class);
    meterService = serviceDirectory.get(MeterService.class);
    deviceService = serviceDirectory.get(DeviceService.class);
    flowObjectiveStore = context.store();

    groupService.addListener(new InnerGroupListener());

    appId = coreService.registerApplication(APPID);

    initializePipeline();
}
Project: Reer    File: DefaultPluginRegistry.java
private DefaultPluginRegistry(PluginRegistry parent, final PluginInspector pluginInspector, ClassLoaderScope classLoaderScope) {
    this.parent = parent;
    this.pluginInspector = pluginInspector;
    this.classLoaderScope = classLoaderScope;
    this.classMappings = CacheBuilder.newBuilder().build(new PotentialPluginCacheLoader(pluginInspector));
    this.idMappings = CacheBuilder.newBuilder().build(new CacheLoader<PluginIdLookupCacheKey, Optional<PluginImplementation<?>>>() {
        @Override
        public Optional<PluginImplementation<?>> load(@SuppressWarnings("NullableProblems") PluginIdLookupCacheKey key) throws Exception {
            PluginId pluginId = key.getId();
            ClassLoader classLoader = key.getClassLoader();

            PluginDescriptorLocator locator = new ClassloaderBackedPluginDescriptorLocator(classLoader);

            PluginDescriptor pluginDescriptor = locator.findPluginDescriptor(pluginId.toString());
            if (pluginDescriptor == null) {
                return Optional.absent();
            }

            String implClassName = pluginDescriptor.getImplementationClassName();
            if (!GUtil.isTrue(implClassName)) {
                throw new InvalidPluginException(String.format("No implementation class specified for plugin '%s' in %s.", pluginId, pluginDescriptor));
            }

            final Class<?> implClass;
            try {
                implClass = classLoader.loadClass(implClassName);
            } catch (ClassNotFoundException e) {
                throw new InvalidPluginException(String.format(
                        "Could not find implementation class '%s' for plugin '%s' specified in %s.", implClassName, pluginId,
                        pluginDescriptor), e);
            }

            PotentialPlugin<?> potentialPlugin = pluginInspector.inspect(implClass);
            PluginImplementation<Object> withId = new RegistryAwarePluginImplementation(classLoader, pluginId, potentialPlugin);
            return Cast.uncheckedCast(Optional.of(withId));
        }
    });
}
Project: dooo    File: RedisSessionDao.java
RedisSessionDao() {
    this.lastModifiedTimes = CacheBuilder.newBuilder().expireAfterAccess(60L, TimeUnit.MINUTES).maximumSize(8192L).build();
    this.INTERNAL_MILLIS = TimeUnit.MINUTES.toMillis(2L);
    Properties properties = ReadResourceUtils.getPropertyFile("redis.properties");

    this.redisConfig = new RedisConfig(properties);

}
Project: pac4j-plus    File: LocalCachingAuthenticator.java
@Override
protected void internalInit(final WebContext context) {
    CommonHelper.assertNotNull("delegate", this.delegate);
    CommonHelper.assertTrue(cacheSize > 0, "cacheSize must be > 0");
    CommonHelper.assertTrue(timeout > 0, "timeout must be > 0");
    CommonHelper.assertNotNull("timeUnit", this.timeUnit);

    if (delegate instanceof InitializableWebObject) {
        ((InitializableWebObject) delegate).init(context);
    }

    this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize)
            .expireAfterWrite(timeout, timeUnit).build();
}
Project: Charrizard    File: CGuild.java
public CGuild(Guild guild, Charrizard charrizard) {
    this.guild = guild;
    this.charrizard = charrizard;
    this.settings = charrizard.getSettings();
    this.redisConnection = charrizard.getRedisConnection();
    this.executor = new ThreadPoolExecutor(2, 16, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
    this.userCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.textChannelCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.voiceChannelCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.audio = new CAudio(this);
}
Project: MicroServiceProject    File: SparseMatrix.java
/**
 * create a row cache of a matrix in {row, row-specific columns}
 *
 * @param cacheSpec cache specification
 * @return a matrix row cache in {row, row-specific columns}
 */
public LoadingCache<Integer, List<Integer>> rowColumnsCache(String cacheSpec) {
    LoadingCache<Integer, List<Integer>> cache = CacheBuilder.from(cacheSpec).build(
            new CacheLoader<Integer, List<Integer>>() {

                @Override
                public List<Integer> load(Integer rowId) throws Exception {
                    return getColumns(rowId);
                }
            });

    return cache;
}
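CacheBuilder.from(cacheSpec) above parses a textual specification instead of chained builder calls. A minimal sketch with an illustrative spec string, equivalent to maximumSize(10000) plus expireAfterAccess(10, TimeUnit.MINUTES):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class CacheSpecSketch {
    public static void main(String[] args) {
        String cacheSpec = "maximumSize=10000,expireAfterAccess=10m";

        LoadingCache<Integer, String> rows = CacheBuilder.from(cacheSpec).build(
                new CacheLoader<Integer, String>() {
                    @Override
                    public String load(Integer rowId) {
                        return "columns-of-row-" + rowId; // stand-in for getColumns(rowId)
                    }
                });

        System.out.println(rows.getUnchecked(42));
    }
}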
Project: springboot-shiro-cas-mybatis    File: CachingPrincipalAttributesRepository.java
/**
 * Instantiates a new caching attributes principal factory.
 * @param maxCacheSize the max cache size
 * @param timeUnit the time unit
 * @param expiryDuration the expiry duration
 */
public CachingPrincipalAttributesRepository(final long maxCacheSize,
                                            final TimeUnit timeUnit,
                                            final long expiryDuration) {
    super(expiryDuration, timeUnit);
    this.maxCacheSize = maxCacheSize;

    this.cache = CacheBuilder.newBuilder().maximumSize(maxCacheSize)
            .expireAfterWrite(expiryDuration, timeUnit).build(this.cacheLoader);
}
Project: Equella    File: TaxonomyServiceImpl.java
private TaxonomyDataSource getDataSource(final String uuid)
{
    synchronized( cacheLock )
    {
        Institution inst = CurrentInstitution.get();
        Cache<String, TaxonomyDataSource> instEntry = dataSourceCache.getIfPresent(inst);
        if( instEntry == null )
        {
            instEntry = CacheBuilder.newBuilder().softValues().expireAfterAccess(1, TimeUnit.HOURS).build();
            dataSourceCache.put(inst, instEntry);
        }

        TaxonomyDataSource tds = instEntry.getIfPresent(uuid);
        if( tds == null )
        {
            final Taxonomy taxonomy = getDao().getByUuid(uuid);
            if( taxonomy == null )
            {
                throw new NotFoundException("Could not find taxonomy with UUID " + uuid);
            }

            tds = getDataSourceNoCache(taxonomy);
            instEntry.put(uuid, tds);
        }
        return tds;
    }
}
Project: BaseClient    File: SkinManager.java
public SkinManager(TextureManager textureManagerInstance, File skinCacheDirectory, MinecraftSessionService sessionService)
{
    this.textureManager = textureManagerInstance;
    this.skinCacheDir = skinCacheDirectory;
    this.sessionService = sessionService;
    this.skinCacheLoader = CacheBuilder.newBuilder().expireAfterAccess(15L, TimeUnit.SECONDS).<GameProfile, Map<Type, MinecraftProfileTexture>>build(new CacheLoader<GameProfile, Map<Type, MinecraftProfileTexture>>()
    {
        public Map<Type, MinecraftProfileTexture> load(GameProfile p_load_1_) throws Exception
        {
            return Minecraft.getMinecraft().getSessionService().getTextures(p_load_1_, false);
        }
    });
}
Project: QDrill    File: DrillHiveMetaStoreClient.java
@Override
public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
  return CacheBuilder
      .newBuilder()
      .expireAfterAccess(1, TimeUnit.MINUTES)
      .build(new TableLoader(key));
}
Project: cas-5.1.0    File: OidcConfiguration.java
@Bean
public LoadingCache<OidcRegisteredService, Optional<RsaJsonWebKey>> oidcServiceJsonWebKeystoreCache() {
    final OidcProperties oidc = casProperties.getAuthn().getOidc();
    final LoadingCache<OidcRegisteredService, Optional<RsaJsonWebKey>> cache =
            CacheBuilder.newBuilder().maximumSize(1)
                    .expireAfterWrite(oidc.getJwksCacheInMinutes(), TimeUnit.MINUTES)
                    .build(oidcServiceJsonWebKeystoreCacheLoader());
    return cache;
}
Project: cas-5.1.0    File: OidcConfiguration.java
@Bean
public LoadingCache<String, Optional<RsaJsonWebKey>> oidcDefaultJsonWebKeystoreCache() {
    final OidcProperties oidc = casProperties.getAuthn().getOidc();
    final LoadingCache<String, Optional<RsaJsonWebKey>> cache =
            CacheBuilder.newBuilder().maximumSize(1)
                    .expireAfterWrite(oidc.getJwksCacheInMinutes(), TimeUnit.MINUTES)
                    .build(oidcDefaultJsonWebKeystoreCacheLoader());
    return cache;
}
Project: cas-5.1.0    File: DefaultDelegatingAuditTrailManager.java
public DefaultDelegatingAuditTrailManager(final AuditTrailManager manager) {
    this.manager = manager;
    this.storage = CacheBuilder.newBuilder()
            .initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(MAX_CACHE_SIZE)
            .recordStats()
            .expireAfterWrite(this.expirationDuration, this.expirationTimeUnit)
            .build(new CacheLoader<String, AuditActionContext>() {
                @Override
                public AuditActionContext load(final String s) throws Exception {
                    LOGGER.error("Load operation of the audit cache is not supported.");
                    return null;
                }
            });
}
Project: accumulate    File: GuavaTest.java
@Test
public void test_001() throws ExecutionException {
    LoadingCache<String,Object> failedCache = CacheBuilder.newBuilder().
            softValues().maximumSize(10000)
            .build(new CacheLoader<String, Object>() {

                @Override
                public Object load(String s) throws Exception {
                    return new AtomicInteger(0);
                }
            });

    failedCache.put("00",((AtomicInteger)failedCache.get("00")).incrementAndGet());
    System.out.println(failedCache.get("00"));
}