Example source code for the Java class org.hibernate.cache.CacheException. The snippets below are collected from open-source projects; each one is preceded by its project name and source file.

Project: lams    File: ActionQueue.java
public void afterTransactionCompletion(boolean success) {
    while ( !processes.isEmpty() ) {
        try {
            processes.poll().doAfterTransactionCompletion( success, session );
        }
        catch (CacheException ce) {
            LOG.unableToReleaseCacheLock( ce );
            // continue loop
        }
        catch (Exception e) {
            throw new AssertionFailure( "Exception releasing cache locks", e );
        }
    }

    if ( session.getFactory().getSettings().isQueryCacheEnabled() ) {
        session.getFactory().getUpdateTimestampsCache().invalidate(
                querySpacesToInvalidate.toArray( new String[querySpacesToInvalidate.size()] ),
                session
        );
    }
    querySpacesToInvalidate.clear();
}
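Note that CacheException extends HibernateException and is therefore unchecked; the loop above deliberately logs it and keeps going, so a failure to release a cache lock does not abort transaction completion. Below is a minimal sketch of the same "log and continue" attitude in application code, assuming a hypothetical helper class and entity name and an SLF4J logger; the Cache API calls match the Hibernate 4-era code in these snippets.

import org.hibernate.SessionFactory;
import org.hibernate.cache.CacheException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class CacheEvictionHelper {
    private static final Logger LOG = LoggerFactory.getLogger(CacheEvictionHelper.class);

    // Evicts one entity's second-level cache region, treating cache trouble as
    // non-fatal in the same way as the catch block in ActionQueue above.
    public static void evictQuietly(SessionFactory sessionFactory, String entityName) {
        try {
            sessionFactory.getCache().evictEntityRegion(entityName);
        }
        catch (CacheException ce) {
            LOG.warn("Could not evict second-level cache region for " + entityName, ce);
        }
    }
}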
Project: lams    File: CollectionAction.java
@Override
public final void beforeExecutions() throws CacheException {
    // we need to obtain the lock before any actions are executed, since this may be an inverse="true"
    // bidirectional association and it is one of the earlier entity actions which actually updates
    // the database (this action is responsible for second-level cache invalidation only)
    if ( persister.hasCache() ) {
        final CacheKey ck = session.generateCacheKey(
                key,
                persister.getKeyType(),
                persister.getRole()
        );
        final SoftLock lock = persister.getCacheAccessStrategy().lockItem( ck, null );
        // the old behavior used key as opposed to getKey()
        afterTransactionProcess = new CacheCleanupProcess( key, persister, lock );
    }
}
Project: lams    File: EntityUpdateAction.java
@Override
public void doAfterTransactionCompletion(boolean success, SessionImplementor session) throws CacheException {
    final EntityPersister persister = getPersister();
    if ( persister.hasCache() ) {

        final CacheKey ck = getSession().generateCacheKey(
                getId(),
                persister.getIdentifierType(), 
                persister.getRootEntityName()
        );

        if ( success && cacheEntry!=null /*!persister.isCacheInvalidationRequired()*/ ) {
            final boolean put = cacheAfterUpdate( persister, ck );

            if ( put && getSession().getFactory().getStatistics().isStatisticsEnabled() ) {
                getSession().getFactory().getStatisticsImplementor().secondLevelCachePut(
                        getPersister().getCacheAccessStrategy().getRegion().getName()
                );
            }
        }
        else {
            persister.getCacheAccessStrategy().unlockItem( ck, lock );
        }
    }
    postCommitUpdate( success );
}
Project: hazelcast-hibernate    File: HazelcastEntityRegion.java
@Override
public EntityRegionAccessStrategy buildAccessStrategy(final AccessType accessType) throws CacheException {
    if (AccessType.READ_ONLY.equals(accessType)) {
        return new EntityRegionAccessStrategyAdapter(
                new ReadOnlyAccessDelegate<HazelcastEntityRegion>(this, props));
    }
    if (AccessType.NONSTRICT_READ_WRITE.equals(accessType)) {
        return new EntityRegionAccessStrategyAdapter(
                new NonStrictReadWriteAccessDelegate<HazelcastEntityRegion>(this, props));
    }
    if (AccessType.READ_WRITE.equals(accessType)) {
        return new EntityRegionAccessStrategyAdapter(
                new ReadWriteAccessDelegate<HazelcastEntityRegion>(this, props));
    }
    throw new CacheException("AccessType \"" + accessType + "\" is not currently supported by Hazelcast.");
}
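For context, the AccessType handled here comes from the cache concurrency strategy declared on the mapping; READ_ONLY, NONSTRICT_READ_WRITE and READ_WRITE are covered above, and the remaining value, TRANSACTIONAL, is the one that reaches the CacheException. A minimal sketch of a mapping that would select the READ_WRITE branch, using a hypothetical Person entity:

import javax.persistence.Cacheable;
import javax.persistence.Entity;
import javax.persistence.Id;
import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;

@Entity
@Cacheable
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE) // resolves to AccessType.READ_WRITE above
public class Person { // hypothetical entity, for illustration only
    @Id
    private Long id;

    private String name;
}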
Project: hazelcast-hibernate    File: HazelcastInstanceLoader.java
@Override
public void unloadInstance() throws CacheException {
    if (instance == null) {
        return;
    }
    if (!shutDown) {
        LOGGER.warning(CacheEnvironment.SHUTDOWN_ON_STOP + " property is set to 'false'. "
                + "Leaving current HazelcastInstance active! (Warning: Do not disable Hazelcast "
                + CacheEnvironment.HAZELCAST_SHUTDOWN_HOOK_ENABLED + " property!)");
        return;
    }
    try {
        instance.getLifecycleService().shutdown();
        instance = null;
    } catch (Exception e) {
        throw new CacheException(e);
    }
}
Project: hazelcast-hibernate5    File: HazelcastCollectionRegion.java
@Override
public CollectionRegionAccessStrategy buildAccessStrategy(final AccessType accessType) throws CacheException {
    if (AccessType.READ_ONLY.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new ReadOnlyAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    if (AccessType.NONSTRICT_READ_WRITE.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new NonStrictReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    if (AccessType.READ_WRITE.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new ReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    throw new CacheException("AccessType \"" + accessType + "\" is not currently supported by Hazelcast.");
}
Project: hazelcast-hibernate    File: CustomPropertiesTest.java
@Test
public void testNamedClient_noInstance() throws Exception {
    exception.expect(ServiceException.class);
    exception.expectCause(allOf(isA(CacheException.class), new BaseMatcher<CacheException>() {
        @Override
        public boolean matches(Object item) {
            return ((CacheException) item).getMessage().contains("No client with name [dev-custom] could be found.");
        }

        @Override
        public void describeTo(Description description) {
        }
    }));

    Properties props = new Properties();
    props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastCacheRegionFactory.class.getName());
    props.setProperty(CacheEnvironment.USE_NATIVE_CLIENT, "true");
    props.setProperty(CacheEnvironment.NATIVE_CLIENT_INSTANCE_NAME, "dev-custom");
    props.setProperty("hibernate.dialect", "org.hibernate.dialect.HSQLDialect");

    Configuration configuration = new Configuration();
    configuration.addProperties(props);

    SessionFactory sf = configuration.buildSessionFactory();
    sf.close();
}
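The expected failure comes from HazelcastClientLoader.loadInstance() (shown further below), which looks up an already running client by name. A hedged sketch of what the test deliberately leaves out, i.e. registering a client under the name "dev-custom" before the SessionFactory is built; the class and method names used are the standard Hazelcast client API:

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;

public final class NamedClientBootstrap { // hypothetical test fixture
    // Registers a client instance under the name the loader looks up, so that
    // HazelcastClient.getHazelcastClientByName("dev-custom") would find it.
    public static HazelcastInstance startDevCustomClient() {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.setInstanceName("dev-custom");
        return HazelcastClient.newHazelcastClient(clientConfig);
    }
}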
Project: hazelcast-hibernate    File: HazelcastInstanceLoader.java
@Override
public void configure(Properties props) {
    String instanceName = CacheEnvironment.getInstanceName(props);

    if (!StringUtil.isNullOrEmptyAfterTrim(instanceName)) {
        LOGGER.info("Using existing HazelcastInstance [" + instanceName + "].");
        this.existingInstanceName = instanceName;
    } else {
        String configResourcePath = CacheEnvironment.getConfigFilePath(props);
        if (!StringUtil.isNullOrEmptyAfterTrim(configResourcePath)) {
            try {
                this.config = ConfigLoader.load(configResourcePath);
            } catch (IOException e) {
                LOGGER.warning("IOException: " + e.getMessage());
            }
            if (config == null) {
                throw new CacheException("Could not find configuration file: " + configResourcePath);
            }
        } else {
            this.config = new XmlConfigBuilder().build();
        }
    }

    this.shutDown = CacheEnvironment.shutdownOnStop(props, (instanceName == null));
}
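configure() supports two routes: reusing an existing HazelcastInstance by name, or building one from a configuration file on the classpath (falling back to the default hazelcast.xml lookup). A hedged sketch of the corresponding Hibernate properties follows; the literal hibernate.cache.hazelcast.* keys are assumptions taken from the hazelcast-hibernate documentation and should be checked against the version in use.

import java.util.Properties;
import org.hibernate.cfg.Environment;
import com.hazelcast.hibernate.HazelcastCacheRegionFactory;

public final class HazelcastCacheProperties { // hypothetical helper
    public static Properties existingInstanceProps() {
        Properties props = new Properties();
        props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastCacheRegionFactory.class.getName());
        // Route 1: reuse an already running, named HazelcastInstance (key string is an assumption).
        props.setProperty("hibernate.cache.hazelcast.instance_name", "my-hibernate-cache");
        // Route 2, instead of route 1: build an instance from a classpath config file
        // (key string is an assumption); if neither is set, XmlConfigBuilder uses its defaults.
        // props.setProperty("hibernate.cache.hazelcast.configuration_file_path", "hazelcast-hibernate.xml");
        return props;
    }
}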
Project: alfresco-repository    File: DefaultCacheProvider.java
@Override
public Cache buildCache(String regionName, Properties properties) throws CacheException
{
    if (log.isDebugEnabled())
    {
        log.debug("building cache for regionName=" + regionName + ", with properties: " + properties);
    }
    DefaultSimpleCache<Serializable, Object> cache = new DefaultSimpleCache<Serializable, Object>(defaultMaxItems, null);
    Cache hibCache = new HibernateSimpleCacheAdapter(cache, regionName);
    return hibCache;
}
Project: lams    File: BasicCollectionPersister.java
public BasicCollectionPersister(
        Collection collection,
        CollectionRegionAccessStrategy cacheAccessStrategy,
        Configuration cfg,
        SessionFactoryImplementor factory) throws MappingException, CacheException {
    super( collection, cacheAccessStrategy, cfg, factory );
}
Project: lams    File: OneToManyPersister.java
public OneToManyPersister(
        Collection collection,
        CollectionRegionAccessStrategy cacheAccessStrategy,
        Configuration cfg,
        SessionFactoryImplementor factory) throws MappingException, CacheException {
    super( collection, cacheAccessStrategy, cfg, factory );
    cascadeDeleteEnabled = collection.getKey().isCascadeDeleteEnabled() &&
            factory.getDialect().supportsCascadeDelete();
    keyIsNullable = collection.getKey().isNullable();
    keyIsUpdateable = collection.getKey().isUpdateable();
}
Project: lams    File: UpdateTimestampsCache.java
/**
 * Perform pre-invalidation.
 *
 *
 * @param spaces The spaces to pre-invalidate
 *
 * @param session
 * @throws CacheException Indicated problem delegating to underlying region.
 */
public void preInvalidate(Serializable[] spaces, SessionImplementor session) throws CacheException {
    final boolean stats = factory != null && factory.getStatistics().isStatisticsEnabled();

    final Long ts = region.nextTimestamp() + region.getTimeout();

    for ( Serializable space : spaces ) {
        if ( DEBUG_ENABLED ) {
            LOG.debugf( "Pre-invalidating space [%s], timestamp: %s", space, ts );
        }

        try {
            session.getEventListenerManager().cachePutStart();

            //put() has nowait semantics, is this really appropriate?
            //note that it needs to be async replication, never local or sync
            region.put( space, ts );
        }
        finally {
            session.getEventListenerManager().cachePutEnd();
        }

        if ( stats ) {
            factory.getStatisticsImplementor().updateTimestampsCachePut();
        }
    }
}
Project: lams    File: UpdateTimestampsCache.java
/**
 * Perform invalidation.
 *
 *
 * @param spaces The spaces to invalidate
 *
 * @param session
 * @throws CacheException Indicated problem delegating to underlying region.
 */
public void invalidate(Serializable[] spaces, SessionImplementor session) throws CacheException {
    final boolean stats = factory != null && factory.getStatistics().isStatisticsEnabled();

    final Long ts = region.nextTimestamp();

    for (Serializable space : spaces) {
        if ( DEBUG_ENABLED ) {
            LOG.debugf( "Invalidating space [%s], timestamp: %s", space, ts );
        }

        try {
            session.getEventListenerManager().cachePutStart();

            //put() has nowait semantics, is this really appropriate?
            //note that it needs to be async replication, never local or sync
            region.put( space, ts );
        }
        finally {
            session.getEventListenerManager().cachePutEnd();
        }

        if ( stats ) {
            factory.getStatisticsImplementor().updateTimestampsCachePut();
        }
    }
}
Project: lams    File: UpdateTimestampsCache.java
/**
 * Perform an up-to-date check for the given set of query spaces.
 *
 *
 * @param spaces The spaces to check
 * @param timestamp The timestamp against which to check.
 *
 * @param session
 * @return Whether all those spaces are up-to-date
 *
 * @throws CacheException Indicated problem delegating to underlying region.
 */
public boolean isUpToDate(Set<Serializable> spaces, Long timestamp, SessionImplementor session) throws CacheException {
    final boolean stats = factory != null && factory.getStatistics().isStatisticsEnabled();

    for ( Serializable space : spaces ) {
        final Long lastUpdate = getLastUpdateTimestampForSpace( space, session );
        if ( lastUpdate == null ) {
            if ( stats ) {
                factory.getStatisticsImplementor().updateTimestampsCacheMiss();
            }
            //the last update timestamp was lost from the cache
            //(or there were no updates since startup!)
            //updateTimestamps.put( space, new Long( updateTimestamps.nextTimestamp() ) );
            //result = false; // safer
        }
        else {
            if ( DEBUG_ENABLED ) {
                LOG.debugf(
                        "[%s] last update timestamp: %s",
                        space,
                        lastUpdate + ", result set timestamp: " + timestamp
                );
            }
            if ( stats ) {
                factory.getStatisticsImplementor().updateTimestampsCacheHit();
            }
            if ( lastUpdate >= timestamp ) {
                return false;
            }
        }
    }
    return true;
}
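These three UpdateTimestampsCache methods back Hibernate's query cache: preInvalidate() and invalidate() record a new timestamp for every query space touched by a write, and isUpToDate() reports a cached result as stale when any of its query spaces was invalidated at or after the result's timestamp. A minimal usage sketch, assuming hibernate.cache.use_query_cache=true and a hypothetical Person entity, of a query whose cached results are validated through this mechanism:

import java.util.List;
import org.hibernate.Session;

public final class PersonQueries { // hypothetical helper
    // Results go into the query cache and are re-checked against the
    // timestamps cache (isUpToDate) before being reused.
    public static List<?> findByName(Session session, String name) {
        return session.createQuery("from Person p where p.name = :n")
                .setParameter("n", name)
                .setCacheable(true)
                .list();
    }
}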
Project: lams    File: CollectionAction.java
protected final void evict() throws CacheException {
    if ( persister.hasCache() ) {
        final CacheKey ck = session.generateCacheKey(
                key, 
                persister.getKeyType(), 
                persister.getRole()
        );
        persister.getCacheAccessStrategy().remove( ck );
    }
}
Project: jhipster-ng-admin    File: SpringCacheRegionFactory.java
@Override
public void start(SessionFactoryOptions options, Properties properties) throws CacheException {
    // Translate the Spring URI to a real URI
    String uri = properties.getProperty(CONFIG_URI);
    Resource resource = new DefaultResourceLoader().getResource(uri);
    try {
        properties.setProperty(CONFIG_URI, resource.getURI().toString());
    }
    catch(IOException e) {
        throw new CacheException(e);
    }
    super.start(options, properties);
}
Project: hazelcast-hibernate    File: ReadOnlyAccessDelegate.java
/**
 * @throws UnsupportedOperationException Always thrown as we cannot update an item in a read-only cache
 */
@Override
public boolean afterUpdate(final Object key, final Object value, final Object currentVersion,
                           final Object previousVersion, final SoftLock lock) throws CacheException {
    throw new UnsupportedOperationException("Cannot update an item in a read-only cache: "
            + getHazelcastRegion().getName());
}
Project: hazelcast-hibernate5    File: HazelcastLocalCacheRegionFactory.java
@Override
public EntityRegion buildEntityRegion(final String regionName, final Properties properties,
                                      final CacheDataDescription metadata) throws CacheException {
    final HazelcastEntityRegion<LocalRegionCache> region = new HazelcastEntityRegion<LocalRegionCache>(instance,
            regionName, properties, metadata, new LocalRegionCache(regionName, instance, metadata));
    cleanupService.registerCache(region.getCache());
    return region;
}
Project: hazelcast-hibernate    File: NonStrictReadWriteAccessDelegate.java
@Override
public void remove(final Object key) throws CacheException {
    try {
        cache.remove(key);
    } catch (HazelcastException e) {
        throw new CacheException("Operation timeout during remove operation from cache!", e);
    }
}
Project: hazelcast-hibernate    File: ReadWriteAccessDelegate.java
/**
 * {@inheritDoc}
 * <p>
 * Called after <code>com.hazelcast.ReadWriteAccessDelegate.lockItem()</code>
 * </p>
 */
@Override
public boolean afterUpdate(final Object key, final Object value, final Object currentVersion, final Object previousVersion,
                           final SoftLock lock) throws CacheException {
    try {
        return cache.update(key, value, currentVersion, lock);
    } catch (HazelcastException e) {
        if (log.isFinestEnabled()) {
            log.finest("Could not update Cache[" + hazelcastRegion.getName() + "]: " + e.getMessage());
        }
        return false;
    }
}
Project: hazelcast-hibernate    File: AbstractHazelcastCacheRegionFactory.java
@Override
public final QueryResultsRegion buildQueryResultsRegion(final String regionName, final Properties properties)
        throws CacheException {
    HazelcastQueryResultsRegion region = new HazelcastQueryResultsRegion(instance, regionName, properties);
    cleanupService.registerCache(region.getCache());
    return region;
}
Project: hazelcast-hibernate    File: HazelcastCollectionRegion.java
public CollectionRegionAccessStrategy buildAccessStrategy(final AccessType accessType) throws CacheException {
    if (AccessType.READ_ONLY.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new ReadOnlyAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    if (AccessType.NONSTRICT_READ_WRITE.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new NonStrictReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    if (AccessType.READ_WRITE.equals(accessType)) {
        return new CollectionRegionAccessStrategyAdapter(
                new ReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props));
    }
    throw new CacheException("AccessType \"" + accessType + "\" is not currently supported by Hazelcast.");
}
Project: hazelcast-hibernate5    File: ReadOnlyAccessDelegate.java
/**
 * @throws UnsupportedOperationException
 */
@Override
public boolean afterUpdate(final Object key, final Object value, final Object currentVersion,
                           final Object previousVersion, final SoftLock lock) throws CacheException {
    throw new UnsupportedOperationException("Cannot update an item in a read-only cache: "
            + getHazelcastRegion().getName());
}
Project: hazelcast-hibernate    File: ReadWriteAccessDelegate.java
@Override
public boolean afterInsert(final Object key, final Object value, final Object version) throws CacheException {
    try {
        return cache.insert(key, value, version);
    } catch (HazelcastException e) {
        if (log.isFinestEnabled()) {
            log.finest("Could not insert into Cache[" + hazelcastRegion.getName() + "]: " + e.getMessage());
        }
        return false;
    }
}
Project: hazelcast-hibernate    File: ReadOnlyAccessDelegate.java
/**
 * @throws UnsupportedOperationException Thrown always because update is not possible on a read-only cache
 */
@Override
public boolean update(final Object key, final Object value, final Object currentVersion,
                      final Object previousVersion) throws CacheException {
    throw new UnsupportedOperationException("Attempting to update an item in a read-only cache: "
            + getHazelcastRegion().getName());
}
Project: hazelcast-hibernate    File: HazelcastLocalCacheRegionFactory.java
public EntityRegion buildEntityRegion(final String regionName, final Properties properties,
                                      final CacheDataDescription metadata) throws CacheException {
    final HazelcastEntityRegion<LocalRegionCache> region = new HazelcastEntityRegion<LocalRegionCache>(instance,
            regionName, properties, metadata, new LocalRegionCache(regionName, instance, metadata));
    cleanupService.registerCache(region.getCache());
    return region;
}
Project: hazelcast-hibernate5    File: AbstractGeneralRegion.java
@Override
public Object get(final SharedSessionContractImplementor session, final Object key) throws CacheException {
    try {
        return getCache().get(key, nextTimestamp());
    } catch (OperationTimeoutException e) {
        return null;
    }
}
Project: hazelcast-hibernate5    File: AbstractAccessDelegate.java
@Override
public Object get(final Object key, final long txTimestamp) throws CacheException {
    try {
        return cache.get(key, txTimestamp);
    } catch (HazelcastException e) {
        if (log.isFinestEnabled()) {
            log.finest("Could not read from Cache[" + hazelcastRegion.getName() + "]: " + e.getMessage());
        }
        return null;
    }
}
Project: hazelcast-hibernate5    File: AbstractAccessDelegate.java
@Override
public boolean putFromLoad(final Object key, final Object value, final long txTimestamp,
                           final Object version) throws CacheException {
    try {
        return cache.put(key, value, txTimestamp, version);
    } catch (HazelcastException e) {
        if (log.isFinestEnabled()) {
            log.finest("Could not put into Cache[" + hazelcastRegion.getName() + "]: " + e.getMessage());
        }
        return false;
    }
}
Project: hazelcast-hibernate    File: HazelcastClientLoader.java
@Override
public HazelcastInstance loadInstance() throws CacheException {
    if (instanceName != null) {
        client = HazelcastClient.getHazelcastClientByName(instanceName);
        if (client == null) {
            throw new CacheException("No client with name [" + instanceName + "] could be found.");
        }
    } else {
        client = HazelcastClient.newHazelcastClient(clientConfig);
    }
    return client;
}
Project: hazelcast-hibernate    File: HazelcastLocalCacheRegionFactory.java
@Override
public CollectionRegion buildCollectionRegion(final String regionName, final Properties properties,
                                              final CacheDataDescription metadata) throws CacheException {
    final HazelcastCollectionRegion<LocalRegionCache> region = new HazelcastCollectionRegion<LocalRegionCache>(instance,
            regionName, properties, metadata, new LocalRegionCache(regionName, instance, metadata));
    cleanupService.registerCache(region.getCache());
    return region;
}
Project: hazelcast-hibernate    File: AbstractHazelcastCacheRegionFactory.java
public void start(final Settings settings, final Properties properties) throws CacheException {
    log.info("Starting up " + getClass().getSimpleName());
    if (instance == null || !instance.getLifecycleService().isRunning()) {
        instanceLoader = HazelcastInstanceFactory.createInstanceLoader(properties);
        instance = instanceLoader.loadInstance();
    }
    cleanupService = new CleanupService(instance.getName());
}
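start() is shared by both Hazelcast region factories. Which one runs is selected through hibernate.cache.region.factory_class: HazelcastCacheRegionFactory stores regions in distributed Hazelcast maps, while HazelcastLocalCacheRegionFactory (the one used in the buildEntityRegion/buildCollectionRegion snippets above) keeps locally held region caches that are invalidated across the cluster, as described in the hazelcast-hibernate documentation. A hedged configuration sketch in the same Properties style as the test shown earlier:

import java.util.Properties;
import org.hibernate.cfg.Environment;
import com.hazelcast.hibernate.HazelcastLocalCacheRegionFactory;

public final class RegionFactorySelection { // hypothetical helper
    public static Properties localCachedRegions() {
        Properties props = new Properties();
        // Environment.CACHE_REGION_FACTORY is the "hibernate.cache.region.factory_class" key.
        props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastLocalCacheRegionFactory.class.getName());
        return props;
    }
}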
Project: hazelcast-hibernate    File: HazelcastClientLoader.java
@Override
public void unloadInstance() throws CacheException {
    if (client == null) {
        return;
    }

    try {
        client.getLifecycleService().shutdown();
        client = null;
    } catch (Exception e) {
        throw new CacheException(e);
    }
}
Project: hazelcast-hibernate5    File: AbstractGeneralRegion.java
@Override
public void evictAll() throws CacheException {
    try {
        getCache().clear();
    } catch (OperationTimeoutException e) {
        Logger.getLogger(AbstractGeneralRegion.class).finest(e);
    }
}
Project: stroom-stats    File: EventStore.java
public void purgeStatisticDataSourceData(final UniqueIdCache uniqueIdCache,
                                         final List<StatisticConfiguration> statisticConfigurations,
                                         final PurgeMode purgeMode) {
    final long startTime = getCurrentTimeMs();

    final Consumer<StatisticConfiguration> consumer;
    if (PurgeMode.OUTSIDE_RETENTION.equals(purgeMode)) {
        consumer = (statisticStore) -> {
            final long purgeUpToTimeMs = calculatePurgeUpToTimeMs(startTime);
            LOGGER.info(
                    "Purging store [{}] with data source count [{}] and row key interval size [{}].  Purging up to [{}]",
                    timeInterval.longName(), statisticConfigurations.size(), this.timeInterval.getRowKeyIntervalAsString(),
                    DateUtil.createNormalDateTimeString(purgeUpToTimeMs));

            // generate roll up masks based on the number of tags on the
            // stat and whether roll ups are enabled or
            // not
            final Set<RollUpBitMask> bitMasks = RollUpBitMask
                    .getRollUpBitMasks(statisticStore.getRollUpType().equals(StatisticRollUpType.ALL)
                            ? statisticStore.getFieldNames().size() : 0);

            for (final RollUpBitMask rollUpBitMask : bitMasks) {
                eventStoreTable.purgeUntilTime(uniqueIdCache, statisticStore, rollUpBitMask,
                        purgeUpToTimeMs);
            }

        };
    } else {
        consumer = (statisticStore) -> {
            eventStoreTable.purgeAll(uniqueIdCache, statisticStore);
        };
    }

    for (final StatisticConfiguration statisticConfiguration : statisticConfigurations) {
        try {
            consumer.accept(statisticConfiguration);
        } catch (final CacheException ce) {
            if (ce.getMessage().contains(statisticConfiguration.getName())) {
                LOGGER.info("Unable to purge statistics for [{}] in store [{}] due to there being no entry in the UID cache for it.  With no entry in the cache there should be no statistics to purge",
                        statisticConfiguration.getName(), timeInterval.longName());
            } else {
                throw ce;
            }
        }
    }

    final long runTime = System.currentTimeMillis() - startTime;

    LOGGER.info(() -> String.format("Purged event store [%s] in %.2f mins", timeInterval.longName(),
            new Double(runTime / 1000d / 60d)));
}
Project: alfresco-repository    File: HibernateSimpleCacheAdapter.java
@Override
public Object read(Object key) throws CacheException
{
    return cache.get(serializable(key));
}