@Test
public void testThreeArgConstructorRegistersTopicListener() {
    MapConfig mapConfig = mock(MapConfig.class);
    Config config = mock(Config.class);
    when(config.findMapConfig(eq(CACHE_NAME))).thenReturn(mapConfig);

    ITopic<Object> topic = mock(ITopic.class);
    when(topic.addMessageListener(isNotNull(MessageListener.class))).thenReturn("ignored");

    HazelcastInstance instance = mock(HazelcastInstance.class);
    when(instance.getConfig()).thenReturn(config);
    when(instance.getTopic(eq(CACHE_NAME))).thenReturn(topic);

    new LocalRegionCache(CACHE_NAME, instance, null);

    verify(config).findMapConfig(eq(CACHE_NAME));
    verify(instance).getConfig();
    verify(instance).getTopic(eq(CACHE_NAME));
    verify(topic).addMessageListener(isNotNull(MessageListener.class));
}
private MapConfig createMapConfig(final TicketDefinition definition) {
    final HazelcastProperties hz = casProperties.getTicket().getRegistry().getHazelcast();
    final HazelcastProperties.Cluster cluster = hz.getCluster();
    final EvictionPolicy evictionPolicy = EvictionPolicy.valueOf(cluster.getEvictionPolicy());

    LOGGER.debug("Creating Hazelcast map configuration for [{}] with idle timeout [{}] second(s)",
            definition.getProperties().getStorageName(), definition.getProperties().getStorageTimeout());
    return new MapConfig()
            .setName(definition.getProperties().getStorageName())
            .setMaxIdleSeconds((int) definition.getProperties().getStorageTimeout())
            .setBackupCount(cluster.getBackupCount())
            .setAsyncBackupCount(cluster.getAsyncBackupCount())
            .setEvictionPolicy(evictionPolicy)
            .setMaxSizeConfig(new MaxSizeConfig()
                    .setMaxSizePolicy(MaxSizeConfig.MaxSizePolicy.valueOf(cluster.getMaxSizePolicy()))
                    .setSize(cluster.getMaxHeapSizePercentage()));
}
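/*
 * A minimal usage sketch (assumption, not from the original source): one plausible
 * way the createMapConfig(...) factory above gets wired into the Hazelcast Config
 * before the instance starts. The registerTicketMaps name is hypothetical.
 */
private void registerTicketMaps(final Config config, final Collection<TicketDefinition> definitions) {
    // Register one map configuration per ticket definition.
    definitions.forEach(definition -> config.addMapConfig(createMapConfig(definition)));
}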
@PostConstruct
public void init() {
    Config config = this.hazelcastInstance.getConfig();
    String mapName = this.sessionProperties.getMapName();
    MapConfig mapConfig = config.getMapConfigOrNull(mapName);
    if (mapConfig == null) {
        // @formatter:off
        MapAttributeConfig principalNameAttributeConfig = new MapAttributeConfig()
                .setName(HazelcastSessionRepository.PRINCIPAL_NAME_ATTRIBUTE)
                .setExtractor(PrincipalNameExtractor.class.getName());
        // @formatter:on
        MapIndexConfig principalNameIndexConfig = new MapIndexConfig(
                HazelcastSessionRepository.PRINCIPAL_NAME_ATTRIBUTE, false);
        // @formatter:off
        mapConfig = new MapConfig(mapName)
                .addMapAttributeConfig(principalNameAttributeConfig)
                .addMapIndexConfig(principalNameIndexConfig);
        // @formatter:on
        config.addMapConfig(mapConfig);
    }
}
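/*
 * Illustrative sketch (an assumption, not from the original source): the attribute
 * extractor and index registered in init() above exist so that session lookups by
 * principal name can run as an indexed predicate query rather than a full map scan,
 * roughly like this. The findByPrincipal helper is hypothetical.
 */
Collection<MapSession> findByPrincipal(IMap<String, MapSession> sessions, String principal) {
    // Queries the extracted "principalName" attribute, backed by the unordered index.
    return sessions.values(
            Predicates.equal(HazelcastSessionRepository.PRINCIPAL_NAME_ATTRIBUTE, principal));
}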
private MapConfig initializeDefaultMapConfig() {
    MapConfig mapConfig = new MapConfig();

    /*
     * Number of backups. If 1 is set as the backup-count for example, then all entries of the
     * map will be copied to another JVM for fail-safety. Valid numbers are 0 (no backup), 1, 2,
     * 3.
     */
    mapConfig.setBackupCount(1);

    /*
     * Valid values are: NONE (no eviction), LRU (Least Recently Used), LFU (Least Frequently
     * Used). NONE is the default.
     */
    mapConfig.setEvictionPolicy(EvictionPolicy.LRU);

    /*
     * Maximum size of the map. When max size is reached, the map is evicted based on the policy
     * defined. Any integer between 0 and Integer.MAX_VALUE. 0 means Integer.MAX_VALUE. Default
     * is 0.
     */
    mapConfig.setMaxSizeConfig(new MaxSizeConfig(0, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));

    return mapConfig;
}
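/*
 * A minimal sketch (assumption about how this default is consumed): registering the
 * config under the name "default" makes Hazelcast apply it to every map that has no
 * more specific configuration of its own. The buildConfig name is hypothetical.
 */
Config buildConfig() {
    Config config = new Config();
    config.addMapConfig(initializeDefaultMapConfig().setName("default"));
    return config;
}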
public HazelcastLockMemory(IMap<String, LockValue> locksMap) {
    super();
    this.locksMap = locksMap;
    LOG.info("HazelcastLockMemory ----- MAP_ID: " + locksMap.getId());
    LOG.info("HazelcastLockMemory ----- MAP_NAME: " + locksMap.getName());
    LOG.info("HazelcastLockMemory ----- MAP_STRING: " + locksMap.toString());
    LOG.info("HazelcastLockMemory ----- MAP_INSTANCE_TYPE: " + locksMap.getInstanceType());

    MapConfig mapConf = Hazelcast.getConfig().getMapConfig(DistributedMapNames.MAP.LOCK_MEMORY_LOCKS_MAP.toString());
    MapStoreConfig mapStoreConf = mapConf.getMapStoreConfig();
    if (mapStoreConf == null) {
        LOG.info("HazelcastLockMemory ----- MAPSTORE NULL");
    } else {
        LOG.info("HazelcastLockMemory ----- MAPSTORE IMPL: " + mapStoreConf.getImplementation());
    }
}
/**
 * Requires a hazelcast map.
 *
 * @param fileTrackerMemory
 *            stores the file tracking status entries
 * @param logTypeSet
 *            stores the log types
 * @param agentSet
 *            stores the agent contacts
 */
public HazelcastFileTrackerStorage(
        IMap<FileTrackingStatusKey, FileTrackingStatus> fileTrackerMemory,
        IMap<String, LogTypeContact> logTypeSet,
        IMap<String, AgentContact> agentSet) {
    this.fileTrackerMemoryMap = fileTrackerMemory;
    this.logTypeSet = logTypeSet;
    this.agentSet = agentSet;

    MapConfig mapConf = Hazelcast.getConfig().getMapConfig(
            DistributedMapNames.MAP.FILE_TRACKER_MAP.toString());
    MapStoreConfig mapStoreConf = mapConf.getMapStoreConfig();
    if (mapStoreConf == null) {
        LOG.info("HazelcastFileTrackerStorage ----- MAPSTORE NULL");
    } else {
        LOG.info("HazelcastFileTrackerStorage ----- MAPSTORE IMPL: "
                + mapStoreConf.getImplementation());
    }
}
void addMapConfig(Class<?> c) {
    if (!c.isAnnotationPresent(HzMapConfig.class)) {
        throw new IllegalArgumentException(c + " not annotated with @" + HzMapConfig.class.getSimpleName());
    }
    HzMapConfig hc = c.getAnnotation(HzMapConfig.class);

    // Reuse an existing config for this name if one is already registered.
    MapConfig mapC = new MapConfig(hc.name());
    if (hzConfig.getMapConfigs().containsKey(hc.name())) {
        mapC = hzConfig.getMapConfig(hc.name());
    }

    mapC.setAsyncBackupCount(hc.asyncBackupCount());
    mapC.setBackupCount(hc.backupCount());
    mapC.setEvictionPercentage(hc.evictPercentage());
    mapC.setEvictionPolicy(EvictionPolicy.valueOf(hc.evictPolicy()));
    mapC.setInMemoryFormat(InMemoryFormat.valueOf(hc.inMemoryFormat()));
    mapC.setMaxIdleSeconds(hc.idleSeconds());
    // NOTE: this passes the eviction policy as the merge policy; if the annotation
    // defines a dedicated merge-policy attribute, that is the likelier intent.
    mapC.setMergePolicy(hc.evictPolicy());
    mapC.setMinEvictionCheckMillis(hc.evictCheckMillis());
    mapC.setTimeToLiveSeconds(hc.ttlSeconds());
    mapC.setMaxSizeConfig(new MaxSizeConfig(hc.maxSize(), MaxSizePolicy.valueOf(hc.maxSizePolicy())));
    mapC.setStatisticsEnabled(hc.statisticsOn());

    hzConfig.getMapConfigs().put(mapC.getName(), mapC);
}
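/*
 * Hypothetical illustration (attribute names are inferred from the accessor calls
 * above; the real @HzMapConfig annotation may differ): a class declaring its
 * distributed-map settings declaratively, to be picked up by addMapConfig(...).
 */
@HzMapConfig(name = "orders",
        backupCount = 1,
        asyncBackupCount = 0,
        evictPolicy = "LRU",
        inMemoryFormat = "BINARY",
        ttlSeconds = 3600,
        idleSeconds = 600,
        maxSize = 10000,
        maxSizePolicy = "PER_NODE",
        statisticsOn = true)
public class OrderCacheEntry implements Serializable {
    // Entries of this type live in the "orders" map configured above.
}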
@Before
public void setUp() throws Exception {
    mapConfig = mock(MapConfig.class);
    when(mapConfig.getMaxSizeConfig()).thenReturn(new MaxSizeConfig(50, MaxSizeConfig.MaxSizePolicy.PER_NODE));
    when(mapConfig.getTimeToLiveSeconds()).thenReturn(timeout);

    config = mock(Config.class);
    when(config.findMapConfig(eq(REGION_NAME))).thenReturn(mapConfig);

    Cluster cluster = mock(Cluster.class);
    when(cluster.getClusterTime()).thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            return System.currentTimeMillis();
        }
    });

    instance = mock(HazelcastInstance.class);
    when(instance.getConfig()).thenReturn(config);
    when(instance.getCluster()).thenReturn(cluster);

    cache = mock(RegionCache.class);
    region = new HazelcastTimestampsRegion<RegionCache>(instance, REGION_NAME, new Properties(), cache);
}
@Before
public void setUp() throws Exception {
    mapConfig = mock(MapConfig.class);
    when(mapConfig.getMaxSizeConfig()).thenReturn(new MaxSizeConfig(maxSize, MaxSizeConfig.MaxSizePolicy.PER_NODE));
    when(mapConfig.getTimeToLiveSeconds()).thenReturn(timeout);

    config = mock(Config.class);
    when(config.findMapConfig(eq(REGION_NAME))).thenReturn(mapConfig);

    Cluster cluster = mock(Cluster.class);
    when(cluster.getClusterTime()).thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            return System.currentTimeMillis();
        }
    });

    instance = mock(HazelcastInstance.class);
    when(instance.getConfig()).thenReturn(config);
    when(instance.getCluster()).thenReturn(cluster);

    region = new HazelcastQueryResultsRegion(instance, REGION_NAME, new Properties());
}
@Test
public void testQueryCacheCleanup() {
    MapConfig mapConfig = getHazelcastInstance(sf).getConfig().getMapConfig("org.hibernate.cache.*");
    final float baseEvictionRate = 0.2f;
    final int numberOfEntities = 100;
    final int defaultCleanupPeriod = 60;
    final int maxSize = mapConfig.getMaxSizeConfig().getSize();
    final int evictedItemCount = numberOfEntities - maxSize + (int) (maxSize * baseEvictionRate);

    insertDummyEntities(numberOfEntities);
    for (int i = 0; i < numberOfEntities; i++) {
        executeQuery(sf, i);
    }

    HazelcastQueryResultsRegion queryRegion =
            ((HazelcastQueryResultsRegion) (((SessionFactoryImpl) sf).getQueryCache()).getRegion());
    assertEquals(numberOfEntities, queryRegion.getCache().size());
    sleep(defaultCleanupPeriod);
    assertEquals(numberOfEntities - evictedItemCount, queryRegion.getCache().size());
}
@Test
public void testQueryCacheCleanup() {
    MapConfig mapConfig = getHazelcastInstance(sf).getConfig().getMapConfig("org.hibernate.cache.*");
    final float baseEvictionRate = 0.2f;
    final int numberOfEntities = 100;
    final int defaultCleanupPeriod = 60;
    final int maxSize = mapConfig.getMaxSizeConfig().getSize();
    final int evictedItemCount = numberOfEntities - maxSize + (int) (maxSize * baseEvictionRate);

    insertDummyEntities(numberOfEntities);
    sleep(1);
    for (int i = 0; i < numberOfEntities; i++) {
        executeQuery(sf, i);
    }

    HazelcastQueryResultsRegion queryRegion =
            ((HazelcastQueryResultsRegion) (((SessionFactoryImpl) sf).getQueryCache()).getRegion());
    assertEquals(numberOfEntities, queryRegion.getCache().size());
    sleep(defaultCleanupPeriod);
    assertEquals(numberOfEntities - evictedItemCount, queryRegion.getCache().size());
}
static void configureJetService(JetConfig jetConfig) {
    if (!(jetConfig.getHazelcastConfig().getConfigPatternMatcher() instanceof MatchingPointConfigPatternMatcher)) {
        throw new UnsupportedOperationException("Custom config pattern matcher is not supported in Jet");
    }

    jetConfig.getHazelcastConfig().getServicesConfig()
             .addServiceConfig(new ServiceConfig().setEnabled(true)
                                                  .setName(JetService.SERVICE_NAME)
                                                  .setClassName(JetService.class.getName())
                                                  .setConfigObject(jetConfig));

    jetConfig.getHazelcastConfig().addMapConfig(new MapConfig(INTERNAL_JET_OBJECTS_PREFIX + "*")
            .setBackupCount(jetConfig.getInstanceConfig().getBackupCount())
            .setStatisticsEnabled(false)
            .setMergePolicy(IgnoreMergingEntryMapMergePolicy.class.getName()));
}
@Test
public void when_defaultMapConfig_then_notUsed() {
    // When
    JetConfig config = new JetConfig();
    config.getHazelcastConfig().getMapConfig("default")
          .setTimeToLiveSeconds(MapConfig.DEFAULT_TTL_SECONDS + 1);
    JetInstance instance = createJetMember(config);
    try {
        // Then
        int actualTTL = instance.getConfig().getHazelcastConfig()
                                .findMapConfig(INTERNAL_JET_OBJECTS_PREFIX + "fooMap")
                                .getTimeToLiveSeconds();
        assertEquals(MapConfig.DEFAULT_TTL_SECONDS, actualTTL);
    } finally {
        shutdownFactory();
    }
}
@Before
public void setup() {
    factory = new JetTestInstanceFactory();
    JetConfig config = new JetConfig();
    config.getInstanceConfig().setCooperativeThreadCount(LOCAL_PARALLELISM);

    // Force snapshots to fail by adding a failing map store configuration for snapshot data maps.
    MapConfig mapConfig = new MapConfig(SnapshotRepository.SNAPSHOT_DATA_NAME_PREFIX + '*');
    MapStoreConfig mapStoreConfig = mapConfig.getMapStoreConfig();
    mapStoreConfig.setEnabled(true);
    mapStoreConfig.setImplementation(new FailingMapStore());
    config.getHazelcastConfig().addMapConfig(mapConfig);

    JetInstance[] instances = factory.newMembers(config, 2);
    instance1 = instances[0];
}
public DistMapConfig setup(Config cfg, String name, Object storeImplementation) {
    // TODO: Refactor the config options
    MapConfig mapConfig = new MapConfig();
    mapConfig.setName(name);
    mapConfig.setBackupCount(1);
    if (storeImplementation != null) {
        MaxSizeConfig maxSizeConfig = new MaxSizeConfig();
        // TODO: Refactor this to config
        maxSizeConfig.setSize(1000);
        MapStoreConfig store = new MapStoreConfig();
        store.setImplementation(storeImplementation);
        mapConfig.setMaxSizeConfig(maxSizeConfig);
        mapConfig.setMapStoreConfig(store);
    }
    cfg.addMapConfig(mapConfig);
    return this;
}
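/*
 * Usage sketch (hypothetical names): wiring a MapStore-backed distributed map
 * through the setup(...) helper above before creating the instance.
 * JdbcSessionMapStore is an illustrative stand-in for a real MapStore implementation.
 */
Config cfg = new Config();
new DistMapConfig().setup(cfg, "sessions", new JdbcSessionMapStore());
HazelcastInstance hz = Hazelcast.newHazelcastInstance(cfg);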
private static Config createConfig(String name) {
    Config config = new Config(name);

    ExecutorConfig executorConfig = config.getExecutorConfig(EXECUTOR_NAME);
    executorConfig.setPoolSize(10);
    executorConfig.setQueueCapacity(10000);

    // Maps without backup
    MapConfig mapConfig1 = config.getMapConfig(MAP1_NAME);
    mapConfig1.setBackupCount(0);
    MapConfig mapConfig2 = config.getMapConfig(MAP2_NAME);
    mapConfig2.setBackupCount(0);

    return config;
}
public Cache createCache(String name) {
    // Check if cluster is being started up
    while (state == State.starting) {
        // Wait until cluster is fully started (or failed)
        try {
            Thread.sleep(250);
        } catch (InterruptedException e) {
            // Ignore
        }
    }
    if (state == State.stopped) {
        throw new IllegalStateException("Cannot create clustered cache when not in a cluster");
    }

    // Determine the time to live. Note that in Hazelcast 0 means "forever", not -1
    final long openfireLifetimeInMilliseconds = CacheFactory.getMaxCacheLifetime(name);
    final int hazelcastLifetimeInSeconds =
            openfireLifetimeInMilliseconds < 0 ? 0 : (int) (openfireLifetimeInMilliseconds / 1000);

    // Determine the max cache size. Note that in Hazelcast the max cache size must be positive
    final long openfireMaxCacheSize = CacheFactory.getMaxCacheSize(name);
    final int hazelcastMaxCacheSize =
            openfireMaxCacheSize < 0 ? Integer.MAX_VALUE : (int) openfireMaxCacheSize;

    final MapConfig mapConfig = hazelcast.getConfig().getMapConfig(name);
    mapConfig.setTimeToLiveSeconds(hazelcastLifetimeInSeconds);
    mapConfig.setMaxSizeConfig(new MaxSizeConfig(hazelcastMaxCacheSize, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));

    return new ClusteredCache(name, hazelcast.getMap(name), hazelcastLifetimeInSeconds);
}
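/*
 * Worked sketch (illustrative, not from the original source): the Openfire-to-Hazelcast
 * lifetime conversion above, extracted as a pure helper to show the edge cases.
 */
static int toHazelcastTtlSeconds(long openfireLifetimeMillis) {
    // Openfire uses a negative lifetime for "forever"; Hazelcast uses 0 for "forever".
    return openfireLifetimeMillis < 0 ? 0 : (int) (openfireLifetimeMillis / 1000);
}
// toHazelcastTtlSeconds(-1)        == 0    ("forever")
// toHazelcastTtlSeconds(3_600_000) == 3600 (one hour)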
@Test
public void test() {
    Properties props = getDefaultProperties();
    props.put(CacheEnvironment.SHUTDOWN_ON_STOP, "false");
    SessionFactory sf = createSessionFactory(props);

    HazelcastInstance hz = HazelcastAccessor.getHazelcastInstance(sf);
    assertNotSame(Hazelcast.getDefaultInstance(), hz);
    assertEquals(1, hz.getCluster().getMembers().size());

    MapConfig cfg = hz.getConfig().getMapConfig("com.hazelcast.hibernate.entity.*");
    assertNotNull(cfg);
    assertEquals(30, cfg.getTimeToLiveSeconds());
    assertEquals(50, cfg.getMaxSizeConfig().getSize());

    Hazelcast.getDefaultInstance().getLifecycleService().shutdown();
    sf.close();
    assertTrue(hz.getLifecycleService().isRunning());
    hz.getLifecycleService().shutdown();
}
void syncForDeadCountDownLatches(Address deadAddress) {
    final CMap cmap = maps.get(MapConfig.COUNT_DOWN_LATCH_MAP_NAME);
    if (deadAddress != null && cmap != null) {
        for (Record record : cmap.mapRecords.values()) {
            DistributedCountDownLatch cdl = (DistributedCountDownLatch) record.getValue();
            if (cdl != null && cdl.isOwnerOrMemberAddress(deadAddress)) {
                List<ScheduledAction> scheduledActions = record.getScheduledActions();
                if (scheduledActions != null) {
                    for (ScheduledAction sa : scheduledActions) {
                        node.clusterManager.deregisterScheduledAction(sa);
                        final Request sr = sa.getRequest();
                        sr.clearForResponse();
                        sr.lockAddress = deadAddress;
                        sr.longValue = CountDownLatchProxy.OWNER_LEFT;
                        returnResponse(sr);
                    }
                    scheduledActions.clear();
                }
                cdl.setOwnerLeft();
            }
        }
    }
}
private long doAtomicOp(ClusterOperation op, Data name, long value, Data expected) {
    long begin = currentTimeMillis();
    setLocal(op, MapConfig.ATOMIC_LONG_MAP_NAME, name, expected, 0, 0);
    request.longValue = value;
    doOp();
    Data backup = (Data) getResultAsIs();
    long responseValue = request.longValue;
    if (backup != null) {
        request.value = backup;
        request.longValue = 0L;
        backup(CONCURRENT_MAP_BACKUP_PUT);
        operationsCounter.incrementModified(currentTimeMillis() - begin);
    } else {
        operationsCounter.incrementNonModified(currentTimeMillis() - begin);
    }
    return responseValue;
}
private int doSemaphoreOp(ClusterOperation op, Data name, long longValue, Object value, long timeout) {
    // Declared locally so the snippet compiles, mirroring doAtomicOp above.
    long begin = currentTimeMillis();
    int responseValue = 1;
    if (longValue != 0L) {
        setLocal(op, MapConfig.SEMAPHORE_MAP_NAME, name, value, timeout, -1);
        request.longValue = longValue;
        doOp();
        Data backup = (Data) getResultAsIs();
        responseValue = (int) request.longValue;
        if (backup != null) {
            request.value = backup;
            request.longValue = 0L;
            backup(CONCURRENT_MAP_BACKUP_PUT);
            operationsCounter.incrementModified(currentTimeMillis() - begin);
        } else {
            operationsCounter.incrementNonModified(currentTimeMillis() - begin);
        }
    }
    return responseValue;
}
public void writeResponse(ManagementCenterService mcs, DataOutput dos) throws Exception {
    dos.writeBoolean(update);
    if (update) {
        mcs.callOnAllMembers(new UpdateMapConfigCallable(map, config));
        dos.writeUTF("success");
    } else {
        MapConfig cfg = (MapConfig) mcs.call(target, new GetMapConfigCallable(map));
        if (cfg != null) {
            dos.writeBoolean(true);
            cfg.writeData(dos);
        } else {
            dos.writeBoolean(false);
        }
    }
}
@Test
public void testMapIndexInitialization() {
    Config config = new Config();
    MapConfig mapConfig = config.getMapConfig("testMapIndexInitialization");
    mapConfig.addMapIndexConfig(new MapIndexConfig("name", false));
    mapConfig.addMapIndexConfig(new MapIndexConfig("age", true));

    HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
    IMap map = hz.getMap(mapConfig.getName());
    CMap cmap = TestUtil.getCMap(hz, mapConfig.getName());
    Map<Expression, Index> indexes = cmap.getMapIndexService().getIndexes();
    assertEquals(2, indexes.size());
    for (Entry<Expression, Index> e : indexes.entrySet()) {
        Index index = e.getValue();
        if ("name".equals(e.getKey().toString())) {
            assertFalse(index.isOrdered());
        } else if ("age".equals(e.getKey().toString())) {
            assertTrue(index.isOrdered());
        } else {
            fail("Unknown expression: " + e.getKey() + "! Has toString() of GetExpressionImpl changed?");
        }
    }
}
/**
 * Test for Issue 710.
 */
@Test
public void testEvictedEntryNotNullAfterLockAndGet() throws Exception {
    String mapName = "testLock";

    Config config = new XmlConfigBuilder().build();
    MapConfig mapConfig = new MapConfig();
    mapConfig.setName(mapName);
    mapConfig.setTimeToLiveSeconds(3);
    config.addMapConfig(mapConfig);

    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    IMap<Object, Object> m1 = h1.getMap(mapName);

    m1.put(1, 1);
    assertEquals(1, m1.get(1));
    Thread.sleep(5000);
    assertEquals(null, m1.get(1));

    m1.lock(1);
    assertEquals(null, m1.get(1));
    m1.put(1, 1);
    assertEquals(1, m1.get(1));
}
public ClusterManager(HazelcastConnection connection, List<HealthCheck> healthChecks,
                      HttpConfiguration httpConfiguration) throws Exception {
    this.hazelcastConnection = connection;
    this.healthChecks = healthChecks;

    MapConfig mapConfig = new MapConfig(MAP_NAME);
    mapConfig.setTimeToLiveSeconds(MAP_REFRESH_TIME + 2); // Reduce jitter
    mapConfig.setBackupCount(1);
    mapConfig.setAsyncBackupCount(2);
    mapConfig.setEvictionPolicy(EvictionPolicy.NONE);
    hazelcastConnection.getHazelcastConfig().getMapConfigs().put(MAP_NAME, mapConfig);

    String hostname = Inet4Address.getLocalHost().getCanonicalHostName();
    executor = Executors.newScheduledThreadPool(1);
    clusterMember = new ClusterMember(hostname, httpConfiguration.getPort());
}
@Verify(global = false)
public void verify() {
    if (isClient(targetInstance)) {
        return;
    }

    MapConfig mapConfig = targetInstance.getConfig().getMapConfig(name);
    logger.info(name + ": MapConfig: " + mapConfig);
    MapStoreConfig mapStoreConfig = mapConfig.getMapStoreConfig();
    logger.info(name + ": MapStoreConfig: " + mapStoreConfig);

    int sleepSeconds = mapConfig.getTimeToLiveSeconds() * 2 + mapStoreConfig.getWriteDelaySeconds() * 2;
    logger.info("Sleeping for " + sleepSeconds + " seconds to wait for delay and TTL values.");
    sleepSeconds(sleepSeconds);

    MapStoreWithCounterPerKey mapStore = (MapStoreWithCounterPerKey) mapStoreConfig.getImplementation();
    logger.info(name + ": map size = " + map.size());
    logger.info(name + ": map store = " + mapStore);
    logger.info(name + ": Checking if some keys were stored more than once");
    for (Object key : mapStore.keySet()) {
        assertEquals("There were multiple calls to MapStore.store", 1, mapStore.valueOf(key));
    }
}
void syncForDeadCountDownLatches(Address deadAddress) {
    if (deadAddress == null) {
        return;
    }
    final CMap cmap = maps.get(MapConfig.COUNT_DOWN_LATCH_MAP_NAME);
    // deadAddress is guaranteed non-null after the early return above.
    if (cmap != null) {
        for (Record record : cmap.mapRecords.values()) {
            DistributedCountDownLatch cdl = (DistributedCountDownLatch) record.getValue();
            if (cdl != null && cdl.isOwnerOrMemberAddress(deadAddress)) {
                List<ScheduledAction> scheduledActions = record.getScheduledActions();
                if (scheduledActions != null) {
                    for (ScheduledAction sa : scheduledActions) {
                        node.clusterManager.deregisterScheduledAction(sa);
                        final Request sr = sa.getRequest();
                        sr.clearForResponse();
                        sr.lockAddress = deadAddress;
                        sr.longValue = CountDownLatchProxy.OWNER_LEFT;
                        returnResponse(sr);
                    }
                    scheduledActions.clear();
                }
                cdl.setOwnerLeft();
            }
        }
    }
}
private long doAtomicOp(ClusterOperation op, Data name, long value, Data expected) {
    long begin = currentTimeMillis();
    setLocal(op, MapConfig.ATOMIC_LONG_MAP_NAME, name, expected, -1, 0);
    request.longValue = value;
    doOp();
    Data backup = (Data) getResultAsIs();
    long responseValue = request.longValue;
    if (backup != null) {
        request.value = backup;
        request.longValue = 0L;
        backup(CONCURRENT_MAP_BACKUP_PUT);
        operationsCounter.incrementModified(currentTimeMillis() - begin);
    } else {
        operationsCounter.incrementNonModified(currentTimeMillis() - begin);
    }
    return responseValue;
}