/**
 * Registers every Netty4 HTTP-transport and TCP-transport setting this plugin exposes.
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
        Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
        Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT,
        Netty4HttpServerTransport.SETTING_HTTP_TCP_NO_DELAY,
        Netty4HttpServerTransport.SETTING_HTTP_TCP_KEEP_ALIVE,
        Netty4HttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS,
        Netty4HttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
        Netty4HttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
        Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE,
        Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN,
        Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX,
        Netty4Transport.WORKER_COUNT,
        Netty4Transport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
        Netty4Transport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
        Netty4Transport.NETTY_RECEIVE_PREDICTOR_SIZE,
        Netty4Transport.NETTY_RECEIVE_PREDICTOR_MIN,
        Netty4Transport.NETTY_RECEIVE_PREDICTOR_MAX,
        Netty4Transport.NETTY_BOSS_COUNT);
}
/**
 * Builds the remote-cluster seed mapping from the {@code REMOTE_CLUSTERS_SEEDS} affix
 * setting: one map entry per configured cluster alias (the setting namespace), each
 * mapping to the list of seed {@link DiscoveryNode}s parsed from that alias's addresses.
 * Seed node ids are "&lt;alias&gt;#&lt;address&gt;" and are created with
 * {@code Version.CURRENT.minimumCompatibilityVersion()} because the remote node's real
 * version is unknown until a connection is made.
 * NOTE(review): Collectors.toMap without a merge function throws on duplicate keys —
 * presumably namespaces are unique per concrete setting; confirm.
 */
static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) { Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); return allConcreteSettings.collect( Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); List<DiscoveryNode> nodes = new ArrayList<>(); for (InetSocketAddress address : concreteSetting.get(settings)) { TransportAddress transportAddress = new TransportAddress(address); DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(), transportAddress, Version.CURRENT.minimumCompatibilityVersion()); nodes.add(node); } return nodes; })); }
/**
 * Registering a settings-update consumer works for a setting known to the index module,
 * and is rejected for a setting that was never registered with the index scoped settings.
 */
public void testListener() throws IOException {
    Setting<Boolean> booleanSetting = Setting.boolSetting("index.foo.bar", false, Property.Dynamic, Property.IndexScope);
    IndexModule module = new IndexModule(
        IndexSettingsModule.newIndexSettings(index, settings, booleanSetting),
        new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()));
    Setting<Boolean> booleanSetting2 = Setting.boolSetting("index.foo.bar.baz", false, Property.Dynamic, Property.IndexScope);
    AtomicBoolean atomicBoolean = new AtomicBoolean(false);
    module.addSettingsUpdateConsumer(booleanSetting, atomicBoolean::set);
    // booleanSetting2 was never registered, so wiring a consumer for it must fail.
    // Use expectThrows (consistent with the other tests in this file) instead of the
    // try/fail/empty-catch idiom, which silently discards the thrown exception.
    expectThrows(IllegalArgumentException.class,
        () -> module.addSettingsUpdateConsumer(booleanSetting2, atomicBoolean::set));
    IndexService indexService = newIndexService(module);
    assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey()));
    indexService.close("simon says", false);
}
/**
 * A registered settings-update consumer fires only when the index metadata actually
 * changes, and receives the new value of the dynamic setting.
 */
public void testRunListener() {
    Version version = VersionUtils.getPreviousVersion();
    Settings theSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, version)
        .put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef")
        .build();
    final AtomicInteger integer = new AtomicInteger(0);
    Setting<Integer> integerSetting =
        Setting.intSetting("index.test.setting.int", -1, Property.Dynamic, Property.IndexScope);
    IndexMetaData metaData = newIndexMeta("index", theSettings);
    IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting);
    settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set);
    assertEquals(version, settings.getIndexVersionCreated());
    assertEquals("0xdeadbeef", settings.getUUID());
    // re-applying identical metadata is a no-op and must not invoke the consumer
    assertFalse(settings.updateIndexMetaData(metaData));
    assertEquals(metaData.getSettings().getAsMap(), settings.getSettings().getAsMap());
    assertEquals(0, integer.get());
    // a genuine change fires the consumer with the updated value
    assertTrue(settings.updateIndexMetaData(newIndexMeta("index",
        Settings.builder().put(theSettings).put("index.test.setting.int", 42).build())));
    assertEquals(42, integer.get());
}
/**
 * A settings-update consumer registered together with a validator: a value the validator
 * rejects (42 here) causes updateIndexMetaData to fail with IllegalArgumentException and
 * the consumer never observes the rejected value, while an accepted value (41) is applied.
 * NOTE(review): the validator throws AssertionError("boom") yet the test expects
 * IllegalArgumentException — presumably the scoped-settings machinery wraps validator
 * failures; confirm against AbstractScopedSettings.
 */
public void testSettingsUpdateValidator() { Version version = VersionUtils.getPreviousVersion(); Settings theSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version) .put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build(); final AtomicInteger integer = new AtomicInteger(0); Setting<Integer> integerSetting = Setting.intSetting("index.test.setting.int", -1, Property.Dynamic, Property.IndexScope); IndexMetaData metaData = newIndexMeta("index", theSettings); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set, (i) -> {if (i == 42) throw new AssertionError("boom");}); assertEquals(version, settings.getIndexVersionCreated()); assertEquals("0xdeadbeef", settings.getUUID()); assertFalse(settings.updateIndexMetaData(metaData)); assertEquals(metaData.getSettings().getAsMap(), settings.getSettings().getAsMap()); assertEquals(0, integer.get()); expectThrows(IllegalArgumentException.class, () -> settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(theSettings).put("index.test.setting.int", 42).build()))); assertTrue(settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(theSettings).put("index.test.setting.int", 41) .build()))); assertEquals(41, integer.get()); }
/**
 * When legacy AWS credentials are defined at the elasticsearch, s3, and repositories
 * levels simultaneously, the most specific (repositories-level) pair wins; every legacy
 * key used must emit a deprecation warning.
 */
public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsBackcompat() {
    Settings settings = Settings.builder()
        .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key")
        .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret")
        .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3_key")
        .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3_secret")
        .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key")
        .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret")
        .build();
    launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "repositories_key", "repositories_secret");
    assertSettingDeprecationsAndWarnings(new Setting<?>[] {
        AwsS3Service.KEY_SETTING,
        AwsS3Service.SECRET_SETTING,
        AwsS3Service.CLOUD_S3.KEY_SETTING,
        AwsS3Service.CLOUD_S3.SECRET_SETTING,
        S3Repository.Repositories.KEY_SETTING,
        S3Repository.Repositories.SECRET_SETTING});
}
/**
 * Legacy cloud.aws.* client configuration (protocol, proxy, signer, read timeout) is
 * honored by the S3 client builder; every legacy key used must emit a deprecation warning.
 */
public void testAWSConfigurationWithAwsSettingsBackcompat() {
    Settings settings = Settings.builder()
        .put(AwsS3Service.PROTOCOL_SETTING.getKey(), "http")
        .put(AwsS3Service.PROXY_HOST_SETTING.getKey(), "aws_proxy_host")
        .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 8080)
        .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username")
        .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password")
        .put(AwsS3Service.SIGNER_SETTING.getKey(), "AWS3SignerType")
        .put(AwsS3Service.READ_TIMEOUT.getKey(), "10s")
        .build();
    // "10s" read timeout is expected to surface as 10000 milliseconds
    launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080,
        "aws_proxy_username", "aws_proxy_password", "AWS3SignerType", 3, false, 10000);
    assertSettingDeprecationsAndWarnings(new Setting<?>[] {
        AwsS3Service.PROXY_USERNAME_SETTING,
        AwsS3Service.PROXY_PASSWORD_SETTING,
        AwsS3Service.PROTOCOL_SETTING,
        AwsS3Service.PROXY_HOST_SETTING,
        AwsS3Service.PROXY_PORT_SETTING,
        AwsS3Service.SIGNER_SETTING,
        AwsS3Service.READ_TIMEOUT});
}
@Override public List<Setting<?>> getSettings() { return Arrays.asList(SETTING_INXPROXY_DATA_PATH, // SETTING_INXPROXY_DATA_FILE_FORMAT, // SETTING_INXPROXY_DATA_FILE_SIZE, // SETTING_INXPROXY_SENDER_INTERVAL, // SETTING_INXPROXY_SENDER_RETRY_COUNT, // SETTING_INXPROXY_SENDER_REQUEST_RETRY_COUNT, // SETTING_INXPROXY_SENDER_SKIP_ERROR_FILE, // SETTING_INXPROXY_SENDER_LOOKUP_FILES, // SETTING_INXPROXY_MONITOR_INTERVAL, // SETTING_INXPROXY_WRITER_RETRY_COUNT, // SETTING_INXPROXY_SENDER_NODES, // SETTING_INXPROXY_WRITE_NODES, // SETTING_INXPROXY_FLUSH_PER_DOC, // SETTING_INXPROXY_NUMBER_OF_REPLICAS, // SETTING_INXPROXY_NUMBER_OF_SHARDS, // SETTING_INXPROXY_TARGET_INDICES, // SETTING_INXPROXY_RENEW_ACTIONS); }
/**
 * Sends a request whose initial HTTP line exceeds the largest
 * {@code http.max_initial_line_length} configured on any node and asserts the server
 * rejects it with HTTP 400 and a too_long_frame_exception.
 */
public void testBadRequest() throws IOException {
    final Response response = client().performRequest("GET", "/_nodes/settings", Collections.emptyMap());
    final ObjectPath objectPath = ObjectPath.createFromResponse(response);
    final Map<String, Object> map = objectPath.evaluate("nodes");
    final Setting<ByteSizeValue> httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
    // node settings report the key without the leading "http." prefix
    final String key = httpMaxInitialLineLength.getKey().substring("http.".length());
    // hoisted: previously this default was computed in two duplicated else-branches
    final int defaultMaxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt();
    int maxMaxInitialLineLength = Integer.MIN_VALUE;
    for (final Map.Entry<String, Object> entry : map.entrySet()) {
        @SuppressWarnings("unchecked")
        final Map<String, Object> settings = (Map<String, Object>) ((Map<String, Object>) entry.getValue()).get("settings");
        // fixed local-variable typo: maxIntialLineLength -> maxInitialLineLength
        int maxInitialLineLength = defaultMaxInitialLineLength;
        if (settings.containsKey("http")) {
            @SuppressWarnings("unchecked")
            final Map<String, Object> httpSettings = (Map<String, Object>) settings.get("http");
            if (httpSettings.containsKey(key)) {
                maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt();
            }
        }
        maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength);
    }
    // a path exactly as long as the largest limit guarantees the initial request line
    // ("GET /aaa... HTTP/1.1") exceeds the limit on every node
    final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a');
    final ResponseException e = expectThrows(
        ResponseException.class,
        () -> client().performRequest(randomFrom("GET", "POST", "PUT"), path, Collections.emptyMap()));
    assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
    assertThat(e, hasToString(containsString("too_long_frame_exception")));
    assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes")));
}
/**
 * Registers the URL-repository settings: the URL whitelist, per-repository URLs,
 * and the protocols the repository may use.
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        URLRepository.ALLOWED_URLS_SETTING,
        URLRepository.REPOSITORIES_URL_SETTING,
        URLRepository.SUPPORTED_PROTOCOLS_SETTING);
}
/**
 * Registers the settings used by the deprecation-header REST test action:
 * two deprecated settings and one non-deprecated control setting.
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        TestDeprecationHeaderRestAction.TEST_DEPRECATED_SETTING_TRUE1,
        TestDeprecationHeaderRestAction.TEST_DEPRECATED_SETTING_TRUE2,
        TestDeprecationHeaderRestAction.TEST_NOT_DEPRECATED_SETTING);
}
/**
 * Convenience method to assert warnings for settings deprecations together with
 * additional general deprecation warnings.
 *
 * @param settings the settings that are expected to be deprecated
 * @param warnings other expected general deprecation warnings
 */
protected final void assertSettingDeprecationsAndWarnings(final Setting<?>[] settings, final String... warnings) {
    // one standard deprecation message per deprecated setting key
    final Stream<String> settingWarnings = Arrays.stream(settings)
        .map(Setting::getKey)
        .map(key -> "[" + key + "] setting was deprecated in Elasticsearch and will be removed in a future release! "
            + "See the breaking changes documentation for the next major version.");
    assertWarnings(Stream.concat(settingWarnings, Arrays.stream(warnings)).toArray(String[]::new));
}
/**
 * Creates an {@link IndexSettings} for the given index, layering the supplied index
 * settings over mandatory created-version/replica/shard defaults and registering any
 * extra settings alongside the built-in index-scoped settings.
 */
public static IndexSettings newIndexSettings(Index index, Settings indexSetting, Settings nodeSettings, Setting<?>... setting) {
    Settings merged = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(indexSetting)
        .build();
    IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(merged).build();
    Set<Setting<?>> registered = new HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
    if (setting.length > 0) {
        registered.addAll(Arrays.asList(setting));
    }
    return new IndexSettings(metaData, nodeSettings, new IndexScopedSettings(Settings.EMPTY, registered));
}
/**
 * Creates an {@link IndexSettings} for the given metadata with empty node settings,
 * registering any extra settings alongside the built-in index-scoped settings.
 */
public static IndexSettings newIndexSettings(final IndexMetaData indexMetaData, Setting<?>... setting) {
    Set<Setting<?>> registered = new HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
    if (setting.length > 0) {
        registered.addAll(Arrays.asList(setting));
    }
    return new IndexSettings(indexMetaData, Settings.EMPTY, new IndexScopedSettings(Settings.EMPTY, registered));
}
/**
 * Registers the mock filesystem directory test settings (crash injection, random I/O
 * exception rates, and close-time index checking).
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        INDEX_CHECK_INDEX_ON_CLOSE_SETTING,
        MockFSDirectoryService.CRASH_INDEX_SETTING,
        MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING,
        MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING,
        MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE_SETTING,
        MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING);
}
/**
 * Derives the per-context and per-language script enable settings from the given
 * engine and context registries, exposing both as unmodifiable collections.
 */
public ScriptSettings(ScriptEngineRegistry scriptEngineRegistry, ScriptContextRegistry scriptContextRegistry) {
    Map<ScriptContext, Setting<Boolean>> perContext = contextSettings(scriptContextRegistry);
    this.scriptContextSettingMap = Collections.unmodifiableMap(perContext);
    List<Setting<Boolean>> perLanguage =
        languageSettings(SCRIPT_TYPE_SETTING_MAP, perContext, scriptEngineRegistry, scriptContextRegistry);
    this.scriptLanguageSettings = Collections.unmodifiableList(perLanguage);
}
/**
 * Creates one boolean enable setting (node scope, default {@code false}) per
 * registered script context, keyed by the context itself.
 */
private static Map<ScriptContext, Setting<Boolean>> contextSettings(ScriptContextRegistry scriptContextRegistry) {
    Map<ScriptContext, Setting<Boolean>> result = new HashMap<>();
    for (ScriptContext context : scriptContextRegistry.scriptContexts()) {
        result.put(context, Setting.boolSetting(ScriptModes.operationKey(context), false, Property.NodeScope));
    }
    return result;
}
/**
 * Returns all script settings: per-type, per-context, and per-language.
 */
public List<Setting<?>> getSettings() {
    List<Setting<?>> all = new ArrayList<>();
    all.addAll(SCRIPT_TYPE_SETTING_MAP.values());
    all.addAll(scriptContextSettingMap.values());
    all.addAll(scriptLanguageSettings);
    return all;
}
/**
 * Resolves every script-language setting against the node settings into an
 * unmodifiable key-to-enabled map.
 */
ScriptModes(ScriptSettings scriptSettings, Settings settings) {
    Map<String, Boolean> modes = new HashMap<>();
    for (Setting<Boolean> mode : scriptSettings.getScriptLanguageSettings()) {
        modes.put(mode.getKey(), mode.get(settings));
    }
    this.scriptEnabled = Collections.unmodifiableMap(modes);
}
/**
 * Adds a setting and its update consumer for this index.
 *
 * @param setting  the setting to observe; must not be null
 * @param consumer invoked with the new value whenever the setting changes
 * @throws IllegalArgumentException if {@code setting} is null
 */
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
    ensureNotFrozen();
    if (setting == null) {
        throw new IllegalArgumentException("setting must not be null");
    }
    indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer);
}
/**
 * Adds a setting, its update consumer, and a validator for this index.
 *
 * @param setting   the setting to observe; must not be null
 * @param consumer  invoked with the new value whenever the setting changes
 * @param validator invoked before the consumer; may reject the new value by throwing
 * @throws IllegalArgumentException if {@code setting} is null
 */
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
    ensureNotFrozen();
    if (setting == null) {
        throw new IllegalArgumentException("setting must not be null");
    }
    indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer, validator);
}
/**
 * Builds node settings for a tribe client node from the tribe node's global settings,
 * combined with tribe specific settings.
 *
 * The client is forced to be a coordinating-only node (no data/master/ingest roles,
 * no local storage), inherits the parent's path settings, and derives a deterministic
 * node id from the parent node id plus the tribe name.
 *
 * @throws IllegalArgumentException if the tribe settings attempt to override any path.* setting
 */
static Settings buildClientSettings(String tribeName, String parentNodeId, Settings globalSettings, Settings tribeSettings) {
    // tribe clients must not override filesystem paths — they share the parent's layout
    for (String tribeKey : tribeSettings.getAsMap().keySet()) {
        if (tribeKey.startsWith("path.")) {
            throw new IllegalArgumentException("Setting [" + tribeKey + "] not allowed in tribe client [" + tribeName + "]");
        }
    }
    Settings.Builder sb = Settings.builder().put(tribeSettings);
    sb.put(Node.NODE_NAME_SETTING.getKey(), Node.NODE_NAME_SETTING.get(globalSettings) + "/" + tribeName);
    sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(globalSettings)); // pass through ES home dir
    if (Environment.PATH_CONF_SETTING.exists(globalSettings)) {
        sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(globalSettings));
    }
    if (Environment.PATH_LOGS_SETTING.exists(globalSettings)) {
        sb.put(Environment.PATH_LOGS_SETTING.getKey(), Environment.PATH_LOGS_SETTING.get(globalSettings));
    }
    if (Environment.PATH_SCRIPTS_SETTING.exists(globalSettings)) {
        sb.put(Environment.PATH_SCRIPTS_SETTING.getKey(), Environment.PATH_SCRIPTS_SETTING.get(globalSettings));
    }
    // copy whitelisted global settings unless the tribe settings already define them
    for (Setting<?> passthrough : PASS_THROUGH_SETTINGS) {
        if (passthrough.exists(tribeSettings) == false && passthrough.exists(globalSettings)) {
            sb.put(passthrough.getKey(), globalSettings.get(passthrough.getKey()));
        }
    }
    sb.put(TRIBE_NAME_SETTING.getKey(), tribeName);
    // HTTP defaults to disabled on tribe clients; only the tribe node itself serves HTTP
    if (sb.get(NetworkModule.HTTP_ENABLED.getKey()) == null) {
        sb.put(NetworkModule.HTTP_ENABLED.getKey(), false);
    }
    sb.put(Node.NODE_DATA_SETTING.getKey(), false);
    sb.put(Node.NODE_MASTER_SETTING.getKey(), false);
    sb.put(Node.NODE_INGEST_SETTING.getKey(), false);
    // node id of a tribe client node is determined by node id of parent node and tribe name
    final BytesRef seedAsString = new BytesRef(parentNodeId + "/" + tribeName);
    long nodeIdSeed = MurmurHash3.hash128(seedAsString.bytes, seedAsString.offset, seedAsString.length, 0, new MurmurHash3.Hash128()).h1;
    sb.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), nodeIdSeed);
    sb.put(Node.NODE_LOCAL_STORAGE_SETTING.getKey(), false);
    return sb.build();
}
/**
 * Updates the cluster block on each index only if the controlling setting is
 * explicitly present in the given settings: adds the block when the setting is
 * {@code true}, removes it when {@code false}, and does nothing when absent.
 */
private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block,
                                            Setting<Boolean> setting, Settings openSettings) {
    if (setting.exists(openSettings) == false) {
        return; // setting not provided — leave existing blocks untouched
    }
    final boolean enable = setting.get(openSettings);
    for (String index : actualIndices) {
        if (enable) {
            blocks.addIndexBlock(index, block);
        } else {
            blocks.removeIndexBlock(index, block);
        }
    }
}
/**
 * Builds the index.number_of_shards setting. The upper bound is a JVM-level safety
 * limit ({@code es.index.max_number_of_shards}, default 1024) that requires a node
 * restart to change; it exists so an index cannot accidentally be created with an
 * enormous per-index shard count that would OOM the cluster. It does not limit the
 * total number of shards in the cluster.
 */
static Setting<Integer> buildNumberOfShardsSetting() {
    final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024"));
    if (maxNumShards < 1) {
        throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
    }
    // default is 5 shards unless the configured ceiling is even lower
    return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards, Property.IndexScope);
}
/**
 * Construct a fixed executor builder.
 *
 * The pool-size setting defaults to {@code size} but is clamped at parse time to
 * [1, applyHardSizeLimit(settings, name)]; the queue-size setting is a plain int
 * setting where -1 means an unbounded queue.
 *
 * @param settings the node-level settings
 * @param name the name of the executor
 * @param size the fixed number of threads
 * @param queueSize the size of the backing queue, -1 for unbounded
 * @param prefix the prefix for the settings keys (e.g. "thread_pool.&lt;name&gt;")
 */
public FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) { super(name); final String sizeKey = settingsKey(prefix, "size"); this.sizeSetting = new Setting<>( sizeKey, s -> Integer.toString(size), s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), Setting.Property.NodeScope); final String queueSizeKey = settingsKey(prefix, "queue_size"); this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope); }
/**
 * Adds the exception-injection ratio settings on top of the parent plugin's settings.
 */
@Override
public List<Setting<?>> getSettings() {
    final List<Setting<?>> settings = new ArrayList<>(super.getSettings());
    settings.add(EXCEPTION_TOP_LEVEL_RATIO_SETTING);
    settings.add(EXCEPTION_LOW_LEVEL_RATIO_SETTING);
    return settings;
}
/**
 * A native script registered via NativeScriptEngineService can be compiled and
 * executed through the ScriptService, returning the factory's result ("test").
 * NOTE(review): the local {@code scriptSettings} list is populated (including the
 * added VERSION_CREATED setting) but never used afterwards — looks like dead code
 * left from an earlier version of the test; confirm before removing, since
 * getSettings() might return an internal list.
 */
public void testNativeScript() throws InterruptedException { Settings settings = Settings.builder() .put("node.name", "testNativeScript") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) .build(); ScriptModule scriptModule = new ScriptModule(settings, new Environment(settings), null, singletonList(new NativeScriptEngineService(settings, singletonMap("my", new MyNativeScriptFactory()))), emptyList()); List<Setting<?>> scriptSettings = scriptModule.getSettings(); scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); ExecutableScript executable = scriptModule.getScriptService().executable( new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "my", Collections.emptyMap()), ScriptContext.Standard.SEARCH); assertThat(executable.run().toString(), equalTo("test")); }
/**
 * The generic {@code script.inline} setting propagates to every per-language
 * {@code *.inline} setting, including its raw default.
 */
public void testSettingsAreProperlyPropogated() {
    ScriptEngineRegistry scriptEngineRegistry =
        new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
    ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
    ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
    boolean enabled = randomBoolean();
    Settings s = Settings.builder().put("script.inline", enabled).build();
    // enhanced for-loop: the original used an explicit Iterator but never called remove()
    for (Setting<Boolean> setting : scriptSettings.getScriptLanguageSettings()) {
        if (setting.getKey().endsWith(".inline")) {
            assertThat("inline settings should have propagated", setting.get(s), equalTo(enabled));
            assertThat(setting.getDefaultRaw(s), equalTo(Boolean.toString(enabled)));
        }
    }
}
/**
 * Creates an {@link IndexSettings} for the given metadata and node settings,
 * registering any extra settings alongside the built-in index-scoped settings.
 */
public IndexSettings newIndexSettings(IndexMetaData metaData, Settings nodeSettings, Setting<?>... settings) {
    Set<Setting<?>> registered = new HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
    if (settings.length > 0) {
        registered.addAll(Arrays.asList(settings));
    }
    return new IndexSettings(metaData, nodeSettings, new IndexScopedSettings(Settings.EMPTY, registered));
}
/**
 * Node-level settings act as a fallback layer under index-level settings: while the
 * index defines "index.foo.bar" the index value (0, then 42) wins; once the index
 * metadata drops the key, the consumer falls back to the node-level value (43).
 */
public void testNodeSettingsAreContained() { final int numShards = randomIntBetween(1, 10); final int numReplicas = randomIntBetween(0, 10); Settings theSettings = Settings.builder(). put("index.foo.bar", 0) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards).build(); Settings nodeSettings = Settings.builder().put("index.foo.bar", 43).build(); final AtomicInteger indexValue = new AtomicInteger(0); Setting<Integer> integerSetting = Setting.intSetting("index.foo.bar", -1, Property.Dynamic, Property.IndexScope); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, indexValue::set); assertEquals(numReplicas, settings.getNumberOfReplicas()); assertEquals(numShards, settings.getNumberOfShards()); assertEquals(0, indexValue.get()); assertTrue(settings.updateIndexMetaData(newIndexMeta("index", Settings.builder(). put("index.foo.bar", 42) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas + 1) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards).build()))); assertEquals(42, indexValue.get()); assertSame(nodeSettings, settings.getNodeSettings()); assertTrue(settings.updateIndexMetaData(newIndexMeta("index", Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas + 1) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards).build()))); assertEquals(43, indexValue.get()); }
/**
 * Index-scoped allocation filter settings must reject malformed IP addresses for
 * the _ip/_host_ip/_publish_ip attribute keys.
 */
public void testInvalidIPFilter() {
    String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
    Setting<Settings> filterSetting = randomFrom(
        IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
        IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
        IndexScopedSettings indexScopedSettings =
            new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
        indexScopedSettings.updateDynamicSettings(
            Settings.builder().put(filterSetting.getKey() + ipKey, "192..168.1.1").build(),
            Settings.builder().put(Settings.EMPTY), Settings.builder(), "test ip validation");
    });
    assertEquals("invalid IP address [192..168.1.1] for [" + ipKey + "]", e.getMessage());
}
/**
 * Cluster-scoped allocation filter settings must reject malformed IP addresses for
 * the _ip/_host_ip/_publish_ip attribute keys when applied via the update-settings API.
 */
public void testInvalidIPFilterClusterSettings() {
    String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
    Setting<Settings> filterSetting = randomFrom(
        FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
        FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
        FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
        () -> client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1."))
            .execute().actionGet());
    assertEquals("invalid IP address [192.168.1.1.] for [" + ipKey + "]", e.getMessage());
}
/**
 * Secure settings supplied to prepareEnvironment remain readable through a
 * SecureSetting against the prepared environment's settings.
 */
public void testSecureSettings() {
    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("foo", "secret");
    Settings input = Settings.builder().put(baseEnvSettings).setSecureSettings(secureSettings).build();
    Environment env = InternalSettingsPreparer.prepareEnvironment(input, null);
    Setting<SecureString> fakeSetting = SecureSetting.secureString("foo", null, false);
    assertEquals("secret", fakeSetting.get(env.settings()).toString());
}
@Override public List<Setting<?>> getSettings() { return Arrays.asList( // Register global cloud aws settings: cloud.aws (might have been registered in ec2 plugin) AwsEc2Service.KEY_SETTING, AwsEc2Service.SECRET_SETTING, AwsEc2Service.PROTOCOL_SETTING, AwsEc2Service.PROXY_HOST_SETTING, AwsEc2Service.PROXY_PORT_SETTING, AwsEc2Service.PROXY_USERNAME_SETTING, AwsEc2Service.PROXY_PASSWORD_SETTING, AwsEc2Service.SIGNER_SETTING, AwsEc2Service.REGION_SETTING, AwsEc2Service.READ_TIMEOUT, // Register EC2 specific settings: cloud.aws.ec2 AwsEc2Service.CLOUD_EC2.KEY_SETTING, AwsEc2Service.CLOUD_EC2.SECRET_SETTING, AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING, AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING, AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING, AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING, AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING, AwsEc2Service.CLOUD_EC2.SIGNER_SETTING, AwsEc2Service.CLOUD_EC2.REGION_SETTING, AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING, AwsEc2Service.CLOUD_EC2.READ_TIMEOUT, // Register EC2 discovery settings: discovery.ec2 AwsEc2Service.DISCOVERY_EC2.HOST_TYPE_SETTING, AwsEc2Service.DISCOVERY_EC2.ANY_GROUP_SETTING, AwsEc2Service.DISCOVERY_EC2.GROUPS_SETTING, AwsEc2Service.DISCOVERY_EC2.AVAILABILITY_ZONES_SETTING, AwsEc2Service.DISCOVERY_EC2.NODE_CACHE_TIME_SETTING, AwsEc2Service.DISCOVERY_EC2.TAG_SETTING, // Register cloud node settings: cloud.node AwsEc2Service.AUTO_ATTRIBUTE_SETTING); }
/**
 * Get a given setting from the repository settings, throwing a
 * {@link RepositoryException} if the setting does not exist or is an empty string.
 */
static <T> T getSetting(Setting<T> setting, RepositoryMetaData metadata) {
    final T value = setting.get(metadata.settings());
    if (value == null) {
        throw new RepositoryException(metadata.name(), "Setting [" + setting.getKey() + "] is not defined for repository");
    }
    // string-valued settings must additionally contain non-whitespace text
    if (value instanceof String && Strings.hasText((String) value) == false) {
        throw new RepositoryException(metadata.name(), "Setting [" + setting.getKey() + "] is empty for repository");
    }
    return value;
}
/**
 * Registers the Azure classic discovery settings: management-API credentials and
 * subscription details plus the discovery refresh/host/deployment options.
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        AzureComputeService.Discovery.REFRESH_SETTING,
        AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING,
        AzureComputeService.Management.KEYSTORE_PATH_SETTING,
        AzureComputeService.Management.KEYSTORE_TYPE_SETTING,
        AzureComputeService.Management.SUBSCRIPTION_ID_SETTING,
        AzureComputeService.Management.SERVICE_NAME_SETTING,
        AzureComputeService.Discovery.HOST_TYPE_SETTING,
        AzureComputeService.Discovery.DEPLOYMENT_NAME_SETTING,
        AzureComputeService.Discovery.DEPLOYMENT_SLOT_SETTING,
        AzureComputeService.Discovery.ENDPOINT_NAME_SETTING);
}
/**
 * Reads a required azure setting, failing fast when it is missing or blank.
 *
 * @throws IllegalArgumentException if the setting resolves to null or an empty string
 */
private static String getRequiredSetting(Settings settings, Setting<String> setting) {
    final String value = setting.get(settings);
    if (value != null && Strings.hasLength(value)) {
        return value;
    }
    throw new IllegalArgumentException("Missing required setting " + setting.getKey() + " for azure");
}
/**
 * Registers the Azure repository storage settings: account configuration plus the
 * per-repository container, path, compression, chunking, and location-mode options.
 */
@Override
public List<Setting<?>> getSettings() {
    return Arrays.asList(
        AzureStorageService.Storage.STORAGE_ACCOUNTS,
        AzureStorageService.Storage.ACCOUNT_SETTING,
        AzureStorageService.Storage.COMPRESS_SETTING,
        AzureStorageService.Storage.CONTAINER_SETTING,
        AzureStorageService.Storage.BASE_PATH_SETTING,
        AzureStorageService.Storage.CHUNK_SIZE_SETTING,
        AzureStorageService.Storage.LOCATION_MODE_SETTING);
}
/**
 * Resolves a repository-scoped setting, falling back to the repositories-level
 * setting read from the node's global settings when the repository does not set it.
 */
public static <T> T getValue(Settings repositorySettings, Settings globalSettings,
                             Setting<T> repositorySetting, Setting<T> repositoriesSetting) {
    return repositorySetting.exists(repositorySettings)
        ? repositorySetting.get(repositorySettings)
        : repositoriesSetting.get(globalSettings);
}