Java 类org.elasticsearch.index.query.TermsQueryBuilder 实例源码

项目:elasticsearch_my    文件:ContextAndHeaderTransportIT.java   
/**
 * Indexes a lookup document and a matching query document, runs a terms-lookup
 * query through the transport client, and then verifies that the GET requests
 * issued internally for the lookup carried the expected context and headers.
 */
public void testThatTermsLookupGetRequestContainsContextAndHeaders() throws Exception {
    // Lookup doc holds the term values ("followers") that the query fetches via GET.
    transportClient().prepareIndex(lookupIndex, "type", "1")
        .setSource(jsonBuilder().startObject().array("followers", "foo", "bar", "baz").endObject()).get();
    // Query doc should match, because "foo" is among the followers.
    transportClient().prepareIndex(queryIndex, "type", "1")
        .setSource(jsonBuilder().startObject().field("username", "foo").endObject()).get();
    transportClient().admin().indices().prepareRefresh(queryIndex, lookupIndex).get();

    TermsLookup lookup = new TermsLookup(lookupIndex, "type", "1", "followers");
    TermsQueryBuilder lookupQuery = QueryBuilders.termsLookupQuery("username", lookup);
    BoolQueryBuilder query = QueryBuilders.boolQuery()
        .must(QueryBuilders.matchAllQuery())
        .must(lookupQuery);

    SearchResponse response = transportClient().prepareSearch(queryIndex).setQuery(query).get();
    assertNoFailures(response);
    assertHitCount(response, 1);

    assertGetRequestsContainHeaders();
}
项目:onetwo    文件:SimpleSearchQueryBuilder.java   
/**
 * Builds a terms query for {@code field} from the given values and hands it to
 * {@code consumer} (e.g. to add it as a must/mustNot clause).
 *
 * <p>Null values are filtered out. If the first surviving value is itself a
 * {@link Collection}, that collection supplies the term set. When no usable
 * values remain (all null, or a single empty collection), the consumer is not
 * invoked and this builder is returned unchanged.
 *
 * @param field    the document field to match; must contain text
 * @param consumer receives the constructed {@link TermsQueryBuilder}; never called with null
 * @param values   candidate term values; may be empty, contain nulls, or be a single Collection
 * @return this builder, for chaining
 */
public SimpleBooleanQueryBuilder<PB> doTerms(String field, Consumer<TermsQueryBuilder> consumer, Object... values){
    Assert.hasText(field);
    if(values==null || values.length==0){
        return this;
    }
    List<Object> listValue = Lists.newArrayList(values);
    listValue.removeIf(Objects::isNull);
    if(listValue.isEmpty()){
        return this;
    }
    TermsQueryBuilder termQueryBuilder = null;
    if(listValue.get(0) instanceof Collection){
        Collection<?> colValue = (Collection<?>) listValue.get(0);
        if(!colValue.isEmpty()){
            termQueryBuilder = QueryBuilders.termsQuery(field, colValue);
        }
    }else{
        termQueryBuilder = QueryBuilders.termsQuery(field, listValue.toArray(new Object[0]));
    }
    // Fix: when the first value was an empty collection, the builder stayed null
    // and a null was handed to the consumer, which would NPE in typical usages
    // such as must()/mustNot(). Skip the callback instead.
    if(termQueryBuilder != null){
        consumer.accept(termQueryBuilder);
    }
    return this;
}
项目:elasticsearch_my    文件:TermsLookup.java   
/**
 * Creates a terms lookup that points at the document holding the term values.
 * The {@code id}, {@code type} and {@code path} are mandatory; {@code index}
 * may be null (it can be resolved elsewhere).
 *
 * @throws IllegalArgumentException if id, type or path is null
 */
public TermsLookup(String index, String type, String id, String path) {
    // Validate in the same order as before so the reported element is stable.
    requireLookupElement(id, "id");
    requireLookupElement(type, "type");
    requireLookupElement(path, "path");
    this.index = index;
    this.type = type;
    this.id = id;
    this.path = path;
}

/** Rejects a null lookup element with the standard error message. */
private static void requireLookupElement(Object value, String name) {
    if (value == null) {
        throw new IllegalArgumentException("[" + TermsQueryBuilder.NAME + "] query lookup element requires specifying the " + name + ".");
    }
}
项目:elasticsearch_my    文件:TermsLookup.java   
/**
 * Reads a terms-lookup element ({@code index}, {@code type}, {@code id},
 * {@code path}, {@code routing}) from the parser, which must be positioned
 * inside the lookup object, and builds the corresponding {@link TermsLookup}.
 *
 * @throws ParsingException on an unknown field or unexpected token
 */
public static TermsLookup parseTermsLookup(XContentParser parser) throws IOException {
    String index = null;
    String type = null;
    String id = null;
    String path = null;
    String routing = null;
    String fieldName = "";
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            fieldName = parser.currentName();
        } else if (token.isValue()) {
            // index and routing tolerate explicit nulls; the rest must be real text.
            if ("index".equals(fieldName)) {
                index = parser.textOrNull();
            } else if ("type".equals(fieldName)) {
                type = parser.text();
            } else if ("id".equals(fieldName)) {
                id = parser.text();
            } else if ("routing".equals(fieldName)) {
                routing = parser.textOrNull();
            } else if ("path".equals(fieldName)) {
                path = parser.text();
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME +
                    "] query does not support [" + fieldName + "] within lookup element");
            }
        } else {
            throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token ["
                + token + "] after [" + fieldName + "]");
        }
    }
    return new TermsLookup(index, type, id, path).routing(routing);
}
项目:sunbird-utils    文件:ElasticSearchUtil.java   
/**
 * Builds a terms query matching any of {@code values} in field {@code key},
 * applying {@code boost} only when one is supplied.
 *
 * @param key    the field name to query
 * @param values the candidate values, matched as-is
 * @param boost  optional boost factor; ignored when null
 * @return the configured terms query
 */
private static TermsQueryBuilder createTermsQuery(String key, List<?> values, Float boost) {
  // List.toArray() already yields an Object[]; the stream round-trip was redundant.
  // Building the query once also removes the duplicated branch bodies.
  TermsQueryBuilder query = QueryBuilders.termsQuery(key, values.toArray());
  if (isNotNull(boost)) {
    query.boost(boost);
  }
  return query;
}
项目:elasticsearch_my    文件:SearchModule.java   
/**
 * Registers every built-in query with the search registry, then any queries
 * contributed by {@link SearchPlugin}s. Each {@code QuerySpec} pairs the query
 * name with its stream-input constructor and its XContent parser.
 */
private void registerQueryParsers(List<SearchPlugin> plugins) {
    registerQuery(new QuerySpec<>(MatchQueryBuilder.NAME, MatchQueryBuilder::new, MatchQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MatchPhraseQueryBuilder.NAME, MatchPhraseQueryBuilder::new, MatchPhraseQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MatchPhrasePrefixQueryBuilder.NAME, MatchPhrasePrefixQueryBuilder::new,
            MatchPhrasePrefixQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MultiMatchQueryBuilder.NAME, MultiMatchQueryBuilder::new, MultiMatchQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(NestedQueryBuilder.NAME, NestedQueryBuilder::new, NestedQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(HasChildQueryBuilder.NAME, HasChildQueryBuilder::new, HasChildQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(HasParentQueryBuilder.NAME, HasParentQueryBuilder::new, HasParentQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(DisMaxQueryBuilder.NAME, DisMaxQueryBuilder::new, DisMaxQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(IdsQueryBuilder.NAME, IdsQueryBuilder::new, IdsQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new, MatchAllQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(QueryStringQueryBuilder.NAME, QueryStringQueryBuilder::new, QueryStringQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(BoostingQueryBuilder.NAME, BoostingQueryBuilder::new, BoostingQueryBuilder::fromXContent));
    // NOTE: this sets a JVM-wide Lucene limit from the node settings; it is a
    // side effect unrelated to the registration that surrounds it.
    BooleanQuery.setMaxClauseCount(INDICES_MAX_CLAUSE_COUNT_SETTING.get(settings));
    registerQuery(new QuerySpec<>(BoolQueryBuilder.NAME, BoolQueryBuilder::new, BoolQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(TermQueryBuilder.NAME, TermQueryBuilder::new, TermQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(TermsQueryBuilder.NAME, TermsQueryBuilder::new, TermsQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(FuzzyQueryBuilder.NAME, FuzzyQueryBuilder::new, FuzzyQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(RegexpQueryBuilder.NAME, RegexpQueryBuilder::new, RegexpQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(RangeQueryBuilder.NAME, RangeQueryBuilder::new, RangeQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(PrefixQueryBuilder.NAME, PrefixQueryBuilder::new, PrefixQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(WildcardQueryBuilder.NAME, WildcardQueryBuilder::new, WildcardQueryBuilder::fromXContent));
    registerQuery(
            new QuerySpec<>(ConstantScoreQueryBuilder.NAME, ConstantScoreQueryBuilder::new, ConstantScoreQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanTermQueryBuilder.NAME, SpanTermQueryBuilder::new, SpanTermQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanNotQueryBuilder.NAME, SpanNotQueryBuilder::new, SpanNotQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanWithinQueryBuilder.NAME, SpanWithinQueryBuilder::new, SpanWithinQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanContainingQueryBuilder.NAME, SpanContainingQueryBuilder::new,
            SpanContainingQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(FieldMaskingSpanQueryBuilder.NAME, FieldMaskingSpanQueryBuilder::new,
            FieldMaskingSpanQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanFirstQueryBuilder.NAME, SpanFirstQueryBuilder::new, SpanFirstQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanNearQueryBuilder.NAME, SpanNearQueryBuilder::new, SpanNearQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(SpanOrQueryBuilder.NAME, SpanOrQueryBuilder::new, SpanOrQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new,
            MoreLikeThisQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(WrapperQueryBuilder.NAME, WrapperQueryBuilder::new, WrapperQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(CommonTermsQueryBuilder.NAME, CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent));
    registerQuery(
            new QuerySpec<>(SpanMultiTermQueryBuilder.NAME, SpanMultiTermQueryBuilder::new, SpanMultiTermQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(FunctionScoreQueryBuilder.NAME, FunctionScoreQueryBuilder::new,
            FunctionScoreQueryBuilder::fromXContent));
    registerQuery(
            new QuerySpec<>(SimpleQueryStringBuilder.NAME, SimpleQueryStringBuilder::new, SimpleQueryStringBuilder::fromXContent));
    registerQuery(new QuerySpec<>(TypeQueryBuilder.NAME, TypeQueryBuilder::new, TypeQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(ScriptQueryBuilder.NAME, ScriptQueryBuilder::new, ScriptQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(GeoDistanceQueryBuilder.NAME, GeoDistanceQueryBuilder::new, GeoDistanceQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(GeoBoundingBoxQueryBuilder.NAME, GeoBoundingBoxQueryBuilder::new,
            GeoBoundingBoxQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(GeoPolygonQueryBuilder.NAME, GeoPolygonQueryBuilder::new, GeoPolygonQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(ExistsQueryBuilder.NAME, ExistsQueryBuilder::new, ExistsQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(MatchNoneQueryBuilder.NAME, MatchNoneQueryBuilder::new, MatchNoneQueryBuilder::fromXContent));
    registerQuery(new QuerySpec<>(ParentIdQueryBuilder.NAME, ParentIdQueryBuilder::new, ParentIdQueryBuilder::fromXContent));

    // Geo-shape queries need the optional JTS/Spatial4j libraries on the classpath.
    if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
        registerQuery(new QuerySpec<>(GeoShapeQueryBuilder.NAME, GeoShapeQueryBuilder::new, GeoShapeQueryBuilder::fromXContent));
    }

    // Finally pick up any queries contributed by plugins.
    registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery);
}
项目:elasticsearch_my    文件:ShrinkIndexIT.java   
/**
 * Shrinks an index twice in a row (e.g. 8 -&gt; 4 -&gt; 2 shards, per a randomly
 * picked split triple), verifying after each step that all 20 documents remain
 * searchable and that the shrunken indices can still be written to, while the
 * original indices are left intact.
 */
public void testCreateShrinkIndexToN() {
    // Each triple is a valid chain: every count evenly divides the one before it.
    int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
    int[] shardSplits = randomFrom(possibleShardSplits);
    assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);
    assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get();
    // Seed 20 documents; every later hit-count assertion expects exactly these.
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", "t1", Integer.toString(i))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
        .getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String mergeNode = discoveryNodes[0].getName();
    // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    // relocate all shards to one node such that we can merge it.
    client().admin().indices().prepareUpdateSettings("source")
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", mergeNode)
            .put("index.blocks.write", true)).get();
    ensureGreen();
    // now merge source into a 4 shard index
    assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink")
        .setSettings(Settings.builder()
            .put("index.number_of_replicas", 0)
            .put("index.number_of_shards", shardSplits[1]).build()).get());
    ensureGreen();
    assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

    for (int i = 0; i < 20; i++) { // now update
        client().prepareIndex("first_shrink", "t1", Integer.toString(i))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    // Updates reuse the same ids, so counts stay at 20 in both indices.
    assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

    // relocate all shards to one node such that we can merge it.
    client().admin().indices().prepareUpdateSettings("first_shrink")
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", mergeNode)
            .put("index.blocks.write", true)).get();
    ensureGreen();
    // now merge source into a 2 shard index
    assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink")
        .setSettings(Settings.builder()
            .put("index.number_of_replicas", 0)
            .put("index.number_of_shards", shardSplits[2]).build()).get());
    ensureGreen();
    assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    // let it be allocated anywhere and bump replicas
    client().admin().indices().prepareUpdateSettings("second_shrink")
        .setSettings(Settings.builder()
            .putNull("index.routing.allocation.include._id")
            .put("index.number_of_replicas", 1)).get();
    ensureGreen();
    assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

    for (int i = 0; i < 20; i++) { // now update
        client().prepareIndex("second_shrink", "t1", Integer.toString(i))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    // Final sanity: both shrink generations and the original all still hold 20 docs.
    assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}
项目:elasticsearch_my    文件:ShrinkIndexIT.java   
/**
 * Shrinks a randomly sharded source index to a single-shard target (randomly
 * created with or without a replica), then verifies document counts, that the
 * target accepts further writes, and that it preserves the source's
 * {@code index.version.created} setting.
 */
public void testCreateShrinkIndex() {
    internalCluster().ensureAtLeastNumDataNodes(2);
    Version version = VersionUtils.randomVersion(random());
    prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
        .put("number_of_shards", randomIntBetween(2, 7))
        .put("index.version.created", version)
    ).get();
    // Seed 20 documents across random types; the first count assertions expect 20.
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", randomFrom("t1", "t2", "t3"))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
        .getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String mergeNode = discoveryNodes[0].getName();
    // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    // relocate all shards to one node such that we can merge it.
    client().admin().indices().prepareUpdateSettings("source")
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", mergeNode)
            .put("index.blocks.write", true)).get();
    ensureGreen();
    // now merge source into a single shard index

    final boolean createWithReplicas = randomBoolean();
    assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
        .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get());
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

    if (createWithReplicas == false) {
        // bump replicas
        client().admin().indices().prepareUpdateSettings("target")
            .setSettings(Settings.builder()
                .put("index.number_of_replicas", 1)).get();
        ensureGreen();
        assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    }

    // Index 20 new documents (fresh ids 20..39) into the shrunken target only.
    for (int i = 20; i < 40; i++) {
        client().prepareIndex("target", randomFrom("t1", "t2", "t3"))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    // Target grew to 40; the original source index is untouched at 20.
    assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40);
    assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
    GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get();
    // The shrunken index must carry over the source's creation version.
    assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
}
项目:elasticsearch_my    文件:ShrinkIndexIT.java   
/**
 * Tests that we can manually recover from a failed allocation due to shards being moved away etc.
 *
 * <p>The test deliberately makes the shrink target unallocatable (by excluding
 * the merge node and capping retries at 1), waits for the allocation to fail,
 * restores a valid shard layout, and then verifies that a manual
 * {@code reroute?retry_failed} brings the target green with all documents.
 */
public void testCreateShrinkIndexFails() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
        .put("number_of_shards", randomIntBetween(2, 7))
        .put("number_of_replicas", 0)).get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", randomFrom("t1", "t2", "t3"))
            .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
        .getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String spareNode = discoveryNodes[0].getName();
    String mergeNode = discoveryNodes[1].getName();
    // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    // relocate all shards to one node such that we can merge it.
    client().admin().indices().prepareUpdateSettings("source")
        .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)
            .put("index.blocks.write", true)).get();
    ensureGreen();

    // now merge source into a single shard index
    client().admin().indices().prepareShrinkIndex("source", "target")
        .setWaitForActiveShards(ActiveShardCount.NONE)
        .setSettings(Settings.builder()
            .put("index.routing.allocation.exclude._name", mergeNode) // exclude the merge node on purpose to break allocation
            .put("index.number_of_replicas", 0)
            .put("index.allocation.max_retries", 1).build()).get();
    client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();

    // now we move all shards away from the merge node
    client().admin().indices().prepareUpdateSettings("source")
        .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode)
            .put("index.blocks.write", true)).get();
    ensureGreen("source");

    client().admin().indices().prepareUpdateSettings("target") // remove the deliberate exclusion again
        .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get();
    // wait until it fails
    assertBusy(() -> {
        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
        RoutingTable routingTables = clusterStateResponse.getState().routingTable();
        assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
        assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,
            routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason());
        assertEquals(1,
            routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
    });
    client().admin().indices().prepareUpdateSettings("source") // now relocate them all to the right node
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", mergeNode)).get();
    ensureGreen("source");

    final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class,
        internalCluster().getMasterName());
    infoService.refresh();
    // kick off a retry and wait until it's done!
    ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
    long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target")
        .shard(0).getShards().get(0).getExpectedShardSize();
    // we support the expected shard size in the allocator to sum up over the source index shards
    assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}