OrdScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
  super(groupField, sortWithinGroup.getSort().length);
  ordSet = new SentinelIntSet(initialSize, -2);
  collectedGroups = new ArrayList<>(initialSize);

  final SortField[] sortFields = sortWithinGroup.getSort();
  fields = new SortField[sortFields.length];
  sortsIndex = new SortedDocValues[sortFields.length];
  for (int i = 0; i < sortFields.length; i++) {
    reversed[i] = sortFields[i].getReverse() ? -1 : 1;
    fields[i] = sortFields[i];
  }
}
OrdAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
  super(groupField, sortWithinGroup.getSort().length);
  ordSet = new SentinelIntSet(initialSize, -2);
  collectedGroups = new ArrayList<>(initialSize);

  final SortField[] sortFields = sortWithinGroup.getSort();
  fields = new SortField[sortFields.length];
  sortsIndex = new SortedDocValues[sortFields.length];
  for (int i = 0; i < sortFields.length; i++) {
    reversed[i] = sortFields[i].getReverse() ? -1 : 1;
    fields[i] = sortFields[i];
  }
}
ScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
  super(groupField, sortWithinGroup.getSort().length);
  ordSet = new SentinelIntSet(initialSize, -2);
  collectedGroups = new ArrayList<>(initialSize);

  final SortField[] sortFields = sortWithinGroup.getSort();
  fields = new SortField[sortFields.length];
  for (int i = 0; i < sortFields.length; i++) {
    reversed[i] = sortFields[i].getReverse() ? -1 : 1;
    fields[i] = sortFields[i];
  }
}
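All three constructors above pass -2 as the SentinelIntSet sentinel, and that choice matters: the keys these collectors store are SortedDocValues ordinals, which are >= 0 for documents that have a value and -1 for documents that don't, so -1 can be a legitimate key and only -2 is guaranteed never to collide. A minimal sketch of the idiom, with an illustrative initial size and variable names that are not from the source:

import org.apache.lucene.util.SentinelIntSet;

public class OrdTrackingSketch {
  public static void main(String[] args) {
    // -2 as the "empty slot" marker: valid ords are >= 0, and -1 means
    // "missing", so -1 may actually be stored as a key.
    SentinelIntSet seenOrds = new SentinelIntSet(16, -2);
    int groupOrd = -1; // e.g. a doc whose group field has no value
    if (!seenOrds.exists(groupOrd)) {
      seenOrds.put(groupOrd); // first time this group (including "missing") is seen
      // ... a collector would allocate and track a new GroupHead here ...
    }
  }
}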
@SuppressWarnings({"unchecked"}) public TermSecondPassGroupingCollector(String groupField, Collection<SearchGroup<BytesRef>> groups, Sort groupSort, Sort withinGroupSort, int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException { super(groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); ordSet = new SentinelIntSet(groupMap.size(), -2); this.groupField = groupField; groupDocs = (SearchGroupDocs<BytesRef>[]) new SearchGroupDocs[ordSet.keys.length]; }
/**
 * Constructs a {@link TermDistinctValuesCollector} instance.
 *
 * @param groupField The field to group by
 * @param countField The field to count distinct values for
 * @param groups     The top N groups, collected during the first-phase search
 */
public TermDistinctValuesCollector(String groupField, String countField, Collection<SearchGroup<BytesRef>> groups) {
  this.groupField = groupField;
  this.countField = countField;
  this.groups = new ArrayList<>(groups.size());
  for (SearchGroup<BytesRef> group : groups) {
    this.groups.add(new GroupCount(group.groupValue));
  }
  ordSet = new SentinelIntSet(groups.size(), -2);
  groupCounts = new GroupCount[ordSet.keys.length];
}
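Both this constructor and TermSecondPassGroupingCollector above size a companion array to ordSet.keys.length rather than to the element count. A hedged sketch of why, assuming (per the Lucene javadocs) that put returns the hash slot the key landed in: the slot index can then address the parallel array directly, with no second lookup. Names here are illustrative:

import org.apache.lucene.util.SentinelIntSet;

public class SlotParallelArraySketch {
  public static void main(String[] args) {
    SentinelIntSet ordSet = new SentinelIntSet(8, -2);
    // The companion array mirrors the hash table, slot for slot.
    String[] perSlotData = new String[ordSet.keys.length];

    int slot = ordSet.put(42);            // put returns the occupied slot index
    perSlotData[slot] = "data for ord 42";

    // find returns the same slot for a key that exists (negative if absent).
    System.out.println(perSlotData[ordSet.find(42)]);
  }
}

Slot indices stay valid only while no rehash occurs; since the constructor's size argument is the expected element count, pre-sizing the set to groups.size() and filling it with at most that many ordinals appears to be what keeps these slots stable.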
/**
 * Given a set of params, executes a cursor query using {@link #CURSOR_MARK_START}
 * and then continuously walks the results using {@link #CURSOR_MARK_NEXT} as long
 * as a non-zero number of docs are returned. This method records the set of all ids
 * (which must be positive ints) encountered, and throws an assertion failure if any
 * id is encountered more than once or if the set grows above maxSize.
 */
public SentinelIntSet assertFullWalkNoDups(int maxSize, SolrParams params) throws Exception {
  SentinelIntSet ids = new SentinelIntSet(maxSize, -1);
  String cursorMark = CURSOR_MARK_START;
  int docsOnThisPage = Integer.MAX_VALUE;
  while (0 < docsOnThisPage) {
    String json = assertJQ(req(params, CURSOR_MARK_PARAM, cursorMark));
    Map rsp = (Map) ObjectBuilder.fromJSON(json);
    assertTrue("response doesn't contain " + CURSOR_MARK_NEXT + ": " + json,
               rsp.containsKey(CURSOR_MARK_NEXT));
    String nextCursorMark = (String) rsp.get(CURSOR_MARK_NEXT);
    assertNotNull(CURSOR_MARK_NEXT + " is null", nextCursorMark);
    List<Map<Object,Object>> docs = (List) (((Map) rsp.get("response")).get("docs"));
    docsOnThisPage = docs.size();
    if (null != params.getInt(CommonParams.ROWS)) {
      int rows = params.getInt(CommonParams.ROWS);
      assertTrue("Too many docs on this page: " + rows + " < " + docsOnThisPage,
                 docsOnThisPage <= rows);
    }
    if (0 == docsOnThisPage) {
      assertEquals("no more docs, but " + CURSOR_MARK_NEXT + " isn't the same",
                   cursorMark, nextCursorMark);
    }
    for (Map<Object,Object> doc : docs) {
      int id = ((Long) doc.get("id")).intValue();
      assertFalse("walk already seen: " + id, ids.exists(id));
      ids.put(id);
      assertFalse("id set bigger than max allowed (" + maxSize + "): " + ids.size(),
                  maxSize < ids.size());
    }
    cursorMark = nextCursorMark;
  }
  return ids;
}
/** test faceting with deep paging */
public void testFacetingWithRandomSorts() throws Exception {
  final int numDocs = TestUtil.nextInt(random(), 1000, 3000);
  String[] fieldsToFacetOn = { "int", "long", "str" };
  String[] facetMethods = { "enum", "fc", "fcs" };

  for (int i = 1; i <= numDocs; i++) {
    SolrInputDocument doc = buildRandomDocument(i);
    assertU(adoc(doc));
  }
  assertU(commit());

  Collection<String> allFieldNames = getAllSortFieldNames();
  String[] fieldNames = new String[allFieldNames.size()];
  allFieldNames.toArray(fieldNames);
  String f = fieldNames[TestUtil.nextInt(random(), 0, fieldNames.length - 1)];
  String order = 0 == TestUtil.nextInt(random(), 0, 1) ? " asc" : " desc";
  String sort = f + order + (f.equals("id") ? "" : ", id" + order);
  String rows = "" + TestUtil.nextInt(random(), 13, 50);
  String facetField = fieldsToFacetOn[TestUtil.nextInt(random(), 0, fieldsToFacetOn.length - 1)];
  String facetMethod = facetMethods[TestUtil.nextInt(random(), 0, facetMethods.length - 1)];
  SentinelIntSet ids = assertFullWalkNoDupsWithFacets
    (numDocs, params("q", "*:*",
                     "fl", "id," + facetField,
                     "facet", "true",
                     "facet.field", facetField,
                     "facet.method", facetMethod,
                     "facet.missing", "true",
                     "facet.limit", "-1", // unlimited
                     "rows", rows,
                     "sort", sort));
  assertEquals(numDocs, ids.size());
}
public SmallDocSet(int size) { intSet = new SentinelIntSet(size, -1); }
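Here the sentinel flips to -1: the ints tracked are doc ids, and internal Lucene doc ids are never negative, so -1 is a safe "empty slot" marker (the cursor-walk helpers in this section rely on the same property for their positive document ids). A small self-contained sketch, with illustrative values:

import org.apache.lucene.util.SentinelIntSet;

public class DocIdSetSketch {
  public static void main(String[] args) {
    // -1 is safe as the empty marker because doc ids are always >= 0.
    SentinelIntSet intSet = new SentinelIntSet(64, -1);
    intSet.put(5);
    intSet.put(17);
    System.out.println(intSet.exists(5));  // true
    System.out.println(intSet.exists(9));  // false
    System.out.println(intSet.size());     // 2
  }
}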
public ElevationComparatorSource(final QueryElevationComponent.ElevationObj elevations) {
  this.elevations = elevations;
  int size = elevations.ids.size();
  ordSet = new SentinelIntSet(size, -1);
  termValues = new BytesRef[ordSet.keys.length];
}
/** test that our assumptions about how caches are affected hold true */
public void testCacheImpacts() throws Exception {
  // cursor queries can't live in the queryResultCache, but independent filters
  // should still be cached & reused

  // don't add in order of any field, to ensure we aren't inadvertently
  // counting on internal docid ordering
  assertU(adoc("id", "9", "str", "c", "float", "-3.2", "int", "42"));
  assertU(adoc("id", "7", "str", "c", "float", "-3.2", "int", "-1976"));
  assertU(adoc("id", "2", "str", "c", "float", "-3.2", "int", "666"));
  assertU(adoc("id", "0", "str", "b", "float", "64.5", "int", "-42"));
  assertU(adoc("id", "5", "str", "b", "float", "64.5", "int", "2001"));
  assertU(adoc("id", "8", "str", "b", "float", "64.5", "int", "4055"));
  assertU(adoc("id", "6", "str", "a", "float", "64.5", "int", "7"));
  assertU(adoc("id", "1", "str", "a", "float", "64.5", "int", "7"));
  assertU(adoc("id", "4", "str", "a", "float", "11.1", "int", "6"));
  assertU(adoc("id", "3", "str", "a", "float", "11.1", "int", "3"));
  assertU(commit());

  final Collection<String> allFieldNames = getAllSortFieldNames();

  final SolrInfoMBean filterCacheStats = h.getCore().getInfoRegistry().get("filterCache");
  assertNotNull(filterCacheStats);
  final SolrInfoMBean queryCacheStats = h.getCore().getInfoRegistry().get("queryResultCache");
  assertNotNull(queryCacheStats);

  final long preQcIn = (Long) queryCacheStats.getStatistics().get("inserts");
  final long preFcIn = (Long) filterCacheStats.getStatistics().get("inserts");
  final long preFcHits = (Long) filterCacheStats.getStatistics().get("hits");

  SentinelIntSet ids = assertFullWalkNoDups
    (10, params("q", "*:*",
                "rows", "" + TestUtil.nextInt(random(), 1, 11),
                "fq", "-id:[1 TO 2]",
                "fq", "-id:[6 TO 7]",
                "fl", "id",
                "sort", buildRandomSort(allFieldNames)));
  assertEquals(6, ids.size());

  final long postQcIn = (Long) queryCacheStats.getStatistics().get("inserts");
  final long postFcIn = (Long) filterCacheStats.getStatistics().get("inserts");
  final long postFcHits = (Long) filterCacheStats.getStatistics().get("hits");

  assertEquals("query cache inserts changed", preQcIn, postQcIn);
  // NOTE: use of pure negative filters causes "*:*" to be tracked in the filterCache
  assertEquals("filter cache did not grow correctly", 3, postFcIn - preFcIn);
  assertTrue("filter cache did not have any new cache hits", 0 < postFcHits - preFcHits);
}
/**
 * <p>
 * Given a set of params, executes a cursor query using {@link #CURSOR_MARK_START}
 * and then continuously walks the results using {@link #CURSOR_MARK_NEXT} as long
 * as a non-zero number of docs are returned. This method records the set of all ids
 * (which must be positive ints) encountered, and throws an assertion failure if any
 * id is encountered more than once or if the set grows above maxSize.
 * </p>
 *
 * <p>
 * Note that this method explicitly uses the "cloudClient" for executing the queries,
 * instead of relying on the test infrastructure to execute the queries redundantly
 * against both the cloud client and a control client. This is because term-stat
 * differences in a sharded setup can result in different scores for documents compared
 * to the control index -- which can affect the sorting in some cases and cause false
 * negatives in the response comparisons (even if we don't include "score" in the "fl").
 * </p>
 */
public SentinelIntSet assertFullWalkNoDups(int maxSize, SolrParams params) throws Exception {
  SentinelIntSet ids = new SentinelIntSet(maxSize, -1);
  String cursorMark = CURSOR_MARK_START;
  int docsOnThisPage = Integer.MAX_VALUE;
  while (0 < docsOnThisPage) {
    final SolrParams p = p(params, CURSOR_MARK_PARAM, cursorMark);
    QueryResponse rsp = cloudClient.query(p);
    String nextCursorMark = assertHashNextCursorMark(rsp);
    SolrDocumentList docs = extractDocList(rsp);
    docsOnThisPage = docs.size();
    if (null != params.getInt(CommonParams.ROWS)) {
      int rows = params.getInt(CommonParams.ROWS);
      assertTrue("Too many docs on this page: " + rows + " < " + docsOnThisPage,
                 docsOnThisPage <= rows);
    }
    if (0 == docsOnThisPage) {
      assertEquals("no more docs, but " + CURSOR_MARK_NEXT + " isn't the same",
                   cursorMark, nextCursorMark);
    }
    for (SolrDocument doc : docs) {
      int id = ((Integer) doc.get("id")).intValue();
      if (ids.exists(id)) {
        String msg = "(" + p + ") walk already seen: " + id;
        try {
          queryAndCompareShards(params("distrib", "false", "q", "id:" + id));
        } catch (AssertionError ae) {
          throw (AssertionError) new AssertionError
            (msg + ", found shard inconsistency that would explain it...").initCause(ae);
        }
        rsp = cloudClient.query(params("q", "id:" + id));
        throw new AssertionError(msg + ", don't know why; q=id:" + id + " gives: " + rsp.toString());
      }
      ids.put(id);
      assertFalse("id set bigger than max allowed (" + maxSize + "): " + ids.size(),
                  maxSize < ids.size());
    }
    cursorMark = nextCursorMark;
  }
  return ids;
}
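Stripped of the response plumbing, the duplicate detection in both walk helpers reduces to the same three SentinelIntSet calls. A distilled sketch (the id values and maxSize are illustrative):

import org.apache.lucene.util.SentinelIntSet;

public class DedupWalkSketch {
  public static void main(String[] args) {
    int maxSize = 10;
    SentinelIntSet ids = new SentinelIntSet(maxSize, -1);
    for (int id : new int[] {3, 7, 42}) {   // ids seen while paging
      if (ids.exists(id)) {                 // already seen? the walk revisited a doc
        throw new AssertionError("walk already seen: " + id);
      }
      ids.put(id);
      if (maxSize < ids.size()) {           // more unique ids than documents exist
        throw new AssertionError("id set bigger than max allowed (" + maxSize + ")");
      }
    }
  }
}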