/**
 * Multi-line representation of this Insanity object, starting with
 * the Type and Msg, followed by each CacheEntry.toString() on its
 * own line prefaced by a tab character.
 */
@Override
public String toString() {
  StringBuilder buf = new StringBuilder();
  buf.append(getType()).append(": ");

  String m = getMsg();
  if (null != m) {
    buf.append(m);
  }
  buf.append('\n');

  CacheEntry[] ce = getCacheEntries();
  for (int i = 0; i < ce.length; i++) {
    buf.append('\t').append(ce[i].toString()).append('\n');
  }

  return buf.toString();
}
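// For orientation, a hedged sketch of the output shape; the placeholder
// lines stand in for whatever CacheEntry.toString() emits, which varies
// by Lucene version:
//
//   VALUEMISMATCH: Multiple distinct value objects for <ReaderField>
//   <tab><CacheEntry #0 toString()>
//   <tab><CacheEntry #1 toString()>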
/**
 * Asserts that FieldCacheSanityChecker does not detect any
 * problems with FieldCache.DEFAULT.
 * <p>
 * If any problems are found, they are logged to System.err
 * (along with the msg) when the Assertion is thrown.
 * </p>
 * <p>
 * This method is called by tearDown after every test method;
 * however, IndexReaders scoped inside test methods may be garbage
 * collected prior to this method being called, causing errors to
 * be overlooked. Tests are encouraged to keep their IndexReaders
 * scoped at the class level, or to explicitly call this method
 * directly in the same scope as the IndexReader.
 * </p>
 *
 * @see org.apache.lucene.util.FieldCacheSanityChecker
 */
protected static void assertSaneFieldCaches(final String msg) {
  final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
  Insanity[] insanity = null;
  try {
    try {
      insanity = FieldCacheSanityChecker.checkSanity(entries);
    } catch (RuntimeException e) {
      dumpArray(msg + ": FieldCache", entries, System.err);
      throw e;
    }

    assertEquals(msg + ": Insane FieldCache usage(s) found",
                 0, insanity.length);
    insanity = null;
  } finally {
    // report this in the event of any exception/failure
    // if no failure, then insanity will be null anyway
    if (null != insanity) {
      dumpArray(msg + ": Insane FieldCache usage(s)", insanity, System.err);
    }
  }
}
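// A hedged sketch of the pattern the Javadoc above recommends; the class,
// field, and test names are illustrative, not from the source.
public class MyFieldCacheTest extends LuceneTestCase {

  // class-scoped so the reader is still reachable when tearDown
  // calls assertSaneFieldCaches
  private static IndexReader reader;

  public void testSortingUsesFieldCache() throws Exception {
    // ... run searches/sorts against 'reader' that populate FieldCache ...

    // or assert explicitly, while the reader is unquestionably in scope:
    assertSaneFieldCaches("testSortingUsesFieldCache");
  }
}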
/**
 * Internal helper method used by check that iterates over
 * valMismatchKeys and generates a Collection of Insanity
 * instances accordingly. The MapOfSets are used to populate
 * the Insanity objects.
 *
 * @see InsanityType#VALUEMISMATCH
 */
private Collection<Insanity> checkValueMismatch(MapOfSets<Integer, CacheEntry> valIdToItems,
                                                MapOfSets<ReaderField, Integer> readerFieldToValIds,
                                                Set<ReaderField> valMismatchKeys) {

  final List<Insanity> insanity = new ArrayList<>(valMismatchKeys.size() * 3);

  if (!valMismatchKeys.isEmpty()) {
    // we have multiple values for some ReaderFields

    final Map<ReaderField, Set<Integer>> rfMap = readerFieldToValIds.getMap();
    final Map<Integer, Set<CacheEntry>> valMap = valIdToItems.getMap();
    for (final ReaderField rf : valMismatchKeys) {
      final List<CacheEntry> badEntries = new ArrayList<>(valMismatchKeys.size() * 2);
      for (final Integer value : rfMap.get(rf)) {
        for (final CacheEntry cacheEntry : valMap.get(value)) {
          badEntries.add(cacheEntry);
        }
      }

      CacheEntry[] badness = new CacheEntry[badEntries.size()];
      badness = badEntries.toArray(badness);

      insanity.add(new Insanity(InsanityType.VALUEMISMATCH,
                                "Multiple distinct value objects for " + rf.toString(),
                                badness));
    }
  }
  return insanity;
}
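// Hedged illustration of how a VALUEMISMATCH typically arises: caching the
// same reader+field under two different value types yields two distinct
// valIds for one ReaderField. Method names follow older FieldCache APIs;
// exact signatures vary by version.
int[] asInts = FieldCache.DEFAULT.getInts(reader, "price");       // first value object
float[] asFloats = FieldCache.DEFAULT.getFloats(reader, "price"); // second, distinct value object
// checkSanity(FieldCache.DEFAULT.getCacheEntries()) would now report
// InsanityType.VALUEMISMATCH for the "price" field on this reader.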
public Insanity(InsanityType type, String msg, CacheEntry... entries) {
  if (null == type) {
    throw new IllegalArgumentException("Insanity requires non-null InsanityType");
  }
  if (null == entries || 0 == entries.length) {
    throw new IllegalArgumentException("Insanity requires non-null/non-empty CacheEntry[]");
  }
  this.type = type;
  this.msg = msg;
  this.entries = entries;
}
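// Illustration of the argument checking; 'entry' is a hypothetical
// CacheEntry instance. Note that msg, unlike type and entries, may be null.
Insanity ok = new Insanity(InsanityType.VALUEMISMATCH, "dup values", entry);
Insanity alsoOk = new Insanity(InsanityType.SUBREADER, null, entry); // null msg is permitted
new Insanity(null, "msg", entry);           // throws IllegalArgumentException: null type
new Insanity(InsanityType.EXPECTED, "msg"); // throws IllegalArgumentException: empty entries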
@Override
public NamedList getStatistics() {
  NamedList stats = new SimpleOrderedMap();
  CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
  stats.add("entries_count", entries.length);
  for (int i = 0; i < entries.length; i++) {
    CacheEntry e = entries[i];
    stats.add("entry#" + i, e.toString());
  }

  Insanity[] insanity = checker.check(entries);

  stats.add("insanity_count", insanity.length);
  for (int i = 0; i < insanity.length; i++) {

    /* RAM estimation is both CPU and memory intensive... we don't want
       to do it unless asked.
       // we only estimate the size of insane entries
       for (CacheEntry e : insanity[i].getCacheEntries()) {
         // don't re-estimate if we've already done it.
         if (null == e.getEstimatedSize()) e.estimateSize();
       }
    */

    stats.add("insanity#" + i, insanity[i].toString());
  }
  return stats;
}
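// Roughly the shape of the returned NamedList for two cache entries that
// trip the checker once; the values here are invented for illustration:
//
//   entries_count: 2
//   entry#0: <CacheEntry #0 toString()>
//   entry#1: <CacheEntry #1 toString()>
//   insanity_count: 1
//   insanity#0: <Insanity #0 toString()>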
/**
 * Internal helper method used by check that iterates over
 * valMismatchKeys and generates a Collection of Insanity
 * instances accordingly. The MapOfSets are used to populate
 * the Insanity objects.
 *
 * @see InsanityType#VALUEMISMATCH
 */
private Collection<Insanity> checkValueMismatch(MapOfSets<Integer, CacheEntry> valIdToItems,
                                                MapOfSets<ReaderField, Integer> readerFieldToValIds,
                                                Set<ReaderField> valMismatchKeys) {

  final List<Insanity> insanity = new ArrayList<Insanity>(valMismatchKeys.size() * 3);

  if (!valMismatchKeys.isEmpty()) {
    // we have multiple values for some ReaderFields

    final Map<ReaderField, Set<Integer>> rfMap = readerFieldToValIds.getMap();
    final Map<Integer, Set<CacheEntry>> valMap = valIdToItems.getMap();
    for (final ReaderField rf : valMismatchKeys) {
      final List<CacheEntry> badEntries = new ArrayList<CacheEntry>(valMismatchKeys.size() * 2);
      for (final Integer value : rfMap.get(rf)) {
        for (final CacheEntry cacheEntry : valMap.get(value)) {
          badEntries.add(cacheEntry);
        }
      }

      CacheEntry[] badness = new CacheEntry[badEntries.size()];
      badness = badEntries.toArray(badness);

      insanity.add(new Insanity(InsanityType.VALUEMISMATCH,
                                "Multiple distinct value objects for " + rf.toString(),
                                badness));
    }
  }
  return insanity;
}
/**
 * Tests a CacheEntry[] for indication of "insane" cache usage.
 * <p>
 * <b>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
 * (:TODO: is this a bad idea? are we masking a real problem?)
 * </p>
 */
public Insanity[] check(CacheEntry... cacheEntries) {
  if (null == cacheEntries || 0 == cacheEntries.length) {
    return new Insanity[0];
  }

  // the indirect mapping lets MapOfSet dedup identical valIds for us
  //
  // maps the (valId) identityHashCode of cache values to
  // sets of CacheEntry instances
  final MapOfSets<Integer, CacheEntry> valIdToItems =
      new MapOfSets<>(new HashMap<Integer, Set<CacheEntry>>(17));
  // maps ReaderField keys to Sets of ValueIds
  final MapOfSets<ReaderField, Integer> readerFieldToValIds =
      new MapOfSets<>(new HashMap<ReaderField, Set<Integer>>(17));
  // any keys that we know result in more than one valId
  final Set<ReaderField> valMismatchKeys = new HashSet<>();

  // iterate over all the cacheEntries to get the mappings we'll need
  for (int i = 0; i < cacheEntries.length; i++) {
    final CacheEntry item = cacheEntries[i];
    final Object val = item.getValue();

    // It's OK to have dup entries, where one is e.g.
    // float[] and the other is the Bits (from
    // getDocsWithField())
    if (val != null && "BitsEntry".equals(val.getClass().getSimpleName())) {
      continue;
    }

    if (val instanceof FieldCache.CreationPlaceholder) {
      continue;
    }

    final ReaderField rf = new ReaderField(item.getReaderKey(), item.getFieldName());
    final Integer valId = Integer.valueOf(System.identityHashCode(val));

    // indirect mapping, so the MapOfSet will dedup identical valIds for us
    valIdToItems.put(valId, item);
    if (1 < readerFieldToValIds.put(rf, valId)) {
      valMismatchKeys.add(rf);
    }
  }

  final List<Insanity> insanity = new ArrayList<>(valMismatchKeys.size() * 3);

  insanity.addAll(checkValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
  insanity.addAll(checkSubreaders(valIdToItems, readerFieldToValIds));

  return insanity.toArray(new Insanity[insanity.size()]);
}
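// The dedup logic above relies on MapOfSets.put returning the size of the
// per-key Set after the add, so a second distinct valId for the same
// ReaderField surfaces as a return value greater than 1. A minimal sketch
// of that contract follows; the real class is org.apache.lucene.util.MapOfSets
// and has a richer API.
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class MapOfSetsSketch<K, V> {
  private final Map<K, Set<V>> theMap;

  MapOfSetsSketch(Map<K, Set<V>> m) {
    theMap = m;
  }

  /** Direct access to the underlying Map of Sets. */
  Map<K, Set<V>> getMap() {
    return theMap;
  }

  /** Adds val to the Set for key; returns that Set's size after the add. */
  int put(K key, V val) {
    Set<V> theSet = theMap.computeIfAbsent(key, k -> new HashSet<V>());
    theSet.add(val);
    return theSet.size();
  }
}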
/**
 * Quick and dirty convenience method that instantiates an instance with
 * "good defaults" and uses it to test the CacheEntry objects.
 *
 * @see #check
 */
public static Insanity[] checkSanity(CacheEntry... cacheEntries) {
  FieldCacheSanityChecker sanityChecker = new FieldCacheSanityChecker();
  sanityChecker.setRamUsageEstimator(true);
  return sanityChecker.check(cacheEntries);
}
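// Typical invocation against the global cache; the printing is illustrative.
Insanity[] problems =
    FieldCacheSanityChecker.checkSanity(FieldCache.DEFAULT.getCacheEntries());
for (Insanity problem : problems) {
  System.err.println(problem); // multi-line: type and msg, then one entry per line
}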
/**
 * Tests a CacheEntry[] for indication of "insane" cache usage.
 * <p>
 * <b>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
 * (:TODO: is this a bad idea? are we masking a real problem?)
 * </p>
 */
public Insanity[] check(CacheEntry... cacheEntries) {
  if (null == cacheEntries || 0 == cacheEntries.length) {
    return new Insanity[0];
  }

  if (estimateRam) {
    for (int i = 0; i < cacheEntries.length; i++) {
      cacheEntries[i].estimateSize();
    }
  }

  // the indirect mapping lets MapOfSet dedup identical valIds for us
  //
  // maps the (valId) identityHashCode of cache values to
  // sets of CacheEntry instances
  final MapOfSets<Integer, CacheEntry> valIdToItems =
      new MapOfSets<Integer, CacheEntry>(new HashMap<Integer, Set<CacheEntry>>(17));
  // maps ReaderField keys to Sets of ValueIds
  final MapOfSets<ReaderField, Integer> readerFieldToValIds =
      new MapOfSets<ReaderField, Integer>(new HashMap<ReaderField, Set<Integer>>(17));
  // any keys that we know result in more than one valId
  final Set<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

  // iterate over all the cacheEntries to get the mappings we'll need
  for (int i = 0; i < cacheEntries.length; i++) {
    final CacheEntry item = cacheEntries[i];
    final Object val = item.getValue();

    // It's OK to have dup entries, where one is e.g.
    // float[] and the other is the Bits (from
    // getDocsWithField())
    if (val instanceof Bits) {
      continue;
    }

    if (val instanceof FieldCache.CreationPlaceholder) {
      continue;
    }

    final ReaderField rf = new ReaderField(item.getReaderKey(), item.getFieldName());
    final Integer valId = Integer.valueOf(System.identityHashCode(val));

    // indirect mapping, so the MapOfSet will dedup identical valIds for us
    valIdToItems.put(valId, item);
    if (1 < readerFieldToValIds.put(rf, valId)) {
      valMismatchKeys.add(rf);
    }
  }

  final List<Insanity> insanity = new ArrayList<Insanity>(valMismatchKeys.size() * 3);

  insanity.addAll(checkValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
  insanity.addAll(checkSubreaders(valIdToItems, readerFieldToValIds));

  return insanity.toArray(new Insanity[insanity.size()]);
}
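// This variant assumes an estimateRam flag on the checker. A minimal sketch
// of the field and setter implied by checkSanity's setRamUsageEstimator(true)
// call above; hedged, since only the call sites appear in these snippets.
private boolean estimateRam;

/** If true, check(...) calls estimateSize() on each CacheEntry up front. */
public void setRamUsageEstimator(boolean flag) {
  estimateRam = flag;
}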