public static AtomicGeoPointFieldData empty(final int maxDoc) {
    return new AbstractAtomicGeoPointFieldData() {
        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public MultiGeoPointValues getGeoPointValues() {
            return FieldData.emptyMultiGeoPoints(maxDoc);
        }
    };
}
public static AtomicOrdinalsFieldData empty() {
    return new AbstractAtomicOrdinalsFieldData(DEFAULT_SCRIPT_FUNCTION) {
        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public RandomAccessOrds getOrdinalsValues() {
            return DocValues.emptySortedSet();
        }
    };
}
/**
 * Sets the key to the value for the provided reader;
 * if the key is already set then this doesn't change it.
 */
public void put(AtomicReader reader, CacheKey key, Accountable value) {
    final Object readerKey = reader.getCoreCacheKey();
    synchronized (readerCache) {
        Map<CacheKey, Accountable> innerCache = readerCache.get(readerKey);
        if (innerCache == null) {
            // First time this reader is using FieldCache
            innerCache = new HashMap<>();
            readerCache.put(readerKey, innerCache);
            wrapper.initReader(reader);
        }
        if (innerCache.get(key) == null) {
            innerCache.put(key, value);
        } else {
            // Another thread beat us to it; leave the current value
        }
    }
}
Accountable readRamTree(StreamInput in) throws IOException {
    final String name = in.readString();
    final long bytes = in.readVLong();
    int numChildren = in.readVInt();
    if (numChildren == 0) {
        return Accountables.namedAccountable(name, bytes);
    }
    List<Accountable> children = new ArrayList<>(numChildren);
    while (numChildren-- > 0) {
        children.add(readRamTree(in));
    }
    return Accountables.namedAccountable(name, children, bytes);
}
void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
    out.writeString(tree.toString());
    out.writeVLong(tree.ramBytesUsed());
    Collection<Accountable> children = tree.getChildResources();
    out.writeVInt(children.size());
    for (Accountable child : children) {
        writeRamTree(out, child);
    }
}
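// A minimal sketch, not part of the source, showing that writeRamTree and
// readRamTree mirror each other: the writer emits a pre-order traversal of the
// tree (name, size, child count), and the reader rebuilds an equal tree from
// the same sequence. The tree is built with the Accountables.namedAccountable
// overloads already used above; the BytesStreamOutput/StreamInput pair is an
// illustrative assumption about the surrounding stream API.
Accountable leaf = Accountables.namedAccountable("term bytes", 128);
Accountable root = Accountables.namedAccountable("ordinals", Collections.singletonList(leaf), 256);
BytesStreamOutput out = new BytesStreamOutput();
writeRamTree(out, root);                              // root first, then its child
Accountable copy = readRamTree(StreamInput.wrap(out.bytes()));
assert copy.ramBytesUsed() == 256;                    // names, sizes and nesting survive the round trip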
@Override
public AtomicParentChildFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    return new AbstractAtomicParentChildFieldData() {
        @Override
        public Set<String> types() {
            return parentTypes;
        }

        @Override
        public SortedDocValues getOrdinalsValues(String type) {
            try {
                return DocValues.getSorted(reader, ParentFieldMapper.joinField(type));
            } catch (IOException e) {
                throw new IllegalStateException("cannot load join doc values field for type [" + type + "]", e);
            }
        }

        @Override
        public long ramBytesUsed() {
            // unknown
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() throws ElasticsearchException {
        }
    };
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("ordinals", ordinals));
    resources.add(Accountables.namedAccountable("term bytes", bytes));
    resources.add(Accountables.namedAccountable("term offsets", termOrdToBytesOffset));
    return Collections.unmodifiableList(resources);
}
public static AtomicNumericFieldData empty(final int maxDoc) {
    return new AtomicDoubleFieldData(0) {
        @Override
        public SortedNumericDoubleValues getDoubleValues() {
            return FieldData.emptySortedNumericDoubles(maxDoc);
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }
    };
}
public static AtomicParentChildFieldData empty() {
    return new AbstractAtomicParentChildFieldData() {
        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public SortedDocValues getOrdinalsValues(String type) {
            return DocValues.emptySorted();
        }

        @Override
        public Set<String> types() {
            return emptySet();
        }
    };
}
static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
    builder.startObject();
    builder.field(Fields.DESCRIPTION, tree.toString());
    builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed()));
    Collection<Accountable> children = tree.getChildResources();
    if (children.isEmpty() == false) {
        builder.startArray(Fields.CHILDREN);
        for (Accountable child : children) {
            toXContent(builder, child);
        }
        builder.endArray();
    }
    builder.endObject();
}
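// Illustrative output only, assuming the Fields constants map to
// "description", "size_in_bytes", "size" and "children", and that
// human-readable sizes are enabled on the builder: for a parent node
// "ordinals" of 256 bytes with one child "term bytes" of 128 bytes,
// the recursion above would emit roughly:
//
//   {
//     "description" : "ordinals",
//     "size_in_bytes" : 256,
//     "size" : "256b",
//     "children" : [ {
//       "description" : "term bytes",
//       "size_in_bytes" : 128,
//       "size" : "128b"
//     } ]
//   }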
@Override
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
    if (shardId != null) {
        final IndexShard shard = indexService.getShardOrNull(shardId.id());
        if (shard != null) {
            shard.fieldData().onCache(shardId, fieldName, ramUsage);
        }
    }
}
public void onRemoval(Accountable key, Accountable value, boolean evicted) {
    if (evicted) {
        evictionsMetric.inc();
    }
    long dec = 0;
    if (key != null) {
        dec += key.ramBytesUsed();
    }
    if (value != null) {
        dec += value.ramBytesUsed();
    }
    totalMetric.dec(dec);
}
public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception {
    fillExtendedMvSet();
    refreshReader();
    IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
    IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
    assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals));
    // 4 entries: 3 segment-level caches and 1 top-level cache
    // in case of doc values, we don't cache atomic FD, so only the top-level cache is there
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L));
    IndexOrdinalsFieldData cachedInstance = null;
    for (Accountable ramUsage : indicesFieldDataCache.getCache().values()) {
        if (ramUsage instanceof IndexOrdinalsFieldData) {
            cachedInstance = (IndexOrdinalsFieldData) ramUsage;
            break;
        }
    }
    assertThat(cachedInstance, sameInstance(globalOrdinals));
    topLevelReader.close();
    // Now only 3 segment-level entries remain: only the top-level reader has been closed,
    // but the segment readers are still used by the IndexWriter
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 0L : 3L));
    refreshReader();
    assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals)));
    ifdService.clear();
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(0L));
}
public CacheEntry(Object readerKey, String fieldName, Class<?> cacheType, Object custom, Accountable value) {
    this.readerKey = readerKey;
    this.fieldName = fieldName;
    this.cacheType = cacheType;
    this.custom = custom;
    this.value = value;
}
@Inject
public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener, ThreadPool threadPool) {
    super(settings);
    this.threadPool = threadPool;
    this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
    final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
    final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
    CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(this);
    if (sizeInBytes > 0) {
        cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
    }
    // defaults to 4, but this is a busy map for all indices, so increase it a bit by default
    final int concurrencyLevel = settings.getAsInt(FIELDDATA_CACHE_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    cacheBuilder.concurrencyLevel(concurrencyLevel);
    logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
    cache = cacheBuilder.build();

    this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
    // Start a thread that will periodically clean the field data cache
    threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
            new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
}
@Override
public void onRemoval(RemovalNotification<Key, Accountable> notification) {
    Key key = notification.getKey();
    assert key != null && key.listeners != null;
    IndexFieldCache indexCache = key.indexCache;
    final Accountable value = notification.getValue();
    for (IndexFieldDataCache.Listener listener : key.listeners) {
        try {
            listener.onRemoval(key.shardId, indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), value.ramBytesUsed());
        } catch (Throwable e) {
            // continue with the remaining listeners, since listeners should not throw exceptions
            logger.error("Failed to call listener on field data cache unloading", e);
        }
    }
}
IndexFieldCache(ESLogger logger, final Cache<Key, Accountable> cache, Index index, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Listener... listeners) {
    this.logger = logger;
    this.listeners = listeners;
    this.index = index;
    this.fieldNames = fieldNames;
    this.fieldDataType = fieldDataType;
    this.cache = cache;
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    if (lookupFactory != null) {
        resources.add(Accountables.namedAccountable("lookup", lookupFactory));
    }
    resources.add(Accountables.namedAccountable("delegate", delegateProducer));
    return Collections.unmodifiableList(resources);
}
@Override
public Collection<Accountable> getChildResources() {
    if (fst != null) {
        return Collections.singleton(Accountables.namedAccountable("fst", fst));
    } else {
        return Collections.emptyList();
    }
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("offsets", endOffsets));
    resources.add(Accountables.namedAccountable("ordinals", ords));
    return Collections.unmodifiableCollection(resources);
}
@Override
public void onCache(ShardId shardId, Accountable accountable) {
    if (shardId != null) {
        final IndexShard shard = indexService.shard(shardId.id());
        if (shard != null) {
            long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
            shard.shardBitsetFilterCache().onCached(ramBytesUsed);
        }
    }
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("indexedPoints", indexedPoint));
    if (set != null) {
        resources.add(Accountables.namedAccountable("missing bitset", set));
    }
    return Collections.unmodifiableList(resources);
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("latitude", lat));
    resources.add(Accountables.namedAccountable("longitude", lon));
    return Collections.unmodifiableList(resources);
}
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("latitude", lat));
    resources.add(Accountables.namedAccountable("longitude", lon));
    if (set != null) {
        resources.add(Accountables.namedAccountable("missing bitset", set));
    }
    return Collections.unmodifiableList(resources);
}
@Override
public Collection<Accountable> getChildResources() {
    return Collections.singleton(Accountables.namedAccountable("delegate", in));
}
@Override
public Collection<Accountable> getChildResources() {
    return Collections.emptyList();
}
@Override
public void onCache(ShardId shardId, String fieldName, Accountable fieldData) {
}
@Override
public void onCache(ShardId shardId, Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage) {
}
public Cache<Key, Accountable> getCache() {
    return cache;
}
@Override
public Collection<Accountable> getChildResources() {
    return in.getChildResources();
}
@Override
public long applyAsLong(Key key, Accountable ramUsage) {
    // Clamp to the int range; a minimum weight of 1 ensures zero-sized entries still count.
    int weight = (int) Math.min(ramUsage.ramBytesUsed(), Integer.MAX_VALUE);
    return weight == 0 ? 1 : weight;
}
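// A minimal sketch, not from the source: the weigher above floors its result
// at 1 so that a zero-byte Accountable still occupies a unit of cache weight,
// and clamps very large values to Integer.MAX_VALUE before the cache sums
// them toward the maximumWeight set above. The stub Accountable and the
// direct instantiation below are illustrative assumptions; the key is unused.
Accountable stub = Accountables.namedAccountable("stub", 0);
long weight = new FieldDataWeigher().applyAsLong(null, stub);  // -> 1, not 0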