Use separate BitSet cache in Doc Level Security #43669

Merged: 12 commits, Jul 3, 2019
DocumentSubsetBitsetCache.java (new file)
@@ -0,0 +1,206 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/

package org.elasticsearch.xpack.core.security.authz.accesscontrol;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;

import java.io.Closeable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;

/**
* This is a cache for {@link BitSet} instances that are used with the {@link DocumentSubsetReader}.
* It is bounded by memory size and access time.
*
* @see org.elasticsearch.index.cache.bitset.BitsetFilterCache
*/
public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListener, Closeable, Accountable {

    /**
     * The TTL defaults to 1 week. We depend on the {@code max_bytes} setting to keep the cache to a sensible size, by evicting LRU
     * entries; however, there is benefit in reclaiming memory by expiring bitsets that have not been used for some period of time.
     * Because {@link org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission.Group#query} can be templated, it is
     * not uncommon for a query to only be used for a relatively short period of time (e.g. because a user's metadata changed, or
     * because that user is an infrequent user of Elasticsearch). This access time expiry helps free up memory in those circumstances
     * even if the cache is never filled.
     */
    static final Setting<TimeValue> CACHE_TTL_SETTING =
        Setting.timeSetting("xpack.security.dls.bitset.cache.ttl", TimeValue.timeValueHours(24 * 7), Property.NodeScope);

    static final Setting<ByteSizeValue> CACHE_SIZE_SETTING = Setting.byteSizeSetting("xpack.security.dls.bitset.cache.size",
        new ByteSizeValue(50, ByteSizeUnit.MB), Property.NodeScope);

    private static final BitSet NULL_MARKER = new FixedBitSet(0);

    private final Logger logger;
    private final Cache<BitsetCacheKey, BitSet> bitsetCache;
    private final Map<IndexReader.CacheKey, Set<BitsetCacheKey>> keysByIndex;

    public DocumentSubsetBitsetCache(Settings settings) {
        this.logger = LogManager.getLogger(getClass());
        final TimeValue ttl = CACHE_TTL_SETTING.get(settings);
        final ByteSizeValue size = CACHE_SIZE_SETTING.get(settings);
        this.bitsetCache = CacheBuilder.<BitsetCacheKey, BitSet>builder()
            .setExpireAfterAccess(ttl)
            .setMaximumWeight(size.getBytes())
            .weigher((key, bitSet) -> bitSet == NULL_MARKER ? 0 : bitSet.ramBytesUsed()).build();
        this.keysByIndex = new ConcurrentHashMap<>();
    }

    @Override
    public void onClose(IndexReader.CacheKey ownerCoreCacheKey) {
        final Set<BitsetCacheKey> keys = keysByIndex.remove(ownerCoreCacheKey);
        if (keys != null) {
            // Because this Set has been removed from the map, and the only update to the set is performed in a
            // Map#compute call, it should not be possible to get a concurrent modification here.
            keys.forEach(bitsetCache::invalidate);
        }
    }

    @Override
    public void close() {
        clear("close");
    }

    public void clear(String reason) {
        logger.debug("clearing all DLS bitsets because [{}]", reason);
        // Due to the order here, it is possible that a new entry could be added _after_ the keysByIndex map is cleared
        // but _before_ the cache is cleared. This would mean it sits orphaned in keysByIndex, but this is not an issue.
        // When the index is closed, the key will be removed from the map, and there will not be a corresponding item
        // in the cache, which will make the cache-invalidate a no-op.
        // Since the entry is not in the cache, if #getBitSet is called, it will be loaded, and the new key will be added
        // to the index without issue.
        keysByIndex.clear();
        bitsetCache.invalidateAll();
    }

    int entryCount() {
        return this.bitsetCache.count();
    }

    @Override
    public long ramBytesUsed() {
        return this.bitsetCache.weight();
    }

    /**
     * Obtain the {@link BitSet} for the given {@code query} in the given {@code context}.
     * If there is a cached entry for that query and context, it will be returned.
     * Otherwise a new BitSet will be created and stored in the cache.
     * The returned BitSet may be null (e.g. if the query has no results).
     */
    @Nullable
    public BitSet getBitSet(final Query query, final LeafReaderContext context) throws ExecutionException {
        final IndexReader.CacheHelper coreCacheHelper = context.reader().getCoreCacheHelper();
        if (coreCacheHelper == null) {
            throw new IllegalArgumentException("Reader " + context.reader() + " does not support caching");
        }
        coreCacheHelper.addClosedListener(this);
        final IndexReader.CacheKey indexKey = coreCacheHelper.getKey();
        final BitsetCacheKey cacheKey = new BitsetCacheKey(indexKey, query);

        final BitSet bitSet = bitsetCache.computeIfAbsent(cacheKey, ignore1 -> {
            // This ensures all insertions into the set are guarded by ConcurrentHashMap's atomicity guarantees.
            keysByIndex.compute(indexKey, (ignore2, set) -> {
                if (set == null) {
                    set = Sets.newConcurrentHashSet();
                }
                set.add(cacheKey);
                return set;
            });
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
            final Scorer s = weight.scorer(context);
            if (s == null) {
                // A cache loader is not allowed to return null, return a marker object instead.
                return NULL_MARKER;
            } else {
                return BitSet.of(s.iterator(), context.reader().maxDoc());
            }
        });
        if (bitSet == NULL_MARKER) {
            return null;
        } else {
            return bitSet;
        }
    }

    public static List<Setting<?>> getSettings() {
        return List.of(CACHE_TTL_SETTING, CACHE_SIZE_SETTING);
    }

    public Map<String, Object> usageStats() {
        final ByteSizeValue ram = new ByteSizeValue(ramBytesUsed(), ByteSizeUnit.BYTES);
        return Map.of(
            "count", entryCount(),
            "memory", ram.toString(),
            "memory_in_bytes", ram.getBytes()
        );
    }

    private class BitsetCacheKey {
        final IndexReader.CacheKey index;
        final Query query;

        private BitsetCacheKey(IndexReader.CacheKey index, Query query) {
            this.index = index;
            this.query = query;
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            final BitsetCacheKey that = (BitsetCacheKey) other;
            return Objects.equals(this.index, that.index) &&
                Objects.equals(this.query, that.query);
        }

        @Override
        public int hashCode() {
            return Objects.hash(index, query);
        }

        @Override
        public String toString() {
            return getClass().getSimpleName() + "(" + index + "," + query + ")";
        }
    }
}
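
For a sense of how the new class is used, here is a minimal usage sketch. It is not part of this change; the Directory handle, the example field, and the role query are assumptions for illustration only, while the setting keys and the getBitSet contract come from the file above.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache;

public class BitsetCacheUsageSketch {
    static void demo(Directory directory) throws Exception {
        // Both settings are node-scope; the values here are arbitrary examples.
        final Settings settings = Settings.builder()
            .put("xpack.security.dls.bitset.cache.size", "100mb")
            .put("xpack.security.dls.bitset.cache.ttl", "2h")
            .build();
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings);

        final Query roleQuery = new TermQuery(new Term("department", "engineering"));
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            for (LeafReaderContext leaf : reader.leaves()) {
                // The first call per (segment, query) pair executes the query and caches the result;
                // later calls hit the cache. A null return means the query matches nothing in this segment.
                final BitSet bits = cache.getBitSet(roleQuery, leaf);
                final int visible = bits == null ? 0 : bits.cardinality();
            }
        }
        cache.close();
    }
}

Note that the weigher above charges NULL_MARKER entries zero bytes, so queries that match no documents consume effectively none of the cache's size budget.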
DocumentSubsetReader.java
@@ -21,7 +21,6 @@
 import org.elasticsearch.common.cache.Cache;
 import org.elasticsearch.common.cache.CacheBuilder;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -34,9 +33,9 @@
  */
 public final class DocumentSubsetReader extends FilterLeafReader {
 
-    public static DocumentSubsetDirectoryReader wrap(DirectoryReader in, BitsetFilterCache bitsetFilterCache,
+    public static DocumentSubsetDirectoryReader wrap(DirectoryReader in, DocumentSubsetBitsetCache bitsetCache,
                                                      Query roleQuery) throws IOException {
-        return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
+        return new DocumentSubsetDirectoryReader(in, bitsetCache, roleQuery);
     }
 
     /**
@@ -110,29 +109,29 @@ private static int getNumDocs(LeafReader reader, Query roleQuery, BitSet roleQueryBits)
     public static final class DocumentSubsetDirectoryReader extends FilterDirectoryReader {
 
         private final Query roleQuery;
-        private final BitsetFilterCache bitsetFilterCache;
+        private final DocumentSubsetBitsetCache bitsetCache;
 
-        DocumentSubsetDirectoryReader(final DirectoryReader in, final BitsetFilterCache bitsetFilterCache, final Query roleQuery)
-            throws IOException {
+        DocumentSubsetDirectoryReader(final DirectoryReader in, final DocumentSubsetBitsetCache bitsetCache,
+                                      final Query roleQuery) throws IOException {
             super(in, new SubReaderWrapper() {
                 @Override
                 public LeafReader wrap(LeafReader reader) {
                     try {
-                        return new DocumentSubsetReader(reader, bitsetFilterCache, roleQuery);
+                        return new DocumentSubsetReader(reader, bitsetCache, roleQuery);
                     } catch (Exception e) {
                         throw ExceptionsHelper.convertToElastic(e);
                     }
                 }
             });
-            this.bitsetFilterCache = bitsetFilterCache;
+            this.bitsetCache = bitsetCache;
             this.roleQuery = roleQuery;
 
             verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(in);
         }
 
         @Override
         protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-            return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
+            return new DocumentSubsetDirectoryReader(in, bitsetCache, roleQuery);
         }
 
         private static void verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(DirectoryReader reader) {
@@ -156,9 +155,9 @@ public CacheHelper getReaderCacheHelper() {
     private final BitSet roleQueryBits;
     private final int numDocs;
 
-    private DocumentSubsetReader(final LeafReader in, BitsetFilterCache bitsetFilterCache, final Query roleQuery) throws Exception {
+    private DocumentSubsetReader(final LeafReader in, DocumentSubsetBitsetCache bitsetCache, final Query roleQuery) throws Exception {
         super(in);
-        this.roleQueryBits = bitsetFilterCache.getBitSetProducer(roleQuery).getBitSet(in.getContext());
+        this.roleQueryBits = bitsetCache.getBitSet(roleQuery, in.getContext());
         this.numDocs = getNumDocs(in, roleQuery, roleQueryBits);
     }
 
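
To make the reader-level effect of this change concrete, here is a hedged sketch (the Directory and its contents are assumptions; the wrap signature and argument order are taken from the diff above). Wrapping a DirectoryReader hides every document the role query does not match, with the per-segment bitsets now served by the shared DocumentSubsetBitsetCache:

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader;

public class DocumentSubsetSketch {
    static int visibleDocCount(Directory directory, DocumentSubsetBitsetCache bitsetCache) throws Exception {
        final Query roleQuery = new ConstantScoreQuery(new TermQuery(new Term("visibility", "public")));
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            // Every leaf is wrapped in a DocumentSubsetReader backed by the shared cache.
            final DirectoryReader filtered = DocumentSubsetReader.wrap(reader, bitsetCache, roleQuery);
            // The wrapped reader exposes only role-visible documents, so a match-all count
            // returns the number of documents the role is allowed to see.
            return new IndexSearcher(filtered).count(new MatchAllDocsQuery());
        }
    }
}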
SecurityIndexReaderWrapper.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardUtils;
@@ -44,17 +43,17 @@ public class SecurityIndexReaderWrapper implements CheckedFunction<DirectoryReader, DirectoryReader, IOException>
     private static final Logger logger = LogManager.getLogger(SecurityIndexReaderWrapper.class);
 
     private final Function<ShardId, QueryShardContext> queryShardContextProvider;
-    private final BitsetFilterCache bitsetFilterCache;
+    private final DocumentSubsetBitsetCache bitsetCache;
     private final XPackLicenseState licenseState;
     private final ThreadContext threadContext;
     private final ScriptService scriptService;
 
     public SecurityIndexReaderWrapper(Function<ShardId, QueryShardContext> queryShardContextProvider,
-                                      BitsetFilterCache bitsetFilterCache, ThreadContext threadContext, XPackLicenseState licenseState,
+                                      DocumentSubsetBitsetCache bitsetCache, ThreadContext threadContext, XPackLicenseState licenseState,
                                       ScriptService scriptService) {
         this.scriptService = scriptService;
         this.queryShardContextProvider = queryShardContextProvider;
-        this.bitsetFilterCache = bitsetFilterCache;
+        this.bitsetCache = bitsetCache;
         this.threadContext = threadContext;
         this.licenseState = licenseState;
     }
@@ -84,7 +83,7 @@ public DirectoryReader apply(final DirectoryReader reader) {
         if (documentPermissions != null && documentPermissions.hasDocumentLevelPermissions()) {
             BooleanQuery filterQuery = documentPermissions.filter(getUser(), scriptService, shardId, queryShardContextProvider);
             if (filterQuery != null) {
-                wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetFilterCache, new ConstantScoreQuery(filterQuery));
+                wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetCache, new ConstantScoreQuery(filterQuery));
             }
         }
 
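
A wiring sketch for the new constructor shape (the collaborator instances are placeholders; only the parameter order comes from the diff above). The substance of the change is that one node-wide DLS bitset cache is created and shared by every wrapped reader, rather than borrowing each index's BitsetFilterCache:

import java.util.function.Function;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexReaderWrapper;

public class WrapperWiringSketch {
    static SecurityIndexReaderWrapper build(Function<ShardId, QueryShardContext> queryShardContextProvider,
                                            ThreadContext threadContext, XPackLicenseState licenseState,
                                            ScriptService scriptService, Settings settings) {
        // A single cache instance for the node; its lifecycle is independent of any one index.
        final DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(settings);
        return new SecurityIndexReaderWrapper(queryShardContextProvider, bitsetCache,
            threadContext, licenseState, scriptService);
    }
}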
CacheIteratorHelper.java (new file)
@@ -0,0 +1,59 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/

package org.elasticsearch.xpack.core.security.support;

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.util.concurrent.ReleasableLock;

import java.util.Iterator;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;

/**
* A utility class to facilitate iterating over (and modifying) a {@link org.elasticsearch.common.cache.Cache}.
* The semantics of the cache are such that when iterating (with the potential to call {@link Iterator#remove()}), we must prevent any
* other modifications.
* This class provides the necessary methods to support this constraint in a clear manner.
*/
public class CacheIteratorHelper<K, V> {
    private final Cache<K, V> cache;
    private final ReleasableLock updateLock;
    private final ReleasableLock iteratorLock;

    public CacheIteratorHelper(Cache<K, V> cache) {
        this.cache = cache;
        final ReadWriteLock lock = new ReentrantReadWriteLock();
        // The lock is used in an inverted manner: while iterating over the cache, no modifications are allowed other than
        // removals via the iterator, but when not iterating the cache may be modified without external locking. Normal
        // modifications therefore acquire the read lock, so that they can proceed concurrently with one another, whereas
        // iterating over the keys or values of the cache requires the write lock, which must be obtained to prevent any
        // concurrent modifications.
        updateLock = new ReleasableLock(lock.readLock());
        iteratorLock = new ReleasableLock(lock.writeLock());
    }

    public ReleasableLock acquireUpdateLock() {
        return updateLock.acquire();
    }

    private ReleasableLock acquireForIterator() {
        return iteratorLock.acquire();
    }

    public void removeKeysIf(Predicate<K> removeIf) {
        // the cache cannot be modified while doing this operation per the terms of the cache iterator
        try (ReleasableLock ignored = this.acquireForIterator()) {
            Iterator<K> iterator = cache.keys().iterator();
            while (iterator.hasNext()) {
                K key = iterator.next();
                if (removeIf.test(key)) {
                    iterator.remove();
                }
            }
        }
    }
}
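
CacheIteratorHelper is easiest to understand from the caller's side. A minimal sketch under assumed names (the String-to-Long cache and the two methods are illustrative; Cache and CacheBuilder are the same API used elsewhere in this PR):

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper;

public class CacheIteratorHelperSketch {
    private final Cache<String, Long> cache = CacheBuilder.<String, Long>builder().build();
    private final CacheIteratorHelper<String, Long> helper = new CacheIteratorHelper<>(cache);

    void put(String key, long value) {
        // Routine writes take the shared "update" (read) lock: they may run concurrently
        // with each other, but never while an iteration holds the write lock.
        try (ReleasableLock ignored = helper.acquireUpdateLock()) {
            cache.put(key, value);
        }
    }

    void evictByPrefix(String prefix) {
        // removeKeysIf acquires the exclusive (write) lock internally, then iterates the
        // keys and removes every entry matching the predicate.
        helper.removeKeysIf(key -> key.startsWith(prefix));
    }
}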