Fix format problem in composite of unmapped (backport of #50869) #50875

Merged: 1 commit, Jan 10, 2020
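For context on what the backported fix addresses: a composite source over a field that is unmapped in a given index falls back to DocValueFormat.RAW, and before this change the InternalComposite doing the reducing could use that RAW format to render keys (and the after key) that actually came from a mapped ip field. The snippet below is not part of the PR; it is a minimal sketch, assuming the elasticsearch server and Lucene jars are on the classpath, that only uses the two format constants this diff touches (the class name is made up).

import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.search.DocValueFormat;

public class CompositeFormatSketch {
    public static void main(String[] args) {
        // Encode an address the same way the ip field's doc values carry it.
        BytesRef encoded = new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.1")));

        // The ip format decodes those bytes back into a readable address.
        System.out.println(DocValueFormat.IP.format(encoded));

        // RAW, the fallback used for unmapped fields, treats the bytes as UTF-8,
        // so an encoded IP rendered with it is unreadable. Picking RAW while
        // reducing buckets from a mapped index is the bug being fixed here.
        System.out.println(DocValueFormat.RAW.format(encoded));
    }
}

The new REST test ("Mixed ip and unmapped fields") and the reduce changes below ensure that the formats from the mapped side win.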
@@ -729,6 +729,7 @@ setup:
- match: { aggregations.test.buckets.3.key.geo: "12/2048/0" }
- match: { aggregations.test.buckets.3.key.kw: "bar" }
- match: { aggregations.test.buckets.3.doc_count: 1 }

---
"Simple Composite aggregation with geotile grid add aggregate after":
- skip:
@@ -770,3 +771,49 @@ setup:
- match: { aggregations.test.buckets.2.key.geo: "12/2048/0" }
- match: { aggregations.test.buckets.2.key.kw: "bar" }
- match: { aggregations.test.buckets.2.doc_count: 1 }

---
"Mixed ip and unmapped fields":
- skip:
version: " - 7.99.99"
reason: This will fail against 7.x until the fix is backported there
# It is important that the index *without* the ip field be sorted *before*
# the index *with* the ip field because that has caused bugs in the past.
- do:
indices.create:
index: test_1
- do:
indices.create:
index: test_2
body:
mappings:
properties:
f:
type: ip
- do:
index:
index: test_2
id: 1
body: { "f": "192.168.0.1" }
refresh: true

- do:
search:
index: test_*
body:
aggregations:
test:
composite:
sources: [
"f": {
"terms": {
"field": "f"
}
}
]

- match: { hits.total.value: 1 }
- match: { hits.total.relation: eq }
- length: { aggregations.test.buckets: 1 }
- match: { aggregations.test.buckets.0.key.f: "192.168.0.1" }
- match: { aggregations.test.buckets.0.doc_count: 1 }
10 changes: 10 additions & 0 deletions server/src/main/java/org/elasticsearch/search/DocValueFormat.java
@@ -141,6 +141,11 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) {
public BytesRef parseBytesRef(String value) {
return new BytesRef(value);
}

@Override
public String toString() {
return "raw";
}
};

DocValueFormat BINARY = new DocValueFormat() {
@@ -346,6 +351,11 @@ public String format(BytesRef value) {
public BytesRef parseBytesRef(String value) {
return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(value)));
}

@Override
public String toString() {
return "ip";
}
};

final class Decimal implements DocValueFormat {
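The two toString overrides above are presumably there so that the format assertions added to the composite tests below fail with readable names; Hamcrest mismatch descriptions and plain printing both go through toString. A tiny hypothetical illustration (the class name is made up, everything else is in this diff):

import org.elasticsearch.search.DocValueFormat;

public class DocValueFormatNamesSketch {
    public static void main(String[] args) {
        // With the overrides above, these print "raw" and "ip" instead of
        // anonymous-class identifiers such as DocValueFormat$1@1a2b3c.
        System.out.println(DocValueFormat.RAW);
        System.out.println(DocValueFormat.IP);
    }
}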
@@ -149,6 +149,13 @@ public List<InternalBucket> getBuckets() {
return buckets;
}

/**
* The formats used when writing the keys. Package private for testing.
*/
List<DocValueFormat> getFormats() {
return formats;
}

@Override
public Map<String, Object> afterKey() {
if (afterKey != null) {
@@ -204,8 +211,17 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
reduceContext.consumeBucketsAndMaybeBreak(1);
result.add(reduceBucket);
}
final CompositeKey lastKey = result.size() > 0 ? result.get(result.size()-1).getRawKey() : null;
return new InternalComposite(name, size, sourceNames, formats, result, lastKey, reverseMuls,

List<DocValueFormat> reducedFormats = formats;
CompositeKey lastKey = null;
if (result.size() > 0) {
lastBucket = result.get(result.size() - 1);
/* Attach the formats from the last bucket to the reduced composite
* so that we can properly format the after key. */
reducedFormats = lastBucket.formats;
lastKey = lastBucket.getRawKey();
}
return new InternalComposite(name, size, sourceNames, reducedFormats, result, lastKey, reverseMuls,
earlyTerminated, pipelineAggregators(), metaData);
}

@@ -219,7 +235,12 @@ protected InternalBucket reduceBucket(List<InternalBucket> buckets, ReduceContext context) {
aggregations.add(bucket.aggregations);
}
InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return new InternalBucket(sourceNames, formats, buckets.get(0).key, reverseMuls, docCount, aggs);
/* Use the formats from the bucket because they'll be right to format
* the key. The formats on the InternalComposite doing the reducing are
* just whatever formats make sense for *its* index. This can be real
* trouble when the index doing the reducing is unmapped. */
List<DocValueFormat> reducedFormats = buckets.get(0).formats;
return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, docCount, aggs);
}

@Override
@@ -349,6 +370,13 @@ public Aggregations getAggregations() {
return aggregations;
}

/**
* The formats used when writing the keys. Package private for testing.
*/
List<DocValueFormat> getFormats() {
return formats;
}

@Override
public int compareKey(InternalBucket other) {
for (int i = 0; i < key.size(); i++) {
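The two changes above, in reduce and reduceBucket, follow the same rule: trust the formats carried by the buckets being reduced rather than the formats of the InternalComposite doing the reducing, because the latter may belong to an unmapped index and default to RAW. A compressed restatement of that rule, as a sketch rather than the actual Elasticsearch code (it would have to live in the same package, since getFormats() is package private, and the class and method names are made up):

package org.elasticsearch.search.aggregations.bucket.composite;

import java.util.List;

import org.elasticsearch.search.DocValueFormat;

final class ReducedFormatsSketch {
    // Prefer the formats carried by an actual reduced bucket; fall back to the
    // reducing aggregation's own formats only when there are no buckets at all.
    static List<DocValueFormat> chooseReducedFormats(List<DocValueFormat> ownFormats,
                                                     List<InternalComposite.InternalBucket> reduced) {
        return reduced.isEmpty() ? ownFormats : reduced.get(reduced.size() - 1).getFormats();
    }
}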
@@ -20,6 +20,7 @@
package org.elasticsearch.search.aggregations.bucket.composite;

import com.google.common.collect.Lists;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.time.DateFormatter;
@@ -36,6 +37,7 @@
import java.io.IOException;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
@@ -47,6 +49,9 @@
import java.util.stream.IntStream;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -238,8 +243,7 @@ public void testReduceSame() throws IOException {
for (int i = 0; i < numSame; i++) {
toReduce.add(result);
}
InternalComposite finalReduce = (InternalComposite) result.reduce(toReduce,
new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, true));
InternalComposite finalReduce = (InternalComposite) result.reduce(toReduce, reduceContext());
assertThat(finalReduce.getBuckets().size(), equalTo(result.getBuckets().size()));
Iterator<InternalComposite.InternalBucket> expectedIt = result.getBuckets().iterator();
for (InternalComposite.InternalBucket bucket : finalReduce.getBuckets()) {
@@ -249,6 +253,30 @@
}
}

/**
* Check that reducing with an unmapped index produces useful formats.
*/
public void testReduceUnmapped() throws IOException {
InternalComposite mapped = createTestInstance(randomAlphaOfLength(10), emptyList(), emptyMap(), InternalAggregations.EMPTY);
List<DocValueFormat> rawFormats = formats.stream().map(f -> DocValueFormat.RAW).collect(toList());
InternalComposite unmapped = new InternalComposite(mapped.getName(), mapped.getSize(), sourceNames,
rawFormats, emptyList(), null, reverseMuls, true, emptyList(), emptyMap());
List<InternalAggregation> toReduce = Arrays.asList(unmapped, mapped);
Collections.shuffle(toReduce, random());
InternalComposite finalReduce = (InternalComposite) unmapped.reduce(toReduce, reduceContext());
assertThat(finalReduce.getBuckets().size(), equalTo(mapped.getBuckets().size()));
if (false == mapped.getBuckets().isEmpty()) {
assertThat(finalReduce.getFormats(), equalTo(mapped.getFormats()));
}
Iterator<InternalComposite.InternalBucket> expectedIt = mapped.getBuckets().iterator();
for (InternalComposite.InternalBucket bucket : finalReduce.getBuckets()) {
InternalComposite.InternalBucket expectedBucket = expectedIt.next();
assertThat(bucket.getRawKey(), equalTo(expectedBucket.getRawKey()));
assertThat(bucket.getDocCount(), equalTo(expectedBucket.getDocCount()));
assertThat(bucket.getFormats(), equalTo(expectedBucket.getFormats()));
}
}

public void testCompareCompositeKeyBiggerFieldName() {
InternalComposite.ArrayMap key1 = createMap(
Lists.newArrayList("field1", "field2"),
@@ -381,4 +409,8 @@ private InternalComposite.ArrayMap createMap(List<String> fields, Comparable[] values) {
values
);
}

private InternalAggregation.ReduceContext reduceContext() {
return new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, true);
}
}