From 61b1f4ad0b88323a238c3f6a1d6d539c4479675a Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Wed, 27 Apr 2016 09:17:10 +0200
Subject: [PATCH] Fix xcontent rendering of ip terms aggs. #18003

Currently, terms on an ip address try to put their binary representation in
the json response. With this commit, they return a formatted ip address
instead:

```
"buckets": [
    {
        "key": "192.168.1.7",
        "doc_count": 1
    }
]
```
---
 ...balOrdinalsSignificantTermsAggregator.java |   6 +-
 .../significant/InternalSignificantTerms.java |  19 ++-
 .../significant/SignificantLongTerms.java     |  13 +--
 .../significant/SignificantStringTerms.java   |  42 ++++---
 .../SignificantStringTermsAggregator.java     |   6 +-
 .../significant/UnmappedSignificantTerms.java |   5 +-
 .../bucket/terms/StringTerms.java             |   2 +-
 .../SignificanceHeuristicTests.java           |  10 +-
 .../test/search.aggregation/20_terms.yaml     | 110 ++++++++++++++++++
 .../30_sig_terms.yaml}                        |  41 +++++++
 10 files changed, 208 insertions(+), 46 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
 rename rest-api-spec/src/main/resources/rest-api-spec/test/{search/test_sig_terms.yaml => search.aggregation/30_sig_terms.yaml} (66%)

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
index 2372886672e4f..e161ecb374ca6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
@@ -110,7 +110,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             }
             if (spare == null) {
-                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
+                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format);
             }
             spare.bucketOrd = bucketOrd;
             copy(globalOrds.lookupOrd(globalTermOrd), spare.termBytes);
@@ -135,7 +135,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             list[i] = bucket;
         }
-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(subsetSize, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Arrays.asList(list), pipelineAggregators(), metaData());
     }
@@ -146,7 +146,7 @@ public SignificantStringTerms buildEmptyAggregation() {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(0, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Collections.emptyList(), pipelineAggregators(), metaData());
     }

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
index 21b92e83fff69..d42aa73188714 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
@@ -20,6 +20,7 @@
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -33,6 +34,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;

 /**
  *
@@ -56,15 +58,19 @@ public static abstract class Bucket extends SignificantTerms.Bucket {
         long bucketOrd;
         protected InternalAggregations aggregations;
         double score;
+        transient final DocValueFormat format;

-        protected Bucket(long subsetSize, long supersetSize) {
+        protected Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
             // for serialization
             super(subsetSize, supersetSize);
+            this.format = format;
         }

-        protected Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
+        protected Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize,
+                InternalAggregations aggregations, DocValueFormat format) {
             super(subsetDf, subsetSize, supersetDf, supersetSize);
             this.aggregations = aggregations;
+            this.format = format;
         }

         @Override
@@ -122,9 +128,11 @@ public double getSignificanceScore() {
         }
     }

-    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
-            SignificanceHeuristic significanceHeuristic, List buckets, List pipelineAggregators,
-            Map metaData) {
+    protected DocValueFormat format;
+
+    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, DocValueFormat format, int requiredSize,
+            long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets,
+            List pipelineAggregators, Map metaData) {
         super(name, pipelineAggregators, metaData);
         this.requiredSize = requiredSize;
         this.minDocCount = minDocCount;
@@ -132,6 +140,7 @@ protected InternalSignificantTerms(long subsetSize, long supersetSize, String na
         this.subsetSize = subsetSize;
         this.supersetSize = supersetSize;
         this.significanceHeuristic = significanceHeuristic;
+        this.format = Objects.requireNonNull(format);
     }

     @Override

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
index fba66e7ba0caf..f2172082ae4c9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
@@ -81,18 +81,15 @@ public static void registerStreams() {
     static class Bucket extends InternalSignificantTerms.Bucket {

         long term;
-        private transient final DocValueFormat format;

-        public Bucket(long subsetSize, long supersetSize, DocValueFormat formatter) {
-            super(subsetSize, supersetSize);
-            this.format = formatter;
+        public Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
+            super(subsetSize, supersetSize, format);
             // for serialization
         }

         public Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, long term, InternalAggregations aggregations,
                 DocValueFormat format) {
-            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
-            this.format = format;
+            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.term = term;
         }
@@ -160,7 +157,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             return builder;
         }
     }
-    private DocValueFormat format;

     SignificantLongTerms() { } // for serialization
@@ -169,8 +165,7 @@ public SignificantLongTerms(long subsetSize, long supersetSize, String name, Doc
             long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets,
             List pipelineAggregators, Map metaData) {
-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
-        this.format = Objects.requireNonNull(format);
+        super(subsetSize, supersetSize, name, format, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
     }

     @Override

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
index 5d076190bc467..b951c3517022f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -55,7 +56,8 @@ public SignificantStringTerms readResult(StreamInput in) throws IOException {
     private final static BucketStreams.Stream BUCKET_STREAM = new BucketStreams.Stream() {
         @Override
         public Bucket readResult(StreamInput in, BucketStreamContext context) throws IOException {
-            Bucket buckets = new Bucket((long) context.attributes().get("subsetSize"), (long) context.attributes().get("supersetSize"));
+            Bucket buckets = new Bucket((long) context.attributes().get("subsetSize"),
+                    (long) context.attributes().get("supersetSize"), context.format());
             buckets.readFrom(in);
             return buckets;
         }
@@ -84,18 +86,20 @@ public static class Bucket extends InternalSignificantTerms.Bucket {

         BytesRef termBytes;

-        public Bucket(long subsetSize, long supersetSize) {
+        public Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
             // for serialization
-            super(subsetSize, supersetSize);
+            super(subsetSize, supersetSize, format);
         }

-        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
-            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations,
+                DocValueFormat format) {
+            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.termBytes = term;
         }

-        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations, double score) {
-            this(term, subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize,
+                InternalAggregations aggregations, double score, DocValueFormat format) {
+            this(term, subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.score = score;
         }
@@ -112,7 +116,7 @@ int compareTerm(SignificantTerms.Bucket other) {

         @Override
         public String getKeyAsString() {
-            return termBytes.utf8ToString();
+            return format.format(termBytes);
         }

         @Override
@@ -122,7 +126,7 @@ public String getKey() {

         @Override
         Bucket newBucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
-            return new Bucket(termBytes, subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+            return new Bucket(termBytes, subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
         }

         @Override
@@ -146,7 +150,7 @@ public void writeTo(StreamOutput out) throws IOException {
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
-            builder.utf8Field(CommonFields.KEY, termBytes);
+            builder.field(CommonFields.KEY, getKeyAsString());
             builder.field(CommonFields.DOC_COUNT, getDocCount());
             builder.field("score", score);
             builder.field("bg_count", supersetDf);
@@ -158,11 +162,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws

     SignificantStringTerms() {} // for serialization

-    public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
-            SignificanceHeuristic significanceHeuristic, List buckets,
+    public SignificantStringTerms(long subsetSize, long supersetSize, String name, DocValueFormat format, int requiredSize,
+            long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets,
             List pipelineAggregators, Map metaData) {
-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
+        super(subsetSize, supersetSize, name, format, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
     }

     @Override
@@ -172,25 +176,26 @@ public Type type() {

     @Override
     public SignificantStringTerms create(List buckets) {
-        return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.requiredSize, this.minDocCount,
+        return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.format, this.requiredSize, this.minDocCount,
                 this.significanceHeuristic, buckets, this.pipelineAggregators(), this.metaData);
     }

     @Override
     public Bucket createBucket(InternalAggregations aggregations, SignificantStringTerms.Bucket prototype) {
         return new Bucket(prototype.termBytes, prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize,
-                aggregations);
+                aggregations, prototype.format);
     }

     @Override
     protected SignificantStringTerms create(long subsetSize, long supersetSize, List buckets, InternalSignificantTerms prototype) {
-        return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.requiredSize, prototype.minDocCount,
-                prototype.significanceHeuristic, buckets, prototype.pipelineAggregators(), prototype.getMetaData());
+        return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.format, prototype.requiredSize,
+                prototype.minDocCount, prototype.significanceHeuristic, buckets, prototype.pipelineAggregators(), prototype.getMetaData());
     }

     @Override
     protected void doReadFrom(StreamInput in) throws IOException {
+        this.format = in.readNamedWriteable(DocValueFormat.class);
         this.requiredSize = readSize(in);
         this.minDocCount = in.readVLong();
         this.subsetSize = in.readVLong();
@@ -199,7 +204,7 @@ protected void doReadFrom(StreamInput in) throws IOException {
         int size = in.readVInt();
         List buckets = new ArrayList<>(size);
         for (int i = 0; i < size; i++) {
-            Bucket bucket = new Bucket(subsetSize, supersetSize);
+            Bucket bucket = new Bucket(subsetSize, supersetSize, format);
             bucket.readFrom(in);
             buckets.add(bucket);
         }
@@ -209,6 +214,7 @@ protected void doReadFrom(StreamInput in) throws IOException {

     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeNamedWriteable(format);
         writeSize(requiredSize, out);
         out.writeVLong(minDocCount);
         out.writeVLong(subsetSize);

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
index c1d4e2225ea84..41d136d01dc55 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
@@ -90,7 +90,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             }
             if (spare == null) {
-                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
+                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format);
             }
             bucketOrds.get(i, spare.termBytes);
@@ -117,7 +117,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             list[i] = bucket;
         }
-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(subsetSize, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Arrays.asList(list), pipelineAggregators(), metaData());
     }
@@ -128,7 +128,7 @@ public SignificantStringTerms buildEmptyAggregation() {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(0, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Collections.emptyList(), pipelineAggregators(), metaData());
     }

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
index 8858050efaf44..5369e2690584c 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -59,8 +60,8 @@ public static void registerStreams() {
     public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, List pipelineAggregators,
             Map metaData) {
         //We pass zero for index/subset sizes because for the purpose of significant term analysis
         // we assume an unmapped index's size is irrelevant to the proceedings.
-        super(0, 0, name, requiredSize, minDocCount, SignificantTermsAggregatorBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC, BUCKETS,
-                pipelineAggregators, metaData);
+        super(0, 0, name, DocValueFormat.RAW, requiredSize, minDocCount, SignificantTermsAggregatorBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC,
+                BUCKETS, pipelineAggregators, metaData);
     }

     @Override

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
index 9c7c85ef85594..ddaaebe899922 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
@@ -141,7 +141,7 @@ public void writeTo(StreamOutput out) throws IOException {
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
-            builder.utf8Field(CommonFields.KEY, termBytes);
+            builder.field(CommonFields.KEY, getKeyAsString());
             builder.field(CommonFields.DOC_COUNT, getDocCount());
             if (showDocCountError) {
                 builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError());

diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
index 75970a79d1d20..df449aeeaf28c 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -131,9 +131,9 @@ InternalSignificantTerms[] getRandomSignificantTerms(SignificanceHeuristic heuri
             sTerms[1] = new SignificantLongTerms();
         } else {
             BytesRef term = new BytesRef("someterm");
-            buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY));
-            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, Collections.emptyList(),
-                    null);
+            buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY, DocValueFormat.RAW));
+            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", DocValueFormat.RAW, 1, 1, heuristic, buckets,
+                    Collections.emptyList(), null);
             sTerms[1] = new SignificantStringTerms();
         }
         return sTerms;
@@ -184,7 +184,7 @@ private List createInternalAggregations() {
     private InternalSignificantTerms createAggregation(String type, SignificanceHeuristic significanceHeuristic, List buckets, long subsetSize, long supersetSize) {
         if (type.equals("string")) {
-            return new SignificantStringTerms(subsetSize, supersetSize, "sig_terms", 2, -1, significanceHeuristic, buckets, new ArrayList(), new HashMap());
+            return new SignificantStringTerms(subsetSize, supersetSize, "sig_terms", DocValueFormat.RAW, 2, -1, significanceHeuristic, buckets, new ArrayList(), new HashMap());
         } else {
             return new SignificantLongTerms(subsetSize, supersetSize, "sig_terms", DocValueFormat.RAW, 2, -1, significanceHeuristic, buckets, new ArrayList(), new HashMap());
         }
@@ -192,7 +192,7 @@ private InternalSignificantTerms createAggregation(String type, SignificanceHeur
     private InternalSignificantTerms.Bucket createBucket(String type, long subsetDF, long subsetSize, long supersetDF, long supersetSize, long label) {
         if (type.equals("string")) {
-            return new SignificantStringTerms.Bucket(new BytesRef(Long.toString(label).getBytes(StandardCharsets.UTF_8)), subsetDF, subsetSize, supersetDF, supersetSize, InternalAggregations.EMPTY);
+            return new SignificantStringTerms.Bucket(new BytesRef(Long.toString(label).getBytes(StandardCharsets.UTF_8)), subsetDF, subsetSize, supersetDF, supersetSize, InternalAggregations.EMPTY, DocValueFormat.RAW);
         } else {
             return new SignificantLongTerms.Bucket(subsetDF, subsetSize, supersetDF, supersetSize, label, InternalAggregations.EMPTY, DocValueFormat.RAW);
         }

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
new file mode 100644
index 0000000000000..cf4771eade387
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
@@ -0,0 +1,110 @@
+setup:
+  - do:
+      indices.create:
+        index: test_1
+        body:
+          settings:
+            number_of_replicas: 0
+          mappings:
+            test:
+              properties:
+                str:
+                  type: keyword
+                ip:
+                  type: ip
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+---
+"Basic test":
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 1
+        body: { "str" : "abc" }
+
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 2
+        body: { "str": "abc" }
+
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 3
+        body: { "str": "bcd" }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } }
+
+  - match: { hits.total: 3 }
+
+  - length: { aggregations.str_terms.buckets: 2 }
+
+  - match: { aggregations.str_terms.buckets.0.key: "abc" }
+
+  - is_false: aggregations.str_terms.buckets.0.key_as_string
+
+  - match: { aggregations.str_terms.buckets.0.doc_count: 2 }
+
+  - match: { aggregations.str_terms.buckets.1.key: "bcd" }
+
+  - is_false: aggregations.str_terms.buckets.1.key_as_string
+
+  - match: { aggregations.str_terms.buckets.1.doc_count: 1 }
+
+---
+"IP test":
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 1
+        body: { "ip": "::1" }
+
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 2
+        body: { "ip": "127.0.0.1" }
+
+  - do:
+      index:
+        index: test_1
+        type: test
+        id: 3
+        body: { "ip": "::1" }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "aggs" : { "ip_terms" : { "terms" : { "field" : "ip" } } } }
+
+  - match: { hits.total: 3 }
+
+  - length: { aggregations.ip_terms.buckets: 2 }
+
+  - match: { aggregations.ip_terms.buckets.0.key: "::1" }
+
+  - is_false: aggregations.ip_terms.buckets.0.key_as_string
+
+  - match: { aggregations.ip_terms.buckets.0.doc_count: 2 }
+
+  - match: { aggregations.ip_terms.buckets.1.key: "127.0.0.1" }
+
+  - is_false: aggregations.ip_terms.buckets.1.key_as_string
+
+  - match: { aggregations.ip_terms.buckets.1.doc_count: 1 }

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/test_sig_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml
similarity index 66%
rename from rest-api-spec/src/main/resources/rest-api-spec/test/search/test_sig_terms.yaml
rename to rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml
index 9c01bbc2b5dad..45c042baea435 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/test_sig_terms.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml
@@ -80,3 +80,44 @@
   - match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"}
   - match: {aggregations.class.buckets.1.sig_terms.buckets.0.key: "good"}
 
+---
+"IP test":
+  - do:
+      indices.create:
+        index: ip_index
+        body:
+          mappings:
+            doc:
+              properties:
+                ip:
+                  type: ip
+
+  - do:
+      index:
+        index: ip_index
+        type: doc
+        id: 1
+        body: { ip: "::1" }
+  - do:
+      index:
+        index: ip_index
+        type: doc
+        id: 2
+        body: { }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "query" : { "exists" : { "field" : "ip" } }, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "min_doc_count" : 1 } } } }
+
+  - match: { hits.total: 1 }
+
+  - length: { aggregations.ip_terms.buckets: 1 }
+
+  - match: { aggregations.ip_terms.buckets.0.key: "::1" }
+
+  - is_false: aggregations.ip_terms.buckets.0.key_as_string
+
+  - match: { aggregations.ip_terms.buckets.0.doc_count: 1 }
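For reference, the `format.format(termBytes)` call introduced above is what turns the binary doc-values representation of an `ip` field into the human-readable bucket key shown in the commit message. The snippet below is a minimal, self-contained sketch of that decoding step using only the JDK; the class and method names are illustrative and not part of this patch, and Elasticsearch's `DocValueFormat.IP` uses its own formatting utilities (notably to produce canonical IPv6 output such as `::1`).

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

public class IpTermKeyExample {

    // Decode the 16-byte, IPv4-mapped-IPv6 representation that ip doc values use
    // into the textual form that terms/significant_terms buckets now expose as "key".
    // Illustrative only; DocValueFormat.IP performs the equivalent step in Elasticsearch.
    static String formatIpKey(byte[] encoded) throws UnknownHostException {
        // For IPv4-mapped addresses (::ffff:a.b.c.d) the JDK returns an Inet4Address,
        // so getHostAddress() yields the dotted-quad form, e.g. "192.168.1.7".
        return InetAddress.getByAddress(encoded).getHostAddress();
    }

    public static void main(String[] args) throws UnknownHostException {
        // 16-byte IPv4-mapped encoding of 192.168.1.7, as a terms bucket would carry it.
        byte[] encoded = new byte[16];
        encoded[10] = (byte) 0xff;
        encoded[11] = (byte) 0xff;
        encoded[12] = (byte) 192;
        encoded[13] = (byte) 168;
        encoded[14] = 1;
        encoded[15] = 7;

        System.out.println(formatIpKey(encoded)); // prints 192.168.1.7
    }
}
```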