Fix xcontent rendering of ip terms aggs. #18003

Merged: 1 commit, May 13, 2016
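Background on the fix (a summary inferred from the hunks below, not the PR's own description): buckets of string-valued terms and significant-terms aggregations used to render their keys by decoding the stored bytes as UTF-8, via termBytes.utf8ToString() and builder.utf8Field(CommonFields.KEY, termBytes). For ip fields the doc values are binary-encoded addresses rather than text, so the rendered keys were unreadable. The fix threads the field's DocValueFormat into every bucket so that key rendering goes through format.format(termBytes). A minimal, runnable sketch of the failure mode, using hypothetical stand-in types rather than Elasticsearch classes:

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;

public class IpKeyRendering {
    // Plays the role of DocValueFormat: turns the stored bytes of a term
    // into the string that appears as the bucket "key" in the response.
    interface ValueFormatter {
        String format(byte[] termBytes);
    }

    // RAW-like behaviour: interpret the bytes as UTF-8 text.
    static final ValueFormatter RAW = bytes -> new String(bytes, StandardCharsets.UTF_8);

    // IP-like behaviour: decode the bytes as a binary-encoded address.
    static final ValueFormatter IP = bytes -> {
        try {
            return InetAddress.getByAddress(bytes).getHostAddress();
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("not an encoded IP", e);
        }
    };

    public static void main(String[] args) {
        byte[] encoded = {(byte) 192, (byte) 168, 0, 1}; // binary form of 192.168.0.1
        System.out.println(RAW.format(encoded)); // replacement characters: the bytes are not UTF-8 text
        System.out.println(IP.format(encoded));  // "192.168.0.1"
    }
}

With a formatter in hand, the same bytes render as a readable address; the hunks below are the plumbing that carries the formatter from the aggregator to each bucket.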
GlobalOrdinalsSignificantTermsAggregator.java (file names inferred from the classes in each hunk; full paths were not captured)
@@ -110,7 +110,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             }

             if (spare == null) {
-                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
+                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format);
             }
             spare.bucketOrd = bucketOrd;
             copy(globalOrds.lookupOrd(globalTermOrd), spare.termBytes);
@@ -135,7 +135,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             list[i] = bucket;
         }

-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(subsetSize, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Arrays.asList(list), pipelineAggregators(),
                 metaData());
     }
@@ -146,7 +146,7 @@ public SignificantStringTerms buildEmptyAggregation() {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(0, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic,
                 Collections.<InternalSignificantTerms.Bucket> emptyList(), pipelineAggregators(), metaData());
     }
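The aggregator resolves the field's format once and now hands that same instance to every bucket and to the top-level SignificantStringTerms it builds. A compact sketch of the pattern, reusing the hypothetical ValueFormatter stand-in from the sketch above (not the Elasticsearch API):

// The formatter travels with each result object, so rendering never has
// to guess how the term bytes were encoded.
record Bucket(byte[] termBytes, IpKeyRendering.ValueFormatter format) {
    String keyAsString() {
        return format.format(termBytes); // mirrors Bucket.getKeyAsString() after this PR
    }
}

final class Aggregator {
    private final IpKeyRendering.ValueFormatter format; // resolved from the field mapping

    Aggregator(IpKeyRendering.ValueFormatter format) {
        this.format = format;
    }

    Bucket buildBucket(byte[] termBytes) {
        return new Bucket(termBytes, format); // handed to every bucket it builds
    }
}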
InternalSignificantTerms.java
@@ -20,6 +20,7 @@

 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -33,6 +34,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;

 /**
  *
@@ -56,15 +58,19 @@ public static abstract class Bucket extends SignificantTerms.Bucket {
         long bucketOrd;
         protected InternalAggregations aggregations;
         double score;
+        transient final DocValueFormat format;

-        protected Bucket(long subsetSize, long supersetSize) {
+        protected Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
             // for serialization
             super(subsetSize, supersetSize);
+            this.format = format;
         }

-        protected Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
+        protected Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize,
+                InternalAggregations aggregations, DocValueFormat format) {
             super(subsetDf, subsetSize, supersetDf, supersetSize);
             this.aggregations = aggregations;
+            this.format = format;
         }

         @Override
@@ -122,16 +128,19 @@ public double getSignificanceScore() {
         }
     }

-    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
-            SignificanceHeuristic significanceHeuristic, List<? extends Bucket> buckets, List<PipelineAggregator> pipelineAggregators,
-            Map<String, Object> metaData) {
+    protected DocValueFormat format;
+
+    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, DocValueFormat format, int requiredSize,
+            long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends Bucket> buckets,
+            List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         super(name, pipelineAggregators, metaData);
         this.requiredSize = requiredSize;
         this.minDocCount = minDocCount;
         this.buckets = buckets;
         this.subsetSize = subsetSize;
         this.supersetSize = supersetSize;
         this.significanceHeuristic = significanceHeuristic;
+        this.format = Objects.requireNonNull(format);
     }

     @Override
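Two details in these hunks are worth spelling out. The bucket's format field is transient: buckets are not written to the wire with their formatter; the enclosing aggregation restores it when it deserializes them (see doReadFrom in SignificantStringTerms below). And the base constructor fails fast on a null format, so every concrete subclass must supply a formatter. A minimal sketch of that guard, continuing the stand-in types from above:

import java.util.Objects;

// A null formatter fails at construction time rather than surfacing
// later as an NPE in the middle of response rendering.
abstract class InternalTermsLike {
    protected final IpKeyRendering.ValueFormatter format;

    protected InternalTermsLike(IpKeyRendering.ValueFormatter format) {
        this.format = Objects.requireNonNull(format, "format must not be null");
    }
}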
SignificantLongTerms.java
@@ -81,18 +81,15 @@ public static void registerStreams() {
     static class Bucket extends InternalSignificantTerms.Bucket {

         long term;
-        private transient final DocValueFormat format;

-        public Bucket(long subsetSize, long supersetSize, DocValueFormat formatter) {
-            super(subsetSize, supersetSize);
-            this.format = formatter;
+        public Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
+            super(subsetSize, supersetSize, format);
             // for serialization
         }

         public Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, long term, InternalAggregations aggregations,
                 DocValueFormat format) {
-            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
-            this.format = format;
+            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.term = term;
         }

@@ -160,7 +157,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             return builder;
         }
     }
-    private DocValueFormat format;

     SignificantLongTerms() {
     } // for serialization
@@ -169,8 +165,7 @@ public SignificantLongTerms(long subsetSize, long supersetSize, String name, Doc
             long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends InternalSignificantTerms.Bucket> buckets,
             List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {

-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
-        this.format = Objects.requireNonNull(format);
+        super(subsetSize, supersetSize, name, format, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
     }

     @Override
SignificantStringTerms.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -55,7 +56,8 @@ public SignificantStringTerms readResult(StreamInput in) throws IOException {
     private final static BucketStreams.Stream<Bucket> BUCKET_STREAM = new BucketStreams.Stream<Bucket>() {
         @Override
         public Bucket readResult(StreamInput in, BucketStreamContext context) throws IOException {
-            Bucket buckets = new Bucket((long) context.attributes().get("subsetSize"), (long) context.attributes().get("supersetSize"));
+            Bucket buckets = new Bucket((long) context.attributes().get("subsetSize"),
+                    (long) context.attributes().get("supersetSize"), context.format());
             buckets.readFrom(in);
             return buckets;
         }
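Note the companion change in BUCKET_STREAM above: readResult now takes the formatter from the BucketStreamContext via context.format() when it materializes a bucket, so buckets deserialized through the bucket stream carry the same DocValueFormat as their parent aggregation.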
@@ -84,18 +86,20 @@ public static class Bucket extends InternalSignificantTerms.Bucket {

         BytesRef termBytes;

-        public Bucket(long subsetSize, long supersetSize) {
+        public Bucket(long subsetSize, long supersetSize, DocValueFormat format) {
             // for serialization
-            super(subsetSize, supersetSize);
+            super(subsetSize, supersetSize, format);
         }

-        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
-            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations,
+                DocValueFormat format) {
+            super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.termBytes = term;
         }

-        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations, double score) {
-            this(term, subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+        public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize,
+                InternalAggregations aggregations, double score, DocValueFormat format) {
+            this(term, subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
             this.score = score;
         }

@@ -112,7 +116,7 @@ int compareTerm(SignificantTerms.Bucket other) {

         @Override
         public String getKeyAsString() {
-            return termBytes.utf8ToString();
+            return format.format(termBytes);
         }

         @Override
@@ -122,7 +126,7 @@ public String getKey() {
         }

         @Override
         Bucket newBucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations) {
-            return new Bucket(termBytes, subsetDf, subsetSize, supersetDf, supersetSize, aggregations);
+            return new Bucket(termBytes, subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
         }

         @Override
@@ -146,7 +150,7 @@ public void writeTo(StreamOutput out) throws IOException {
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
-            builder.utf8Field(CommonFields.KEY, termBytes);
+            builder.field(CommonFields.KEY, getKeyAsString());
             builder.field(CommonFields.DOC_COUNT, getDocCount());
             builder.field("score", score);
             builder.field("bg_count", supersetDf);
@@ -158,11 +162,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws

     SignificantStringTerms() {} // for serialization

-    public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
-            SignificanceHeuristic significanceHeuristic, List<? extends InternalSignificantTerms.Bucket> buckets,
+    public SignificantStringTerms(long subsetSize, long supersetSize, String name, DocValueFormat format, int requiredSize,
+            long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends InternalSignificantTerms.Bucket> buckets,
             List<PipelineAggregator> pipelineAggregators,
             Map<String, Object> metaData) {
-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
+        super(subsetSize, supersetSize, name, format, requiredSize, minDocCount, significanceHeuristic, buckets, pipelineAggregators, metaData);
     }

     @Override
@@ -172,25 +176,26 @@ public Type type() {

     @Override
     public SignificantStringTerms create(List<SignificantStringTerms.Bucket> buckets) {
-        return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.requiredSize, this.minDocCount,
+        return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.format, this.requiredSize, this.minDocCount,
                 this.significanceHeuristic, buckets, this.pipelineAggregators(), this.metaData);
     }

     @Override
     public Bucket createBucket(InternalAggregations aggregations, SignificantStringTerms.Bucket prototype) {
         return new Bucket(prototype.termBytes, prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize,
-                aggregations);
+                aggregations, prototype.format);
     }

     @Override
     protected SignificantStringTerms create(long subsetSize, long supersetSize, List<InternalSignificantTerms.Bucket> buckets,
             InternalSignificantTerms prototype) {
-        return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.requiredSize, prototype.minDocCount,
-                prototype.significanceHeuristic, buckets, prototype.pipelineAggregators(), prototype.getMetaData());
+        return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.format, prototype.requiredSize,
+                prototype.minDocCount, prototype.significanceHeuristic, buckets, prototype.pipelineAggregators(), prototype.getMetaData());
     }

     @Override
     protected void doReadFrom(StreamInput in) throws IOException {
+        this.format = in.readNamedWriteable(DocValueFormat.class);
         this.requiredSize = readSize(in);
         this.minDocCount = in.readVLong();
         this.subsetSize = in.readVLong();
@@ -199,7 +204,7 @@ protected void doReadFrom(StreamInput in) throws IOException {
         int size = in.readVInt();
         List<InternalSignificantTerms.Bucket> buckets = new ArrayList<>(size);
         for (int i = 0; i < size; i++) {
-            Bucket bucket = new Bucket(subsetSize, supersetSize);
+            Bucket bucket = new Bucket(subsetSize, supersetSize, format);
             bucket.readFrom(in);
             buckets.add(bucket);
         }
@@ -209,6 +214,7 @@ protected void doReadFrom(StreamInput in) throws IOException {

     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeNamedWriteable(format);
         writeSize(requiredSize, out);
         out.writeVLong(minDocCount);
         out.writeVLong(subsetSize);
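The serialization change is order-sensitive: doWriteTo now writes the format first (out.writeNamedWriteable(format)) and doReadFrom reads it first (in.readNamedWriteable(DocValueFormat.class)). The two sides must mirror each other exactly, or every field read after the mismatch is garbage. A minimal sketch of the symmetry rule using plain java.io streams (not the Elasticsearch stream API):

import java.io.*;

public class RoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF("ip");  // stands in for out.writeNamedWriteable(format)
            out.writeInt(3);     // stands in for writeSize(requiredSize, out)
            out.writeLong(1L);   // stands in for out.writeVLong(minDocCount)
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            String format = in.readUTF();  // read back in exactly the same order
            int requiredSize = in.readInt();
            long minDocCount = in.readLong();
            System.out.println(format + " " + requiredSize + " " + minDocCount); // ip 3 1
        }
    }
}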
SignificantStringTermsAggregator.java
@@ -90,7 +90,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             }

             if (spare == null) {
-                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
+                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format);
             }

             bucketOrds.get(i, spare.termBytes);
@@ -117,7 +117,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws
             list[i] = bucket;
         }

-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(subsetSize, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic, Arrays.asList(list), pipelineAggregators(),
                 metaData());
     }
@@ -128,7 +128,7 @@ public SignificantStringTerms buildEmptyAggregation() {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+        return new SignificantStringTerms(0, supersetSize, name, format, bucketCountThresholds.getRequiredSize(),
                 bucketCountThresholds.getMinDocCount(), significanceHeuristic,
                 Collections.<InternalSignificantTerms.Bucket> emptyList(), pipelineAggregators(), metaData());
     }
UnmappedSignificantTerms.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -59,8 +60,8 @@ public static void registerStreams() {
     public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         //We pass zero for index/subset sizes because for the purpose of significant term analysis
         // we assume an unmapped index's size is irrelevant to the proceedings.
-        super(0, 0, name, requiredSize, minDocCount, SignificantTermsAggregatorBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC, BUCKETS,
-                pipelineAggregators, metaData);
+        super(0, 0, name, DocValueFormat.RAW, requiredSize, minDocCount, SignificantTermsAggregatorBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC,
+                BUCKETS, pipelineAggregators, metaData);
     }

     @Override
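UnmappedSignificantTerms hard-codes DocValueFormat.RAW here, which reads as the natural choice: an unmapped field has no mapping from which to derive a formatter, and since the unmapped aggregation carries no buckets, no key is ever rendered through it anyway.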
StringTerms.java
@@ -141,7 +141,7 @@ public void writeTo(StreamOutput out) throws IOException {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        builder.utf8Field(CommonFields.KEY, termBytes);
+        builder.field(CommonFields.KEY, getKeyAsString());
         builder.field(CommonFields.DOC_COUNT, getDocCount());
         if (showDocCountError) {
             builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError());
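The same one-line change lands in the plain terms aggregation: the key is rendered through getKeyAsString(), which resolves it via the bucket's formatter (the formatter plumbing for these buckets lives outside the hunks captured here), instead of being dumped as raw UTF-8. For a terms aggregation on an ip field the visible effect is roughly the following (illustrative values, not captured output):

    before:  { "key": "\uFFFD\uFFFD\u0000\u0001", "doc_count": 4 }
    after:   { "key": "192.168.0.1", "doc_count": 4 }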
SignificanceHeuristicTests.java
@@ -131,9 +131,9 @@ InternalSignificantTerms[] getRandomSignificantTerms(SignificanceHeuristic heuri
             sTerms[1] = new SignificantLongTerms();
         } else {
             BytesRef term = new BytesRef("someterm");
-            buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY));
-            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, Collections.emptyList(),
-                    null);
+            buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY, DocValueFormat.RAW));
+            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", DocValueFormat.RAW, 1, 1, heuristic, buckets,
+                    Collections.emptyList(), null);
             sTerms[1] = new SignificantStringTerms();
         }
         return sTerms;
@@ -184,15 +184,15 @@ private List<InternalAggregation> createInternalAggregations() {

     private InternalSignificantTerms createAggregation(String type, SignificanceHeuristic significanceHeuristic, List<InternalSignificantTerms.Bucket> buckets, long subsetSize, long supersetSize) {
         if (type.equals("string")) {
-            return new SignificantStringTerms(subsetSize, supersetSize, "sig_terms", 2, -1, significanceHeuristic, buckets, new ArrayList<PipelineAggregator>(), new HashMap<String, Object>());
+            return new SignificantStringTerms(subsetSize, supersetSize, "sig_terms", DocValueFormat.RAW, 2, -1, significanceHeuristic, buckets, new ArrayList<PipelineAggregator>(), new HashMap<String, Object>());
         } else {
             return new SignificantLongTerms(subsetSize, supersetSize, "sig_terms", DocValueFormat.RAW, 2, -1, significanceHeuristic, buckets, new ArrayList<PipelineAggregator>(), new HashMap<String, Object>());
         }
     }

     private InternalSignificantTerms.Bucket createBucket(String type, long subsetDF, long subsetSize, long supersetDF, long supersetSize, long label) {
         if (type.equals("string")) {
-            return new SignificantStringTerms.Bucket(new BytesRef(Long.toString(label).getBytes(StandardCharsets.UTF_8)), subsetDF, subsetSize, supersetDF, supersetSize, InternalAggregations.EMPTY);
+            return new SignificantStringTerms.Bucket(new BytesRef(Long.toString(label).getBytes(StandardCharsets.UTF_8)), subsetDF, subsetSize, supersetDF, supersetSize, InternalAggregations.EMPTY, DocValueFormat.RAW);
         } else {
             return new SignificantLongTerms.Bucket(subsetDF, subsetSize, supersetDF, supersetSize, label, InternalAggregations.EMPTY, DocValueFormat.RAW);
         }