LUCENE-10236: Updated field-weight used in CombinedFieldQuery scoring calculation, and added a test #444

Merged
Changes from 2 commits
@@ -418,7 +418,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
     }
 
     MultiNormsLeafSimScorer scoringSimScorer =
-        new MultiNormsLeafSimScorer(simWeight, context.reader(), fields, true);
+        new MultiNormsLeafSimScorer(simWeight, context.reader(), fieldAndWeights.values(), true);
     LeafSimScorer nonScoringSimScorer =
         new LeafSimScorer(simWeight, context.reader(), "pseudo_field", false);
     // we use termscorers + disjunction as an impl detail
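
For context on the one-line change above: MultiNormsLeafSimScorer (see the next hunk) iterates the FieldAndWeight entries it is handed and accumulates a weighted norm contribution per entry, so the old argument (a list built per (term, field) pair) could repeat a field once per query term and count its norm several times, while fieldAndWeights.values() contains exactly one entry per distinct field. Below is a minimal, self-contained sketch of that effect, with hypothetical names and deliberately simplified norm arithmetic — not the actual Lucene code:

    import java.util.List;
    import java.util.Map;

    // Hypothetical sketch (not the Lucene implementation): a scorer that
    // adds one weighted norm contribution per entry it receives.
    public class DuplicateNormSketch {
      record FieldAndWeight(String field, float weight) {}

      static long combinedNorm(List<FieldAndWeight> fields, Map<String, Long> norms) {
        long sum = 0;
        for (FieldAndWeight f : fields) {
          sum += (long) (f.weight() * norms.get(f.field())); // one contribution per entry
        }
        return sum;
      }

      public static void main(String[] args) {
        Map<String, Long> norms = Map.of("a", 4L, "b", 8L);

        // Old argument: one entry per (term, field) pair, so a two-term query
        // over fields a and b repeats each field and doubles the norm sum.
        List<FieldAndWeight> perTermField =
            List.of(
                new FieldAndWeight("a", 1f), new FieldAndWeight("b", 1f),  // term "foo"
                new FieldAndWeight("a", 1f), new FieldAndWeight("b", 1f)); // term "zoo"

        // New argument: fieldAndWeights.values() yields one entry per field.
        List<FieldAndWeight> perField =
            List.of(new FieldAndWeight("a", 1f), new FieldAndWeight("b", 1f));

        System.out.println(combinedNorm(perTermField, norms)); // 24
        System.out.println(combinedNorm(perField, norms));     // 12
      }
    }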
@@ -21,8 +21,10 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
+import java.util.Set;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.search.Explanation;
@@ -61,7 +63,13 @@ final class MultiNormsLeafSimScorer {
     if (needsScores) {
       final List<NumericDocValues> normsList = new ArrayList<>();
       final List<Float> weightList = new ArrayList<>();
+      final Set<String> duplicateCheckingSet = new HashSet<>();
       for (FieldAndWeight field : normFields) {
+        assert duplicateCheckingSet.add(field.field)
+            : "There is a duplicated field ["
+                + field.field
+                + "] used to construct MultiNormsLeafSimScorer";
+
         NumericDocValues norms = reader.getNormValues(field.field);
         if (norms != null) {
           normsList.add(norms);
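
A note on the assert-on-add pattern introduced above: Set.add returns false when the element is already present, so the assertion trips exactly when a field repeats, and because the set only feeds the assert, the add-and-check is skipped entirely when assertions are disabled (no -ea, as in production). A standalone sketch of the idiom, with a hypothetical class name:

    import java.util.HashSet;
    import java.util.Set;

    // Standalone sketch of the assert-on-Set.add idiom. Run with
    // `java -ea DuplicateAssertSketch` to enable the assertion.
    public class DuplicateAssertSketch {
      static void requireDistinct(String... fields) {
        Set<String> seen = new HashSet<>();
        for (String field : fields) {
          // Without -ea the add-and-check is skipped entirely (though the
          // set above is still allocated, as in the patch).
          assert seen.add(field) : "There is a duplicated field [" + field + "]";
        }
      }

      public static void main(String[] args) {
        requireDistinct("a", "b"); // passes
        requireDistinct("a", "a"); // throws AssertionError under -ea
      }
    }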
@@ -16,6 +16,10 @@
  */
 package org.apache.lucene.sandbox.search;
 
+import static com.carrotsearch.randomizedtesting.RandomizedTest.atMost;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
+
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import java.io.IOException;
 import java.util.Arrays;
@@ -165,6 +169,117 @@ public void testSameScore() throws IOException {
     dir.close();
   }
 
+  public void testSameScoreAndCollectionBetweenCompleteAndTopScores() throws IOException {
Member commented:

Could you explain how this test relates to the fix? Would it make sense to have a more targeted test similar to testCopyField?

Contributor (author) replied:

This test was actually developed in another related PR, #418; it generates duplicated field-weight pairs in the fields object, which triggers the condition. As the existing tests don't currently trigger that condition, I thought this would be a good test to put in first for both PRs.

If we were to have a focused test for this PR, it would still look very similar to this test (and to testCopyField), maybe something like the following, which just ensures the test runs through without an assertion error. The only delta is that I removed some doc-content randomization and the result comparison between TOP_SCORES and COMPLETE collection.

  public void testShouldRunWithoutAssertionException() throws IOException {
    int numDocs =
        randomBoolean()
            ? atLeast(1000)
            : atLeast(128 * 8 * 8 * 3); // make sure some terms have skip data
    int numMatchDoc = randomIntBetween(200, 500);
    int numHits = atMost(100);
    int boost1 = Math.max(1, random().nextInt(5));
    int boost2 = Math.max(1, random().nextInt(5));

    Directory dir = newDirectory();
    Similarity similarity = randomCompatibleSimilarity();

    IndexWriterConfig iwc = new IndexWriterConfig();
    iwc.setSimilarity(similarity);
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

    // adding potentially matching doc
    for (int i = 0; i < numMatchDoc; i++) {
      Document doc = new Document();

      int freqA = random().nextInt(20) + 1;
      if (randomBoolean()) {
        for (int j = 0; j < freqA; j++) {
          doc.add(new TextField("a", "foo", Store.NO));
        }
      }

      freqA = random().nextInt(20) + 1;
      if (randomBoolean()) {
        for (int j = 0; j < freqA; j++) {
          doc.add(new TextField("a", "foo" + j, Store.NO));
        }
      }

      freqA = random().nextInt(20) + 1;
      if (randomBoolean()) {
        for (int j = 0; j < freqA; j++) {
          doc.add(new TextField("a", "zoo", Store.NO));
        }
      }

      int freqB = random().nextInt(20) + 1;
      if (randomBoolean()) {
        for (int j = 0; j < freqB; j++) {
          doc.add(new TextField("b", "zoo", Store.NO));
        }
      }

      freqB = random().nextInt(20) + 1;
      if (randomBoolean()) {
        for (int j = 0; j < freqB; j++) {
          doc.add(new TextField("b", "zoo" + j, Store.NO));
        }
      }

      int freqC = random().nextInt(20) + 1;
      for (int j = 0; j < freqC; j++) {
        doc.add(new TextField("c", "bla" + j, Store.NO));
      }
      w.addDocument(doc);
    }

    IndexReader reader = w.getReader();
    IndexSearcher searcher = newSearcher(reader);
    searcher.setSimilarity(similarity);

    CombinedFieldQuery query =
        new CombinedFieldQuery.Builder()
            .addField("a", (float) boost1)
            .addField("b", (float) boost2)
            .addTerm(new BytesRef("foo"))
            .addTerm(new BytesRef("zoo"))
            .build();

    TopScoreDocCollector completeCollector =
        TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); 
    searcher.search(query, completeCollector);

    reader.close();
    w.close();
    dir.close();
  }

I guess I'm fine either way? If you prefer a more focused test, I can replace it with the above one.

Member replied:

I think starting with this more focused test makes sense for this PR.

Contributor (author) replied:

Sounds good. I've replaced the test with a more focused one.

+    int numDocs =
+        randomBoolean()
+            ? atLeast(1000)
+            : atLeast(128 * 8 * 8 * 3); // make sure some terms have skip data
+    int numMatchDoc = randomIntBetween(200, 500);
+    int numHits = atMost(100) + 1;
+    int boost1 = Math.max(1, random().nextInt(5));
+    int boost2 = Math.max(1, random().nextInt(5));
+
+    Directory dir = newDirectory();
+    Similarity similarity = randomCompatibleSimilarity();
+
+    IndexWriterConfig iwc = new IndexWriterConfig();
+    iwc.setSimilarity(similarity);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+    // adding non-matching docs
+    for (int i = 0; i < numDocs - numMatchDoc; ++i) {
+      Document doc = new Document();
+
+      int freqA = random().nextInt(50) + 1;
+      for (int j = 0; j < freqA; j++) {
+        doc.add(new TextField("a", "bar" + j, Store.NO));
+      }
+
+      int freqB = random().nextInt(50) + 1;
+      for (int j = 0; j < freqB; j++) {
+        doc.add(new TextField("b", "bla" + j, Store.NO));
+      }
+      int freqC = random().nextInt(50) + 1;
+      for (int j = 0; j < freqC; j++) {
+        doc.add(new TextField("c", "bla" + j, Store.NO));
+      }
+      w.addDocument(doc);
+    }
+
+    // adding potentially matching doc
+    for (int i = 0; i < numMatchDoc; i++) {
+      Document doc = new Document();
+
+      int freqA = random().nextInt(20) + 1;
+      if (randomBoolean()) {
+        for (int j = 0; j < freqA; j++) {
+          doc.add(new TextField("a", "foo", Store.NO));
+        }
+      }
+
+      freqA = random().nextInt(20) + 1;
+      if (randomBoolean()) {
+        for (int j = 0; j < freqA; j++) {
+          doc.add(new TextField("a", "foo" + j, Store.NO));
+        }
+      }
+
+      freqA = random().nextInt(20) + 1;
+      if (randomBoolean()) {
+        for (int j = 0; j < freqA; j++) {
+          doc.add(new TextField("a", "zoo", Store.NO));
+        }
+      }
+
+      int freqB = random().nextInt(20) + 1;
+      if (randomBoolean()) {
+        for (int j = 0; j < freqB; j++) {
+          doc.add(new TextField("b", "zoo", Store.NO));
+        }
+      }
+
+      freqB = random().nextInt(20) + 1;
+      if (randomBoolean()) {
+        for (int j = 0; j < freqB; j++) {
+          doc.add(new TextField("b", "zoo" + j, Store.NO));
+        }
+      }
+
+      int freqC = random().nextInt(20) + 1;
+      for (int j = 0; j < freqC; j++) {
+        doc.add(new TextField("c", "bla" + j, Store.NO));
+      }
+      w.addDocument(doc);
+    }
+
+    IndexReader reader = w.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setSimilarity(similarity);
+
+    CombinedFieldQuery query =
+        new CombinedFieldQuery.Builder()
+            .addField("a", (float) boost1)
+            .addField("b", (float) boost2)
+            .addTerm(new BytesRef("foo"))
+            .addTerm(new BytesRef("zoo"))
+            .build();
+
+    TopScoreDocCollector topScoresCollector =
+        TopScoreDocCollector.create(numHits, null, numHits); // TOP_SCORES
+    searcher.search(query, topScoresCollector);
+
+    TopScoreDocCollector completeCollector =
+        TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE); // COMPLETE
+    searcher.search(query, completeCollector);
+
+    CheckHits.checkEqual(
+        query, completeCollector.topDocs().scoreDocs, topScoresCollector.topDocs().scoreDocs);
+
+    reader.close();
+    w.close();
+    dir.close();
+  }
 
   public void testNormsDisabled() throws IOException {
     Directory dir = newDirectory();
     Similarity similarity = randomCompatibleSimilarity();
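
A note on the two collectors in the test above, assuming the Lucene 9.x semantics of TopScoreDocCollector.create(numHits, after, totalHitsThreshold): a reachable threshold (here numHits) lets the collector advertise ScoreMode.TOP_SCORES, so the query may skip documents whose best possible score cannot make the top hits, while Integer.MAX_VALUE forces ScoreMode.COMPLETE, scoring every match. If max-score impact data and actual scores were computed from inconsistent field weights (the bug fixed here), the two paths could return different top hits, which is what CheckHits.checkEqual guards against. A minimal sketch, with the score-mode behavior stated as an assumption rather than a guarantee:

    import org.apache.lucene.search.TopScoreDocCollector;

    // Minimal sketch (assumed TopScoreDocCollector.create semantics in
    // Lucene 9.x): totalHitsThreshold decides the reported score mode.
    public class ScoreModeSketch {
      public static void main(String[] args) {
        int numHits = 10;

        // threshold == numHits: hit counting may stop early, so the collector
        // advertises ScoreMode.TOP_SCORES and queries may skip non-competitive docs.
        TopScoreDocCollector topScores = TopScoreDocCollector.create(numHits, null, numHits);

        // threshold == Integer.MAX_VALUE: every hit must be counted, so the
        // collector advertises ScoreMode.COMPLETE and nothing is skipped.
        TopScoreDocCollector complete =
            TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE);

        System.out.println(topScores.scoreMode()); // expected: TOP_SCORES
        System.out.println(complete.scoreMode());  // expected: COMPLETE
      }
    }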