diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 6a5db93053e3b..1e3b913c5cb5a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -28,3 +28,5 @@ BWC_VERSION: - "2.11.1" - "2.11.2" - "2.12.0" + - "2.12.1" + - "2.13.0" diff --git a/.github/ISSUE_TEMPLATE/meta.yml b/.github/ISSUE_TEMPLATE/meta.yml new file mode 100644 index 0000000000000..b766a26bc3ff2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/meta.yml @@ -0,0 +1,58 @@ +name: ✨ Meta Issue +description: An issue that collects other issues together to describe a larger project or activity. +title: '[META] ' +labels: ['Meta, untriaged'] +body: + - type: textarea + attributes: + label: Please describe the end goal of this project + description: A clear and concise description of this project/endeavor. This should be understandable to someone with no context. + placeholder: Ex. Views is a way to project indices in OpenSearch, these views act as a focal point for describing the underlying data and how the data is accessed. It allows for restricting the scope and filtering the response consistently. + validations: + required: true + - type: textarea + attributes: + label: Supporting References + description: Please provide links (and descriptions!) to RFCs, design docs, etc + validations: + required: true + - type: textarea + attributes: + label: Issues + description: Please create a list of issues that should be tracked by this meta issue, including a short description. The purpose is to provide everyone on the project with an "at a glance" update of the state us the work being tracked. If you use the format "- [ ]" it will put your list into a checklist. + placeholder: Ex. - [ ] https://github.com/opensearch-project/security/issues/3888 Add views to the cluster metadata schema + validations: + required: true + - type: dropdown + attributes: + label: Related component + description: Choose a specific OpenSearch component your project belongs to. If you are unsure of which component to select or if the component is not present, select "Other". 
+ multiple: false + options: + - # Empty first option to force selection + - Build + - Clients + - Cluster Manager + - Extensions + - Indexing:Performance + - Indexing:Replication + - Indexing + - Libraries + - Other + - Plugins + - Search:Aggregations + - Search:Performance + - Search:Query Capabilities + - Search:Query Insights + - Search:Relevance + - Search:Remote Search + - Search:Resiliency + - Search:Searchable Snapshots + - Search + - Storage:Durability + - Storage:Performance + - Storage:Remote + - Storage:Snapshots + - Storage + validations: + required: true diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index d6c65ddd446cd..b2f22a90938cc 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -53,7 +53,7 @@ jobs: name: results.txt - name: Find Comment - uses: peter-evans/find-comment@v2 + uses: peter-evans/find-comment@v3 id: fc with: issue-number: ${{ github.event.number }} @@ -61,7 +61,7 @@ jobs: body-includes: 'Compatibility status:' - name: Add comment on the PR - uses: peter-evans/create-or-update-comment@v3 + uses: peter-evans/create-or-update-comment@v4 with: comment-id: ${{ steps.fc.outputs.comment-id }} issue-number: ${{ github.event.number }} diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml index df63847f8afca..b45e053cc25c2 100644 --- a/.github/workflows/create-documentation-issue.yml +++ b/.github/workflows/create-documentation-issue.yml @@ -29,7 +29,7 @@ jobs: - name: Create Issue From File id: create-issue - uses: peter-evans/create-issue-from-file@v4 + uses: peter-evans/create-issue-from-file@v5 with: title: Add documentation related to new feature content-filepath: ./ci/documentation/issue.md diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 8c33d41c6b2b4..8ac44cc37d27c 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -78,7 +78,7 @@ jobs: - name: Create Comment Success if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v3 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | @@ -101,7 +101,7 @@ jobs: - name: Create Comment Flaky if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v3 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | @@ -111,7 +111,7 @@ jobs: - name: Create Comment Failure if: ${{ github.event_name == 'pull_request_target' && failure() }} - uses: peter-evans/create-or-update-comment@v3 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 2714d45bd108f..61962c91b4903 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.9.0 + uses: lycheeverse/lychee-action@v1.9.1 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml index 34e8f57cc1878..fdc2bf16937b4 100644 --- a/.github/workflows/maintainer-approval.yml +++ 
b/.github/workflows/maintainer-approval.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - id: find-maintainers - uses: actions/github-script@v7 + uses: actions/github-script@v7.0.1 with: github-token: ${{ secrets.GITHUB_TOKEN }} result-encoding: string diff --git a/.github/workflows/poc-checklist.yml b/.github/workflows/poc-checklist.yml index 3d014e000a487..1b4f6b31e02f8 100644 --- a/.github/workflows/poc-checklist.yml +++ b/.github/workflows/poc-checklist.yml @@ -11,7 +11,7 @@ jobs: issues: write steps: - name: Add comment - uses: peter-evans/create-or-update-comment@v3 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.issue.number }} body: | diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml index 11998e36c2dbb..7efcf529588ed 100644 --- a/.github/workflows/pull-request-checks.yml +++ b/.github/workflows/pull-request-checks.yml @@ -17,7 +17,7 @@ jobs: name: Verify Description Checklist runs-on: ubuntu-latest steps: - - uses: peternied/check-pull-request-description-checklist@v1 + - uses: peternied/check-pull-request-description-checklist@v1.1 with: checklist-items: | New functionality includes testing. diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml index c305818bdb0a9..83bf4926a8c2d 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/github-script@v7 + - uses: actions/github-script@v7.0.1 with: script: | const { issue, repository } = context.payload; diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index a20c671c137b2..be2a89ac931e9 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -1,28 +1,32 @@ name: Increment Version on: + workflow_dispatch: + inputs: + tag: + description: 'the tag' + required: true + type: string push: tags: - '*.*.*' -permissions: {} +permissions: + contents: write + issues: write + pull-requests: write + jobs: build: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v2.1.0 - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 - - - uses: actions/checkout@v4 - - name: Fetch Tag and Version Information + - name: Fetch tag and version information run: | TAG=$(echo "${GITHUB_REF#refs/*/}") + if [ -n ${{ github.event.inputs.tag }} ]; then + TAG=${{ github.event.inputs.tag }} + fi CURRENT_VERSION_ARRAY=($(echo "$TAG" | tr . '\n')) BASE=$(IFS=. ; echo "${CURRENT_VERSION_ARRAY[*]:0:2}") BASE_X=$(IFS=. 
; echo "${CURRENT_VERSION_ARRAY[*]:0:1}.x") @@ -44,24 +48,22 @@ jobs: echo "NEXT_VERSION=$NEXT_VERSION" >> $GITHUB_ENV echo "NEXT_VERSION_UNDERSCORE=$NEXT_VERSION_UNDERSCORE" >> $GITHUB_ENV echo "NEXT_VERSION_ID=$NEXT_VERSION_ID" >> $GITHUB_ENV + - uses: actions/checkout@v4 with: ref: ${{ env.BASE }} - token: ${{ steps.github_app_token.outputs.token }} - - name: Increment Patch Version - run: | - echo Incrementing $CURRENT_VERSION to $NEXT_VERSION - echo " - \"$CURRENT_VERSION\"" >> .ci/bwcVersions - sed -i "s/opensearch = $CURRENT_VERSION/opensearch = $NEXT_VERSION/g" buildSrc/version.properties - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Increment Patch Version on Major.Minor branch + uses: peternied/opensearch-core-version-updater@v1 + with: + previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: true - - name: Create Pull Request + - name: Create PR for BASE + id: base_pr uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} branch: 'create-pull-request/patch-${{ env.BASE }}' commit-message: Increment version to ${{ env.NEXT_VERSION }} @@ -76,19 +78,18 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ env.BASE_X }} - token: ${{ steps.github_app_token.outputs.token }} - - name: Add bwc version to .X branch - run: | - echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION - sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Add Patch Version on Major.X branch + uses: peternied/opensearch-core-version-updater@v1 + with: + previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: false - - name: Create Pull Request + - name: Create PR for BASE_X + id: base_x_pr uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} branch: 'create-pull-request/patch-${{ env.BASE_X }}' commit-message: Add bwc version ${{ env.NEXT_VERSION }} @@ -103,19 +104,18 @@ jobs: - uses: actions/checkout@v4 with: ref: main - token: ${{ steps.github_app_token.outputs.token }} - - name: Add bwc version to main branch - run: | - echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION - sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Add Patch Version on main branch + uses: peternied/opensearch-core-version-updater@v1 + with: + 
previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: false - - name: Create Pull Request + - name: Create PR for main + id: main_pr uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: main branch: 'create-pull-request/patch-main' commit-message: Add bwc version ${{ env.NEXT_VERSION }} @@ -126,3 +126,32 @@ jobs: title: '[AUTO] [main] Add bwc version ${{ env.NEXT_VERSION }}.' body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. + + - name: Create tracking issue + id: create-issue + uses: actions/github-script@v7.0.1 + with: + script: | + const body = ` + ### Description + A new version of OpenSearch was released, to prepare for the next release new version numbers need to be updated in all active branches of development. + + ### Exit Criteria + Review and merged the following pull requests + - [ ] ${{ steps.base_pr.outputs.pull-request-url }} + - [ ] ${{ steps.base_x_pr.outputs.pull-request-url }} + - [ ] ${{ steps.main_pr.outputs.pull-request-url }} + + ### Additional Context + See project wide guidance on branching and versions [[link]](https://github.com/opensearch-project/.github/blob/main/RELEASING.md). + ` + const { data: issue }= await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["Build"], + title: "Increment version for ${{ env.NEXT_VERSION }}", + body: body + }); + console.error(JSON.stringify(issue)); + return issue.number; + result-encoding: string diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml index 6dd48ca15eaa9..dcf2a09717d28 100644 --- a/.github/workflows/wrapper.yml +++ b/.github/workflows/wrapper.yml @@ -8,4 +8,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: gradle/wrapper-validation-action@v1 + - uses: gradle/wrapper-validation-action@v2 diff --git a/.idea/vcs.xml b/.idea/vcs.xml index 48557884a8893..c668657daf908 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -1,20 +1,20 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="4"> - <component name="IssueNavigationConfiguration"> - <option name="links"> - <list> - <IssueNavigationLink> - <option name="issueRegexp" value="#(\d+)" /> - <option name="linkRegexp" value="https://github.com/opensearch-project/OpenSearch/pulls/$1" /> - </IssueNavigationLink> - <IssueNavigationLink> - <option name="issueRegexp" value="#(\d+)" /> - <option name="linkRegexp" value="https://github.com/opensearch-project/OpenSearch/issues/$1" /> - </IssueNavigationLink> - </list> - </option> - </component> + <component name="IssueNavigationConfiguration"> + <option name="links"> + <list> + <IssueNavigationLink> + <option name="issueRegexp" value="#(\d+)" /> + <option name="linkRegexp" value="https://github.com/opensearch-project/OpenSearch/pulls/$1" /> + </IssueNavigationLink> + <IssueNavigationLink> + <option name="issueRegexp" value="#(\d+)" /> + <option name="linkRegexp" value="https://github.com/opensearch-project/OpenSearch/issues/$1" /> + </IssueNavigationLink> + </list> + </option> + </component> <component name="VcsDirectoryMappings"> - <mapping directory="$PROJECT_DIR$" vcs="Git" /> + <mapping directory="" vcs="Git" /> </component> -</project> +</project> \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 10338f6646053..e54a8987e1b4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - [Admission Control] Integrate CPU AC with ResourceUsageCollector and add CPU AC stats to nodes/stats ([#10887](https://github.com/opensearch-project/OpenSearch/pull/10887)) +- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) +- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -47,6 +50,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) - Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) - Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -83,6 +89,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) - Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) - Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) +- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) +- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) ### Security @@ -101,6 +111,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) 
- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Remove ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967), [#11983](https://github.com/opensearch-project/OpenSearch/pull/11983)) - [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) - [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) @@ -115,6 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) - Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) - Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) +- Add experimental SIMD implementation of B-tree to round down dates ([#11194](https://github.com/opensearch-project/OpenSearch/issues/11194)) - Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329)) - Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) @@ -124,106 +136,51 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) - Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870)) - Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- [Tiered caching] Integrating ehcache as a disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) +- Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) +- Add remove_by_pattern ingest processor ([#11920](https://github.com/opensearch-project/OpenSearch/pull/11920)) +- Support index level allocation filtering for searchable snapshot index ([#11563](https://github.com/opensearch-project/OpenSearch/pull/11563)) +- Add `org.opensearch.rest.MethodHandlers` and `RestController#getAllHandlers` 
([11876](https://github.com/opensearch-project/OpenSearch/pull/11876)) +- New DateTime format for RFC3339 compatible date fields ([#11465](https://github.com/opensearch-project/OpenSearch/pull/11465)) +- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) +- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268) +- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) +- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) +- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) ### Dependencies -- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560), [#11796](https://github.com/opensearch-project/OpenSearch/pull/11796)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) -- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) -- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) -- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), 
[#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695)) -- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) -- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) -- Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) -- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) -- Bump `netty` from 4.1.100.Final to 4.1.104.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775)) -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) -- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) -- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) -- Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)) -- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629)) -- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) -- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) -- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) -- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448)) -- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521)) -- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539)) -- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555)) -- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) -- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) -- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) -- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) -- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) -- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) -- 
Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) -- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.0 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795)) -- Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)) +- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.33.0 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289)) +- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) +- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) +- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `opentelemetry` from 1.34.1 to 1.35.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388)) +- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.0 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464)) +- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) +- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) +- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) ### Changed -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)), ([#11751](https://github.com/opensearch-project/OpenSearch/pull/11751)) -- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) -- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) -- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) -- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) -- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) -- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) -- Add instrumentation for indexing in transport bulk action and transport shard bulk action. 
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) -- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607)) -- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) -- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) -- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) -- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) -- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) -- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) -- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) -- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) -- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) -- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) -- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) -- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) -- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) -- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) -- Added Support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) -- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890)) +- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) ### Deprecated ### Removed -- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) ### Fixed -- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) -- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) -- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) -- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) -- Fix passing wrong parameter when calling 
newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) -- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543)) -- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) -- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) -- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) -- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) -- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) -- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) -- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) -- Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) -- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) -- Fix noop_update_total metric in indexing stats cannot be updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485)) -- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) -- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) -- Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) -- Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) -- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) -- Fix parsing of single line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815)) +- Fix for deserilization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679)) +- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Add support of special WrappingSearchAsyncActionPhase so the onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293)) +- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298)) +- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481)) +- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919)) +- Warn about deprecated and ignored index.mapper.dynamic index setting 
([#11193](https://github.com/opensearch-project/OpenSearch/pull/11193)) +- Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531)) ### Security diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 21adbb0305ab1..f0851fc58d444 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -348,7 +348,7 @@ Please follow these formatting guidelines: * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. * If *absolutely* necessary, you can disable formatting for regions of code with the `// tag::NAME` and `// end::NAME` directives, but note that these are intended for use in documentation, so please make it clear what you have done, and only do this where the benefit clearly outweighs the decrease in consistency. * Note that JavaDoc and block comments i.e. `/* ... */` are not formatted, but line comments i.e `// ...` are. -* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, if might get called out in PR reviews as something to change. +* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, it might get called out in PR reviews as something to change. ## Adding Dependencies diff --git a/NOTICE.txt b/NOTICE.txt index 6c7dc983f8c7a..d463b8f28561f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10,3 +10,6 @@ Foundation (http://www.apache.org/). This product includes software developed by Joda.org (http://www.joda.org/). + +This product includes software developed by +Morten Haraldsen (ethlo) (https://github.com/ethlo) under the Apache License, version 2.0. diff --git a/TRIAGING.md b/TRIAGING.md new file mode 100644 index 0000000000000..3917f1e1442b9 --- /dev/null +++ b/TRIAGING.md @@ -0,0 +1,83 @@ +<img src="https://opensearch.org/assets/img/opensearch-logo-themed.svg" height="64px"> + +The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. + +### Do I need to attend for my issue to be addressed/triaged? + +Attendance is not required for your issue to be triaged or addressed. If not accepted the issue will be updated with a comment for next steps. All new issues are triaged weekly. + +You can track if your issue was triaged by watching your GitHub notifications for updates. + +### What happens if my issue does not get covered this time? + +Each meeting we seek to address all new issues. However, should we run out of time before your issue is discussed, you are always welcome to attend the next meeting or to follow up on the issue post itself. + +### How do I join the Triage meeting? + +Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Chime](https://aws.amazon.com/chime/), with this [meeting link](https://chime.aws/1988437365). + +After joining the Chime meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. 
+ + If you have an issue you'd like to bring forth, please prepare a link to the issue so it can be presented and viewed by everyone in the meeting. + +### Is there an agenda for each week? + +Yes, each 55-minute meeting follows this structure: +1. **Initial Gathering:** Feel free to turn on your video and engage in informal conversation. Shortly, a volunteer triage [facilitator](#what-is-the-role-of-the-facilitator) will begin the meeting and share their screen. +2. **Record Attendees:** The facilitator will request attendees to share their GitHub profile links. These links will be collected and assembled into a [tag](#how-do-triage-facilitator-tag-comments-during-the-triage-meeting) to annotate comments during the meeting. +3. **Announcements:** Any announcements will be made at the beginning of the meeting. +4. **Review of New Issues:** We start by reviewing all untriaged [issues](https://github.com/search?q=label%3Auntriaged+is%3Aopen++repo%3Aopensearch-project%2FOpenSearch+&type=issues&ref=advsearch&s=created&o=desc) for the OpenSearch repo. +5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. +6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. + +### What is the role of the facilitator? + +The facilitator is crucial in driving the meeting, ensuring a smooth flow of issues into OpenSearch for future contributions. They maintain the meeting's agenda, solicit input from attendees, and record outcomes using the triage tag as items are discussed. + +### Do I need to have already contributed to the project to attend a triage meeting? + +No prior contributions are required. All interested individuals are welcome and encouraged to attend. Triage meetings offer a fantastic opportunity for new contributors to understand the project and explore various contribution avenues. + +### What if I have an issue that is almost a duplicate, should I open a new one to be triaged? + +You can always open an [issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) including one that you think may be a duplicate. If you believe your issue is similar but distinct from an existing one, you are encouraged to file it and explain the differences during the triage meeting. + +### What if I have follow-up questions on an issue? + +If you have an existing issue you would like to discuss, you can always comment on the issue itself. Alternatively, you are welcome to come to the triage meeting to discuss. + +### Is this meeting a good place to get help setting up features on my OpenSearch instance? + +While we are always happy to help the community, the best resource for implementation questions is [the OpenSearch forum](https://forum.opensearch.org/). + +There you can find answers to many common questions as well as speak with implementation experts. + +### What are the issue labels associated with triaging? + +There are several labels that are used to identify the 'state' of issues filed in OpenSearch. +| Label | When Applied | Meaning | +|---------------|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| `Untriaged` | When issues are created or re-opened. | Issues labeled as 'Untriaged' require the attention of the repository maintainers and may need to be prioritized for quicker resolution.
It's crucial to keep the count of 'Untriaged' labels low to ensure all potential security issues are addressed in a timely manner. See [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) for more details on handling these issues. | +| `Help Wanted` | Anytime. | Issues marked as 'Help Wanted' signal that they are actionable and not the current focus of the project maintainers. Community contributions are especially encouraged for these issues. | +| `Good First Issue` | Anytime. | Issues labeled as 'Good First Issue' are small in scope and can be resolved with a single pull request. These are recommended starting points for newcomers looking to make their first contributions. | + +### What are the typical outcomes of a triaged issue? + +| Outcome | Label | Description | Canned Response | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Accepted | `-untriaged` | The issue has the details needed to be directed towards area owners. | "Thanks for filing this issue, please feel free to submit a pull request." | +| Rejected | N/A | The issue will be closed with a reason for why it was rejected. Reasons might include lack of details, or being outside the scope of the project. | "Thanks for creating this issue; however, it isn't being accepted due to {REASON}. Please feel free to open a new issue after addressing the reason." | +| Area Triage | `+{AREALABEL}` | OpenSearch has many different areas. If it's unclear whether an issue should be accepted, it will be labeled with the area and an owner will be @mentioned for follow-up. | "Thanks for creating this issue; the triage meeting was unsure if this issue should be accepted, @{PERSON} or someone from the area please review and then accept or reject this issue?" | +| Transfer | N/A | If the issue applies to another repository within the OpenSearch Project, it will be transferred accordingly. | "@opensearch-project/triage, can you please transfer this issue to project {REPOSITORY}." Or, if someone at the meeting has permissions, they can start the transfer. | + +### Is this where I should bring up potential security vulnerabilities? + +Due to the sensitive nature of security vulnerabilities, please report all potential vulnerabilities directly by following the steps outlined on the [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) document. + +### How do triage facilitator tag comments during the triage meeting? + +During the triage meeting, facilitators should use the tag _[Triage - attendees [1](#Profile_link) [2](#Profile_link)]_ to indicate a collective decision. This ensures contributors know the decision came from the meeting rather than an individual and identifies participants for any follow-up queries. + +This tag should not be used outside triage meetings. 
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 6b4634c7e791c..be4579b4e5324 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -84,3 +84,45 @@ spotless { targetExclude 'src/main/generated/**/*.java' } } + +if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { + // Add support for incubator modules on supported Java versions. + run.jvmArgs += ['--add-modules=jdk.incubator.vector'] + run.classpath += files(jar.archiveFile) + run.classpath -= sourceSets.main.output + evaluationDependsOn(':libs:opensearch-common') + + sourceSets { + java20 { + java { + srcDirs = ['src/main/java20'] + } + } + } + + configurations { + java20Implementation.extendsFrom(implementation) + } + + dependencies { + java20Implementation sourceSets.main.output + java20Implementation project(':libs:opensearch-common').sourceSets.java20.output + java20AnnotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh" + } + + compileJava20Java { + targetCompatibility = JavaVersion.VERSION_20 + options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) + } + + jar { + metaInf { + into 'versions/20' + from sourceSets.java20.output + } + manifest.attributes('Multi-Release': 'true') + } + + // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes + disableTasks('forbiddenApisJava20') +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java new file mode 100644 index 0000000000000..4e995f5a5067c --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterConstructionBenchmark { + + private List<BytesRef> items; + + @Param({ "1000000", "10000000", "50000000" }) + private int numIds; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySetFactory fuzzySetFactory; + private String fieldName; + + @Setup + public void setupIds() { + this.fieldName = IdFieldMapper.NAME; + this.items = IntStream.range(0, numIds).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + this.fuzzySetFactory = new FuzzySetFactory(Map.of(fieldName, parameters)); + } + + @Benchmark + public FuzzySet buildFilter() throws IOException { + return fuzzySetFactory.createFuzzySet(items.size(), fieldName, () -> items.iterator()); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java new file mode 100644 index 0000000000000..383539219830e --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterLookupBenchmark { + + @Param({ "50000000", "1000000" }) + private int numItems; + + @Param({ "1000000" }) + private int searchKeyCount; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySet fuzzySet; + private List<BytesRef> items; + private Random random = new Random(); + + @Setup + public void setupFilter() throws IOException { + String fieldName = IdFieldMapper.NAME; + items = IntStream.range(0, numItems).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + fuzzySet = new FuzzySetFactory(Map.of(fieldName, parameters)).createFuzzySet(numItems, fieldName, () -> items.iterator()); + } + + @Benchmark + public void contains_withExistingKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(items.get(random.nextInt(items.size()))) == FuzzySet.Result.MAYBE); + } + } + + @Benchmark + public void contains_withRandomKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(new BytesRef(UUIDs.base64UUID()))); + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java index 4e07af452968b..3909a3f4eb8fc 100644 --- a/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java @@ -21,7 +21,6 @@ import org.openjdk.jmh.infra.Blackhole; import java.util.Random; -import java.util.function.Supplier; @Fork(value = 3) @Warmup(iterations = 3, time = 1) @@ -83,17 +82,17 @@ public static class Options { "256" }) public Integer size; - @Param({ "binary", "linear" }) + @Param({ "binary", "linear", "btree" }) public String type; @Param({ "uniform", "skewed_edge", "skewed_center" }) public String distribution; public long[] queries; - public Supplier<Roundable> supplier; + public RoundableSupplier supplier; @Setup - public void setup() { + public void setup() throws ClassNotFoundException { Random random = new 
Random(size); long[] values = new long[size]; for (int i = 1; i < values.length; i++) { @@ -128,16 +127,7 @@ public void setup() { throw new IllegalArgumentException("invalid distribution: " + distribution); } - switch (type) { - case "binary": - supplier = () -> new BinarySearcher(values, size); - break; - case "linear": - supplier = () -> new BidirectionalLinearSearcher(values, size); - break; - default: - throw new IllegalArgumentException("invalid type: " + type); - } + supplier = new RoundableSupplier(type, values, size); } private static long nextPositiveLong(Random random) { diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..44ac42810996f --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) throws ClassNotFoundException { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + throw new ClassNotFoundException("BtreeSearcher is not supported below JDK 20"); + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..e81c1b137bd30 --- /dev/null +++ b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + delegate = () -> new BtreeSearcher(values, size); + break; + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/build.gradle b/build.gradle index 296c30391af09..2aac4a1e893e9 100644 --- a/build.gradle +++ b/build.gradle @@ -54,8 +54,8 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.23.2" apply false - id "org.gradle.test-retry" version "1.5.4" apply false + id "com.diffplug.spotless" version "6.25.0" apply false + id "org.gradle.test-retry" version "1.5.8" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -516,7 +516,6 @@ subprojects { includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexClusterDefaultDocRep") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexIT") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexTranslogDisabledIT") - includeClasses.add("org.opensearch.remotestore.RemoteIndexPrimaryRelocationIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreBackpressureIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreRefreshListenerIT") @@ -545,6 +544,7 @@ subprojects { includeClasses.add("org.opensearch.snapshots.SnapshotStatusApisIT") includeClasses.add("org.opensearch.test.rest.ClientYamlTestSuiteIT") includeClasses.add("org.opensearch.upgrade.DetectEsInstallationTaskTests") + includeClasses.add("org.opensearch.cluster.MinimumClusterManagerNodesIT") } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3c846b48549fb..0562ecc6ee61b 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -118,7 +118,7 @@ dependencies { api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' - api 'com.networknt:json-schema-validator:1.0.86' + api 'com.networknt:json-schema-validator:1.2.0' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" diff --git a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java index 8ecfbf40b6c62..0c901b9726992 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java @@ -94,7 +94,7 @@ public static void configureRepositories(Project project) { String revision = matcher.group(1); MavenArtifactRepository luceneRepo = repos.maven(repo -> { repo.setName("lucene-snapshots"); - repo.setUrl("https://artifacts.opensearch.org/snapshots/lucene/"); + repo.setUrl("https://ci.opensearch.org/ci/dbc/snapshots/lucene/"); }); repos.exclusiveContent(exclusiveRepo -> { exclusiveRepo.filter( diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 1ad7e056b6ae6..bc44f81a81aff 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin<Project> { - private static final String SYSTEM_JDK_VERSION = "17.0.9+9"; + private static final String SYSTEM_JDK_VERSION = "21.0.2+13"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.9+9"; + private static final String GRADLE_JDK_VERSION = "21.0.2+13"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 351b42e5bc921..4b8f52ec07615 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.22.1" + implementation "org.apache.logging.log4j:log4j-core:2.23.0" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3813750507f18..39b0335c7ef55 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,8 +1,8 @@ opensearch = 3.0.0 -lucene = 9.9.1 +lucene = 9.11.0-snapshot-8a555eb bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.1+12 +bundled_jdk = 21.0.2+13 # optional dependencies spatial4j = 0.7 @@ -18,7 +18,7 @@ asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 -antlr4 = 4.11.1 +antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 @@ -26,12 +26,12 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.104.Final +netty = 4.1.107.Final joda = 2.12.2 # project reactor -reactor_netty = 1.1.13 -reactor = 3.5.11 +reactor_netty = 1.1.15 +reactor = 3.5.14 # client dependencies httpclient5 = 5.2.1 @@ -50,12 +50,12 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.76 +bouncycastle=1.77 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.5.0 +mockito = 5.10.0 objenesis = 3.2 bytebuddy = 1.14.7 @@ -70,5 +70,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.32.0 +opentelemetry = 1.35.0 opentelemetrysemconv = 1.23.1-alpha diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java index adddb3bda725c..f609fae4e3c81 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java @@ -305,6 +305,7 @@ public void shutdown() { } } + @SuppressWarnings("removal") static class SnifferThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; diff --git 
a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java index 88f667549f3e8..faef1441d0a02 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java @@ -51,6 +51,7 @@ protected Matcher<String> nodeNameMatcher() { return is("integTest-0"); } + @SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { assumeFalse("Skipping test because it is being run against an external cluster.", diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index ededa7bff34d8..4e85d19986e43 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.6.0" + id "com.netflix.nebula.ospackage-base" version "11.8.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index b7ab2e1c2309b..ebffdde0f3699 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -117,12 +117,6 @@ ${path.logs} #opensearch.experimental.feature.extensions.enabled: false # # -# Gates the concurrent segment search feature. This feature enables concurrent segment search in a separate -# index searcher threadpool. -# -#opensearch.experimental.feature.concurrent_segment_search.enabled: false -# -# # Gates the optimization of datetime formatters caching along with change in default datetime formatter # Once there is no observed impact on performance, this feature flag can be removed. 
# diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java index d269603656114..9ca42ac5f4ec1 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java @@ -78,15 +78,14 @@ private void printPlugin(Environment env, Terminal terminal, Path plugin, String PluginInfo info = PluginInfo.readFromProperties(env.pluginsDir().resolve(plugin)); terminal.println(Terminal.Verbosity.SILENT, prefix + info.getName()); terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!PluginsService.isPluginVersionCompatible(info, Version.CURRENT)) { terminal.errorPrintln( "WARNING: plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getVersion() - + " but version " + + info.getOpenSearchVersionRangesString() + + " and is not compatible with " + Version.CURRENT - + " is required" ); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java index f4532f5f83cc4..c264788df20e8 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java @@ -70,8 +70,10 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.PosixPermissionsResetter; +import org.opensearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -284,6 +286,35 @@ static void writePlugin(String name, Path structure, String... additionalProps) writeJar(structure.resolve("plugin.jar"), className); } + static void writePlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) throws IOException { + String[] properties = Stream.concat( + Stream.of( + "description", + "fake desc", + "name", + name, + "version", + "1.0", + "dependencies", + "{opensearch:\"" + opensearchVersionRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ), + Arrays.stream(additionalProps) + ).toArray(String[]::new); + PluginTestUtil.writePluginProperties(structure, properties); + String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin"; + writeJar(structure.resolve("plugin.jar"), className); + } + + static Path createPlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) + throws IOException { + writePlugin(name, structure, opensearchVersionRange, additionalProps); + return writeZip(structure, null); + } + static void writePluginSecurityPolicy(Path pluginDir, String... 
permissions) throws IOException { StringBuilder securityPolicyContent = new StringBuilder("grant {\n "); for (String permission : permissions) { @@ -867,6 +898,32 @@ public void testInstallMisspelledOfficialPlugins() throws Exception { assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } + public void testInstallPluginWithCompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + Version.CURRENT.toString())).toUri() + .toURL() + .toString(); + skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()); + assertThat(terminal.getOutput(), containsString("100%")); + } + + public void testInstallPluginWithIncompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + // Core version is behind plugin version by one w.r.t patch, hence incompatible + Version coreVersion = Version.CURRENT; + Version pluginVersion = VersionUtils.getVersion(coreVersion.major, coreVersion.minor, (byte) (coreVersion.revision + 1)); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + pluginVersion.toString())).toUri() + .toURL() + .toString(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()) + ); + assertThat(e.getMessage(), containsString("Plugin [fake] was built for OpenSearch version ~" + pluginVersion)); + } + public void testBatchFlag() throws Exception { MockTerminal terminal = new MockTerminal(); installPlugin(terminal, true); diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java index 7bbced38c7adb..6878efce4c804 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java @@ -278,7 +278,7 @@ public void testExistingIncompatiblePlugin() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home); - String message = "plugin [fake_plugin1] was built for OpenSearch version 1.0 but version " + Version.CURRENT + " is required"; + String message = "plugin [fake_plugin1] was built for OpenSearch version 5.0.0 and is not compatible with " + Version.CURRENT; assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); assertEquals("WARNING: " + message + "\n", terminal.getErrorOutput()); @@ -286,4 +286,41 @@ public void testExistingIncompatiblePlugin() throws Exception { terminal = listPlugins(home, params); assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); } + + public void testPluginWithDependencies() throws Exception { + PluginTestUtil.writePluginProperties( + env.pluginsDir().resolve("fake_plugin1"), + "description", + "fake desc 1", + "name", + "fake_plugin1", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "org.fake1" + ); + String[] params = { "-v" }; + MockTerminal terminal = listPlugins(home, params); + assertEquals( + buildMultiline( + "Plugins 
directory: " + env.pluginsDir(), + "fake_plugin1", + "- Plugin information:", + "Name: fake_plugin1", + "Description: fake desc 1", + "Version: 1.0", + "OpenSearch Version: " + Version.CURRENT.toString(), + "Java Version: " + System.getProperty("java.specification.version"), + "Native Controller: false", + "Extended Plugins: []", + " * Classname: org.fake1", + "Folder name: null" + ), + terminal.getOutput() + ); + } } diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index 822b471e2e034..3ca6b1fe84ea7 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -13,7 +13,7 @@ repositories { gradlePluginPortal() // TODO: Find the way to use the repositories from RepositoriesSetupPlugin maven { - url = "https://artifacts.opensearch.org/snapshots/lucene/" + url = "https://ci.opensearch.org/ci/dbc/snapshots/lucene/" } } @@ -37,7 +37,7 @@ tasks.withType(JacocoReport).configureEach { if (System.getProperty("tests.coverage")) { reporting { reports { - testCodeCoverageReport(JacocoCoverageReport) { + testCodeCoverageReport(JacocoCoverageReport) { testType = TestSuiteType.UNIT_TEST } } @@ -45,6 +45,6 @@ if (System.getProperty("tests.coverage")) { // Attach code coverage report task to Gradle check task project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure { - dependsOn tasks.named('testCodeCoverageReport', JacocoReport) + dependsOn tasks.named('testCodeCoverageReport', JacocoReport) } } diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index 93e1127c97a56..f3a4bf5cc765b 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -99,7 +99,9 @@ allprojects { } } format 'misc', { - target '*.md', '*.gradle', '**/*.yaml', '**/*.yml', '**/*.svg' + target '*.md', '*.gradle', '**/*.json', '**/*.yaml', '**/*.yml', '**/*.svg' + + targetExclude '**/simple-bulk11.json', '**/simple-msearch5.json' trimTrailingWhitespace() endWithNewline() diff --git a/gradle/ide.gradle b/gradle/ide.gradle index bc442a081adf0..14d6b2982ccd0 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -82,6 +82,9 @@ if (System.getProperty('idea.active') == 'true') { runConfigurations { defaults(JUnit) { vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { + vmParameters += ' -Djava.security.manager=allow' + } } } copyright { diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f1d76d80bbfa3..82a4add334a7d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=c16d517b50dd28b3f5838f0e844b7520b8f1eb610f2f29de7e4e04a1b7c9c79b +distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 4f89b81636420..60bf488833393 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -43,3 +43,64 @@ tasks.named('forbiddenApisMain').configure { // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server replaceSignatureFiles 'jdk-signatures' } + +// Add support for incubator modules on supported Java versions. 
+if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { + sourceSets { + java20 { + java { + srcDirs = ['src/main/java20'] + } + } + } + + configurations { + java20Implementation.extendsFrom(implementation) + } + + dependencies { + java20Implementation sourceSets.main.output + } + + compileJava20Java { + targetCompatibility = JavaVersion.VERSION_20 + options.compilerArgs += ['--add-modules', 'jdk.incubator.vector'] + options.compilerArgs -= '-Werror' // use of incubator modules is reported as a warning + } + + jar { + metaInf { + into 'versions/20' + from sourceSets.java20.output + } + manifest.attributes('Multi-Release': 'true') + } + + tasks.withType(Test).configureEach { + // Relying on the convention for Test.classpath in custom Test tasks has been deprecated + // and scheduled to be removed in Gradle 9.0. Below lines are added from the migration guide: + // https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#test_task_default_classpath + testClassesDirs = testing.suites.test.sources.output.classesDirs + classpath = testing.suites.test.sources.runtimeClasspath + + // Adds the multi-release JAR to the classpath when executing tests. + // This allows newer sources to be picked up at test runtime (if supported). + classpath += files(jar.archiveFile) + // Removes the "main" sources from the classpath to avoid JarHell problems as + // the multi-release JAR already contains those classes. + classpath -= sourceSets.main.output + } + + tasks.register('roundableSimdTest', Test) { + group 'verification' + include '**/RoundableTests.class' + systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced' + } + + check.dependsOn(roundableSimdTest) + + forbiddenApisJava20 { + failOnMissingClasses = false + ignoreSignaturesOfMissingClasses = true + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index 0f289c09bbae2..60c0717a28f05 100644 --- a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -52,7 +52,7 @@ public static boolean isInetAddress(String ipString) { return ipStringToBytes(ipString) != null; } - private static byte[] ipStringToBytes(String ipString) { + public static byte[] ipStringToBytes(String ipString) { // Make a first pass to categorize the characters in this string. boolean hasColon = false; boolean hasDot = false; diff --git a/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java new file mode 100644 index 0000000000000..626fb6e6b810e --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import jdk.incubator.vector.LongVector; +import jdk.incubator.vector.Vector; +import jdk.incubator.vector.VectorOperators; +import jdk.incubator.vector.VectorSpecies; + +/** + * It uses vectorized B-tree search to find the round-down point. 
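+ * Values are laid out in blocks of {@code LANES} longs so that each block can be compared against the search key with a single vector comparison in {@code floor}.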
+ * + * @opensearch.internal + */ +@InternalApi +class BtreeSearcher implements Roundable { + private static final VectorSpecies<Long> LONG_VECTOR_SPECIES = LongVector.SPECIES_PREFERRED; + private static final int LANES = LONG_VECTOR_SPECIES.length(); + private static final int SHIFT = log2(LANES); + + private final long[] values; + private final long minValue; + + BtreeSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int blocks = (size + LANES - 1) / LANES; // number of blocks + int length = 1 + blocks * LANES; // size of the backing array (1-indexed) + + this.minValue = values[0]; + this.values = new long[length]; + build(values, 0, size, this.values, 1); + } + + /** + * Builds the B-tree memory layout. + * It builds the tree recursively, following an in-order traversal. + * + * <p> + * Each block stores 'lanes' values at indices {@code i, i + 1, ..., i + lanes - 1} where {@code i} is the + * starting offset. The starting offset of the root block is 1. The branching factor is (1 + lanes) so each + * block can have these many children. Given the starting offset {@code i} of a block, the starting offset + * of its k-th child (ranging from {@code 0, 1, ..., k}) can be computed as {@code i + ((i + k) << shift)}. + * + * @param src is the sorted input array + * @param i is the index in the input array to read the value from + * @param size the number of values in the input array + * @param dst is the output array + * @param j is the index in the output array to write the value to + * @return the next index 'i' + */ + private static int build(long[] src, int i, int size, long[] dst, int j) { + if (j < dst.length) { + for (int k = 0; k < LANES; k++) { + i = build(src, i, size, dst, j + ((j + k) << SHIFT)); + + // Fills the B-tree as a complete tree, i.e., all levels are completely filled, + // except the last level which is filled from left to right. + // The trick is to fill the destination array between indices 1...size (inclusive / 1-indexed) + // and pad the remaining array with +infinity. + dst[j + k] = (j + k <= size) ? src[i++] : Long.MAX_VALUE; + } + i = build(src, i, size, dst, j + ((j + LANES) << SHIFT)); + } + return i; + } + + @Override + public long floor(long key) { + Vector<Long> keyVector = LongVector.broadcast(LONG_VECTOR_SPECIES, key); + int i = 1, result = 1; + + while (i < values.length) { + Vector<Long> valuesVector = LongVector.fromArray(LONG_VECTOR_SPECIES, values, i); + int j = i + valuesVector.compare(VectorOperators.GT, keyVector).firstTrue(); + result = (j > i) ? j : result; + i += (j << SHIFT); + } + + assert result > 1 : "key must be greater than or equal to " + minValue; + return values[result - 1]; + } + + private static int log2(int num) { + if ((num <= 0) || ((num & (num - 1)) != 0)) { + throw new IllegalArgumentException(num + " is not a positive power of 2"); + } + return 32 - Integer.numberOfLeadingZeros(num - 1); + } +} diff --git a/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..0709ed4374227 --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary or B-tree search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: <a href="https://github.com/opensearch-project/OpenSearch/pull/9727">PR #9727</a> + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + /** + * Indicates whether the vectorized (SIMD) B-tree search implementation is to be used. + * It is true when either: + * 1. The feature flag is set to "forced", or + * 2. The platform has a minimum of 4 long vector lanes and the feature flag is set to "true". + */ + private static final boolean USE_BTREE_SEARCHER; + + /** + * This class is initialized only when: + * - JDK-20+ + * - jdk.incubator.vector.LongVector is available (--add-modules=jdk.incubator.vector is passed) + */ + private static final class VectorCheck { + final static int SPECIES_PREFERRED = jdk.incubator.vector.LongVector.SPECIES_PREFERRED.length(); + } + + static { + String simdRoundingFeatureFlag = System.getProperty("opensearch.experimental.feature.simd.rounding.enabled"); + boolean useBtreeSearcher = false; + + try { + final Class<?> incubator = Class.forName("jdk.incubator.vector.LongVector"); + + useBtreeSearcher = "forced".equalsIgnoreCase(simdRoundingFeatureFlag) + || (VectorCheck.SPECIES_PREFERRED >= 4 && "true".equalsIgnoreCase(simdRoundingFeatureFlag)); + + } catch (final ClassNotFoundException ex) { + /* do not use BtreeSearcher */ + } + + USE_BTREE_SEARCHER = useBtreeSearcher; + } + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else if (USE_BTREE_SEARCHER) { + return new BtreeSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java index dcf8dd7945012..c8fdb3333a714 100644 --- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java @@ -43,6 +43,7 @@ default CompilerResult compile(String name, String... names) { return compileWithPackage(ApiAnnotationProcessorTests.class.getPackageName(), name, names); } + @SuppressWarnings("removal") default CompilerResult compileWithPackage(String pck, String name, String... 
names) { final JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); final DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>(); diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java index ae9f629c59024..ad19f456b0df4 100644 --- a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java +++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java @@ -12,15 +12,31 @@ public class RoundableTests extends OpenSearchTestCase { - public void testFloor() { - int size = randomIntBetween(1, 256); - long[] values = new long[size]; - for (int i = 1; i < values.length; i++) { - values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; - } + public void testRoundingEmptyArray() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> RoundableFactory.create(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + } + + public void testRoundingSmallArray() { + int size = randomIntBetween(1, 64); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + + assertEquals("BidirectionalLinearSearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } - Roundable[] impls = { new BinarySearcher(values, size), new BidirectionalLinearSearcher(values, size) }; + public void testRoundingLargeArray() { + int size = randomIntBetween(65, 256); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + boolean useBtreeSearcher = "forced".equalsIgnoreCase(System.getProperty("opensearch.experimental.feature.simd.rounding.enabled")); + assertEquals(useBtreeSearcher ? "BtreeSearcher" : "BinarySearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } + + private void assertRounding(Roundable roundable, long[] values, int size) { for (int i = 0; i < 100000; i++) { // Index of the expected round-down point. int idx = randomIntBetween(0, size - 1); @@ -35,23 +51,21 @@ public void testFloor() { // round-down point, which will still floor to the same value. long key = expected + (randomNonNegativeLong() % delta); - for (Roundable roundable : impls) { - assertEquals(expected, roundable.floor(key)); - } + assertEquals(expected, roundable.floor(key)); } + + Throwable throwable = assertThrows(AssertionError.class, () -> roundable.floor(values[0] - 1)); + assertEquals("key must be greater than or equal to " + values[0], throwable.getMessage()); } - public void testFailureCases() { - Throwable throwable; + private static long[] randomArrayOfSortedValues(int size) { + int capacity = size + randomInt(20); // May be slightly more than the size. 
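+        // Implementations receive the explicit size, so trailing unused slots beyond it are never searched.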
+ long[] values = new long[capacity]; - throwable = assertThrows(IllegalArgumentException.class, () -> new BinarySearcher(new long[0], 0)); - assertEquals("at least one value must be present", throwable.getMessage()); - throwable = assertThrows(IllegalArgumentException.class, () -> new BidirectionalLinearSearcher(new long[0], 0)); - assertEquals("at least one value must be present", throwable.getMessage()); + for (int i = 1; i < size; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; + } - throwable = assertThrows(AssertionError.class, () -> new BinarySearcher(new long[] { 100 }, 1).floor(50)); - assertEquals("key must be greater than or equal to 100", throwable.getMessage()); - throwable = assertThrows(AssertionError.class, () -> new BidirectionalLinearSearcher(new long[] { 100 }, 1).floor(50)); - assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + return values; } } diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.9.1.jar.sha1 b/libs/core/licenses/lucene-core-9.9.1.jar.sha1 deleted file mode 100644 index ae596196d9e6a..0000000000000 --- a/libs/core/licenses/lucene-core-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 6a92993f5dd42..66ba446d4fc54 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -98,8 +98,10 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_1); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_9_1); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index 3e996bdee83a2..ea23b3d81a775 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -56,6 +56,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import 
org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.semver.SemverRange; import java.io.ByteArrayInputStream; import java.io.EOFException; @@ -750,6 +751,8 @@ public Object readGenericValue() throws IOException { return readCollection(StreamInput::readGenericValue, HashSet::new, Collections.emptySet()); case 26: return readBigInteger(); + case 27: + return readSemverRange(); default: throw new IOException("Can't read unknown type [" + type + "]"); } @@ -1090,6 +1093,10 @@ public Version readVersion() throws IOException { return Version.fromId(readVInt()); } + public SemverRange readSemverRange() throws IOException { + return SemverRange.fromString(readString()); + } + /** Reads the {@link Version} from the input stream */ public Build readBuild() throws IOException { // the following is new for opensearch: we write the distribution to support any "forks" diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index 2d69e1c686df3..b7599265aece3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -54,6 +54,7 @@ import org.opensearch.core.common.settings.SecureString; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.semver.SemverRange; import java.io.EOFException; import java.io.FileNotFoundException; @@ -784,6 +785,10 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep o.writeByte((byte) 26); o.writeString(v.toString()); }); + writers.put(SemverRange.class, (o, v) -> { + o.writeByte((byte) 27); + o.writeSemverRange((SemverRange) v); + }); WRITERS = Collections.unmodifiableMap(writers); } @@ -1101,6 +1106,10 @@ public void writeVersion(final Version version) throws IOException { writeVInt(version.id); } + public void writeSemverRange(final SemverRange range) throws IOException { + writeString(range.toString()); + } + /** Writes the OpenSearch {@link Build} informn to the output stream */ public void writeBuild(final Build build) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java new file mode 100644 index 0000000000000..da87acc7124aa --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver; + +import org.opensearch.Version; +import org.opensearch.common.Nullable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.expr.Caret; +import org.opensearch.semver.expr.Equal; +import org.opensearch.semver.expr.Expression; +import org.opensearch.semver.expr.Tilde; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Arrays.stream; + +/** + * Represents a single semver range that allows for specifying which {@code org.opensearch.Version}s satisfy the range. + * It is composed of a range version and a range operator. Following are the supported operators: + * <ul> + * <li>'=' Requires exact match with the range version. For example, =1.2.3 range would match only 1.2.3</li> + * <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li> + * <li>'^' Allows for patch and minor version variability starting from the range version. For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li> + * </ul> + */ +public class SemverRange implements ToXContentFragment { + + private final Version rangeVersion; + private final RangeOperator rangeOperator; + + public SemverRange(final Version rangeVersion, final RangeOperator rangeOperator) { + this.rangeVersion = rangeVersion; + this.rangeOperator = rangeOperator; + } + + /** + * Constructs a {@code SemverRange} from its string representation. + * @param range given range + * @return a {@code SemverRange} + */ + public static SemverRange fromString(final String range) { + RangeOperator rangeOperator = RangeOperator.fromRange(range); + String version = range.replaceFirst(rangeOperator.asEscapedString(), ""); + if (!Version.stringHasLength(version)) { + throw new IllegalArgumentException("Version cannot be empty"); + } + return new SemverRange(Version.fromString(version), rangeOperator); + } + + /** + * Return the range operator for this range. + * @return range operator + */ + public RangeOperator getRangeOperator() { + return rangeOperator; + } + + /** + * Return the version for this range. + * @return the range version + */ + public Version getRangeVersion() { + return rangeVersion; + } + + /** + * Check if range is satisfied by given version string. + * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + */ + public boolean isSatisfiedBy(final String versionToEvaluate) { + return isSatisfiedBy(Version.fromString(versionToEvaluate)); + } + + /** + * Check if range is satisfied by given version. 
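+ * For example, the range {@code ~2.3.4} is satisfied by 2.3.5 but not by 2.3.3 or 2.4.0.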
+ * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + * @see #isSatisfiedBy(String) + */ + public boolean isSatisfiedBy(final Version versionToEvaluate) { + return this.rangeOperator.expression.evaluate(this.rangeVersion, versionToEvaluate); + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SemverRange range = (SemverRange) o; + return Objects.equals(rangeVersion, range.rangeVersion) && rangeOperator == range.rangeOperator; + } + + @Override + public int hashCode() { + return Objects.hash(rangeVersion, rangeOperator); + } + + @Override + public String toString() { + return rangeOperator.asString() + rangeVersion; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder.value(toString()); + } + + /** + * A range operator. + */ + public enum RangeOperator { + + EQ("=", new Equal()), + TILDE("~", new Tilde()), + CARET("^", new Caret()), + DEFAULT("", new Equal()); + + private final String operator; + private final Expression expression; + + RangeOperator(final String operator, final Expression expression) { + this.operator = operator; + this.expression = expression; + } + + /** + * String representation of the range operator. + * + * @return range operator as string + */ + public String asString() { + return operator; + } + + /** + * Escaped string representation of the range operator, + * if operator is a regex character. + * + * @return range operator as escaped string, if operator is a regex character + */ + public String asEscapedString() { + if (Objects.equals(operator, "^")) { + return "\\^"; + } + return operator; + } + + public static RangeOperator fromRange(final String range) { + Optional<RangeOperator> rangeOperator = stream(values()).filter( + operator -> operator != DEFAULT && range.startsWith(operator.asString()) + ).findFirst(); + return rangeOperator.orElse(DEFAULT); + } + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java new file mode 100644 index 0000000000000..ce2b74dde0865 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing for minor and patch version variability. + */ +public class Caret implements Expression { + + /** + * Checks if the given version is compatible with the range version allowing for minor and + * patch version variability. + * Allows all versions starting from the rangeVersion upto next major version (exclusive). 
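+ * For example, with a range version of 1.2.3, versions 1.2.4 and 1.9.9 are compatible while 1.2.2 and 2.0.0 are not.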
+ * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString((rangeVersion.major + 1) + ".0.0"); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java new file mode 100644 index 0000000000000..d3e1d63060b77 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate equality of versions. + */ +public class Equal implements Expression { + + /** + * Checks if a given version matches a certain range version. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are equal {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + return versionToEvaluate.equals(rangeVersion); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java new file mode 100644 index 0000000000000..68bb4e249836a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * An evaluation expression. + */ +public interface Expression { + + /** + * Evaluates an expression. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return the result of the expression evaluation + */ + boolean evaluate(final Version rangeVersion, final Version versionToEvaluate); +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java new file mode 100644 index 0000000000000..5f62ffe62ddeb --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing patch version variability. + */ +public class Tilde implements Expression { + + /** + * Checks if the given version is compatible with a range version allowing for patch version variability. + * Allows all versions starting from the rangeVersion upto next minor version (exclusive). 
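+ * For example, with a range version of 1.2.3, versions 1.2.4 and 1.2.9 are compatible while 1.2.2 and 1.3.0 are not.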
+ * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString(rangeVersion.major + "." + (rangeVersion.minor + 1) + "." + 0); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java new file mode 100644 index 0000000000000..06cf9feaaaf8f --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** Expressions library module */ +package org.opensearch.semver.expr; diff --git a/server/src/main/java/org/opensearch/common/cache/tier/package-info.java b/libs/core/src/main/java/org/opensearch/semver/package-info.java similarity index 70% rename from server/src/main/java/org/opensearch/common/cache/tier/package-info.java rename to libs/core/src/main/java/org/opensearch/semver/package-info.java index 7ad81dbe3073c..ada935582d408 100644 --- a/server/src/main/java/org/opensearch/common/cache/tier/package-info.java +++ b/libs/core/src/main/java/org/opensearch/semver/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Base package for cache tier support. */ -package org.opensearch.common.cache.tier; +/** Semver library module */ +package org.opensearch.semver; diff --git a/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java new file mode 100644 index 0000000000000..af1d95b2561b7 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver; + +import org.opensearch.test.OpenSearchTestCase; + +public class SemverRangeTests extends OpenSearchTestCase { + + public void testRangeWithEqualsOperator() { + SemverRange range = SemverRange.fromString("=1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.EQ); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithDefaultOperator() { + SemverRange range = SemverRange.fromString("1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.DEFAULT); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithTildeOperator() { + SemverRange range = SemverRange.fromString("~2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.TILDE); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.3.12")); + + assertFalse(range.isSatisfiedBy("2.3.0")); + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("2.4.0")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testRangeWithCaretOperator() { + SemverRange range = SemverRange.fromString("^2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.CARET); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.4.12")); + + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testInvalidRanges() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and 
revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + expectThrows(NumberFormatException.class, () -> SemverRange.fromString("$1.2.3")); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java new file mode 100644 index 0000000000000..3cb168d42cda0 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class CaretTests extends OpenSearchTestCase { + + public void testMinorAndPatchVersionVariability() { + Caret caretExpr = new Caret(); + Version rangeVersion = Version.fromString("1.2.3"); + + // Compatible versions + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.3.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.9.9"))); + + // Incompatible versions + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java new file mode 100644 index 0000000000000..fb090865157ed --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class EqualTests extends OpenSearchTestCase { + + public void testEquality() { + Equal equalExpr = new Equal(); + Version rangeVersion = Version.fromString("1.2.3"); + assertTrue(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertFalse(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java new file mode 100644 index 0000000000000..8666611645c3a --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class TildeTests extends OpenSearchTestCase { + + public void testPatchVersionVariability() { + Tilde tildeExpr = new Tilde(); + Version rangeVersion = Version.fromString("1.2.3"); + + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.9"))); + + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.3.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java index 898ce7e4e913b..ab48cc2357e7f 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java @@ -129,6 +129,7 @@ private void configureSocket(ServerSocket socket) throws IOException { socket.setReuseAddress(config.tcpReuseAddress()); } + @SuppressWarnings("removal") protected static SocketChannel accept(ServerSocketChannel serverSocketChannel) throws IOException { try { assert serverSocketChannel.isBlocking() == false; diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 3df8e42fe4f14..530aa1d86afc7 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -388,6 +388,7 @@ private void configureSocket(Socket socket, boolean isConnectComplete) throws IO } } + @SuppressWarnings("removal") private static void connect(SocketChannel socketChannel, InetSocketAddress remoteAddress) throws IOException { try { AccessController.doPrivileged((PrivilegedExceptionAction<Boolean>) () -> socketChannel.connect(remoteAddress)); diff --git a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java index 4a200a5dfa9bd..969fa91b50538 100644 --- 
a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java +++ b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java @@ -65,6 +65,7 @@ protected Class<?> findClass(String name) throws ClassNotFoundException { /** * Return a new classloader across the parent and extended loaders. */ + @SuppressWarnings("removal") public static ExtendedPluginsClassLoader create(ClassLoader parent, List<ClassLoader> extendedLoaders) { return AccessController.doPrivileged( (PrivilegedAction<ExtendedPluginsClassLoader>) () -> new ExtendedPluginsClassLoader(parent, extendedLoaders) diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java index f41c49844997d..a2531f4a9156e 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java @@ -76,6 +76,7 @@ * @see <a href="http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html"> * http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html</a> */ +@SuppressWarnings("removal") public class SecureSM extends SecurityManager { private final String[] classesThatCanExit; diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java index fe239fea8129e..3c8e78a902fcb 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java @@ -18,6 +18,7 @@ import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory; import java.util.concurrent.ForkJoinWorkerThread; +@SuppressWarnings("removal") public class SecuredForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory { static AccessControlContext contextWithPermissions(Permission... 
perms) { Permissions permissions = new Permissions(); diff --git a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java index 026ffb080ee61..fd666c70cfebb 100644 --- a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java +++ b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java @@ -41,6 +41,7 @@ import junit.framework.TestCase; /** Simple tests for SecureSM */ +@SuppressWarnings("removal") public class SecureSMTests extends TestCase { static { // install a mock security policy: diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java index d57def9406b17..f38fdd6412d79 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -34,6 +34,11 @@ public Counter createUpDownCounter(String name, String description, String unit) return metricsTelemetry.createUpDownCounter(name, description, unit); } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return metricsTelemetry.createHistogram(name, description, unit); + } + @Override public void close() throws IOException { metricsTelemetry.close(); diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java new file mode 100644 index 0000000000000..95ada626e21ee --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Histogram records the value for an existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Histogram { + + /** + * record value. + * @param value value to be added. + */ + void record(double value); + + /** + * record value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. + */ + void record(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java index 61b3df089928b..94d19bda31f34 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -36,4 +36,15 @@ public interface MetricsRegistry extends Closeable { * @return counter. */ Counter createUpDownCounter(String name, String description, String unit); + + /** + * Creates the histogram type of Metric. Implementation framework will take care + * of the bucketing strategy. + * + * @param name name of the histogram. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return histogram. 
+ */ + Histogram createHistogram(String name, String description, String unit); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java new file mode 100644 index 0000000000000..20e72bccad899 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Histogram} + * {@opensearch.internal} + */ +@InternalApi +public class NoopHistogram implements Histogram { + + /** + * No-op Histogram instance + */ + public final static NoopHistogram INSTANCE = new NoopHistogram(); + + private NoopHistogram() {} + + @Override + public void record(double value) { + + } + + @Override + public void record(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java index 640c6842a8960..d3dda68cfae71 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -10,6 +10,7 @@ import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsRegistry; import java.io.IOException; @@ -38,6 +39,11 @@ public Counter createUpDownCounter(String name, String description, String unit) return NoopCounter.INSTANCE; } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + @Override public void close() throws IOException { diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java index 6171641db5f07..02f126075845b 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java @@ -48,4 +48,15 @@ public void testUpDownCounter() { assertSame(mockCounter, counter); } + public void testHistogram() { + Histogram mockHistogram = mock(Histogram.class); + when(defaultMeterRegistry.createHistogram(any(String.class), any(String.class), any(String.class))).thenReturn(mockHistogram); + Histogram histogram = defaultMeterRegistry.createHistogram( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testHistogram", + "test up-down counter", + "ms" + ); + assertSame(mockHistogram, histogram); + } + } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java index 4c05f0058f2ed..2f4dada29780d 100644 --- 
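The histogram addition above only extends the metrics interfaces plus the no-op and delegating default implementations; the backing telemetry framework decides the bucketing strategy. A short sketch of how a caller might record latencies through the new method, assuming a MetricsRegistry is already injected and that Tags.create().addTag(...) from the existing tags API is available; the metric name, unit, and tag key are illustrative:

import org.opensearch.telemetry.metrics.Histogram;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;

public final class QueryLatencyRecorder {
    private final Histogram latencyHistogram;

    public QueryLatencyRecorder(MetricsRegistry registry) {
        // The registry picks the concrete implementation (NoopHistogram when telemetry is disabled).
        this.latencyHistogram = registry.createHistogram("search.query.latency", "Query phase latency", "ms");
    }

    public void onQueryCompleted(long tookMillis, String index) {
        // Record the observation with a dimension; both record overloads are part of the new interface.
        latencyHistogram.record(tookMillis, Tags.create().addTag("index", index));
    }
}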
a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java @@ -19,6 +19,7 @@ */ @InternalApi public interface XContentContraints { + final String DEFAULT_CODEPOINT_LIMIT_PROPERTY = "opensearch.xcontent.codepoint.max"; final String DEFAULT_MAX_STRING_LEN_PROPERTY = "opensearch.xcontent.string.length.max"; final String DEFAULT_MAX_NAME_LEN_PROPERTY = "opensearch.xcontent.name.length.max"; final String DEFAULT_MAX_DEPTH_PROPERTY = "opensearch.xcontent.depth.max"; @@ -32,4 +33,6 @@ public interface XContentContraints { final int DEFAULT_MAX_DEPTH = Integer.parseInt( System.getProperty(DEFAULT_MAX_DEPTH_PROPERTY, "1000" /* StreamReadConstraints.DEFAULT_MAX_DEPTH */) ); + + final int DEFAULT_CODEPOINT_LIMIT = Integer.parseInt(System.getProperty(DEFAULT_CODEPOINT_LIMIT_PROPERTY, "52428800" /* ~50 Mb */)); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java index 3f6a4b3aeead7..0e69c6c33b923 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java @@ -38,6 +38,7 @@ import com.fasterxml.jackson.core.StreamReadFeature; import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactoryBuilder; import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; @@ -55,6 +56,8 @@ import java.io.Reader; import java.util.Set; +import org.yaml.snakeyaml.LoaderOptions; + /** * A YAML based content implementation using Jackson. 
*/ @@ -67,7 +70,9 @@ public static XContentBuilder contentBuilder() throws IOException { public static final YamlXContent yamlXContent; static { - yamlFactory = new YAMLFactory(); + final LoaderOptions loaderOptions = new LoaderOptions(); + loaderOptions.setCodePointLimit(DEFAULT_CODEPOINT_LIMIT); + yamlFactory = new YAMLFactoryBuilder(new YAMLFactory()).loaderOptions(loaderOptions).build(); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); yamlFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); yamlFactory.setStreamReadConstraints( diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 0e431d8ea4277..81a2b0e290121 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -85,7 +85,8 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_STRING_LEN), /* YAML parser limitation */ XContentType.YAML, - () -> randomAlphaOfLengthBetween(1, 3140000) + /* use 75% of the limit, difficult to get the exact size of the content right */ + () -> randomRealisticUnicodeOfCodepointLengthBetween(1, (int) (YamlXContent.DEFAULT_CODEPOINT_LIMIT * 0.75)) ); private static final Map<XContentType, Supplier<String>> OFF_LIMIT_GENERATORS = Map.of( @@ -97,7 +98,7 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLength(SmileXContent.DEFAULT_MAX_STRING_LEN + 1), /* YAML parser limitation */ XContentType.YAML, - () -> randomRealisticUnicodeOfCodepointLength(3145730) + () -> randomRealisticUnicodeOfCodepointLength(YamlXContent.DEFAULT_CODEPOINT_LIMIT + 1) ); private static final Map<XContentType, Supplier<String>> FIELD_NAME_GENERATORS = Map.of( diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java index 71af708f2e1dc..648536f9136a8 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -36,10 +36,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.Operator; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,10 +48,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class QueryStringWithAnalyzersIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryStringWithAnalyzersIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public QueryStringWithAnalyzersIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryStringWithAnalyzersIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory 
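The YamlXContent change above routes the new DEFAULT_CODEPOINT_LIMIT (about 50 MB by default, overridable via the opensearch.xcontent.codepoint.max system property) into SnakeYAML's LoaderOptions through Jackson's YAMLFactoryBuilder, and the parser tests now size their generated content relative to that limit instead of a hard-coded value. A standalone sketch of the same construction pattern, using only the Jackson and SnakeYAML calls that appear in the diff:

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactoryBuilder;
import org.yaml.snakeyaml.LoaderOptions;

public class YamlCodePointLimitExample {
    public static void main(String[] args) throws Exception {
        // Cap the total number of code points a single YAML document may contain (~50 MB here).
        LoaderOptions loaderOptions = new LoaderOptions();
        loaderOptions.setCodePointLimit(Integer.parseInt(System.getProperty("opensearch.xcontent.codepoint.max", "52428800")));

        YAMLFactory yamlFactory = new YAMLFactoryBuilder(new YAMLFactory()).loaderOptions(loaderOptions).build();
        yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);

        // Documents larger than the limit now fail fast instead of being parsed unbounded.
        try (JsonParser parser = yamlFactory.createParser("key: value")) {
            parser.nextToken();                         // START_OBJECT
            System.out.println(parser.nextFieldName()); // prints "key"
        }
    }
}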
@@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index 26f4acb2b1e6a..e55c1c69b2e40 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -44,7 +43,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -68,10 +67,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class HighlighterWithAnalyzersTests extends ParameterizedOpenSearchIntegTestCase { +public class HighlighterWithAnalyzersTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public HighlighterWithAnalyzersTests(Settings dynamicSettings) { - super(dynamicSettings); + public HighlighterWithAnalyzersTests(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -82,11 +81,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle new file mode 100644 index 0000000000000..c7052896e609b --- /dev/null +++ b/modules/cache-common/build.gradle @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +opensearchplugin { + description 'Module for caches which are optional and do not require additional security permission' + classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. 
+ systemProperty 'tests.security.manager', 'false' +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicy.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicy.java new file mode 100644 index 0000000000000..55aee6825b992 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicy.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CachePolicyInfoWrapper; +import org.opensearch.common.cache.CacheTierPolicy; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; + +import java.util.function.Function; + +/** + * A cache tier policy which accepts queries whose took time is greater than some threshold, + * which is specified as a dynamic cluster-level setting. The threshold should be set to approximately + * the time it takes to get a result from the cache tier. + * The policy expects to be able to read a CachePolicyInfoWrapper from the start of the BytesReference. + */ +public class DiskTierTookTimePolicy implements CacheTierPolicy<BytesReference> { + /*public static final Setting<TimeValue> DISK_TOOKTIME_THRESHOLD_SETTING = Setting.positiveTimeSetting( + "indices.requests.cache.disk.tooktime.threshold", + TimeValue.ZERO, + Setting.Property.Dynamic, + Setting.Property.NodeScope + );*/ // Set this to TimeValue.ZERO to let all data through + + private TimeValue threshold; + private final Function<BytesReference, CachePolicyInfoWrapper> getPolicyInfoFn; + + public DiskTierTookTimePolicy( + Settings settings, + Function<BytesReference, CachePolicyInfoWrapper> getPolicyInfoFn + ) { + this.threshold = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOKTIME_THRESHOLD.get(settings); + this.getPolicyInfoFn = getPolicyInfoFn; + } + + protected void setThreshold(TimeValue threshold) { // protected so that we can manually set value in unit test + this.threshold = threshold; + } + + @Override + public boolean checkData(BytesReference data) { + Long tookTimeNanos; + try { + tookTimeNanos = getPolicyInfoFn.apply(data).getTookTimeNanos(); + } catch (Exception e) { + // If we can't read a CachePolicyInfoWrapper from the BytesReference, reject the data + return false; + } + + if (tookTimeNanos == null) { + // If the wrapper contains null took time, reject the data + // This can happen if no CachePolicyInfoWrapper was written to the BytesReference, as the wrapper's constructor + // reads an optional long, which will end up as null in this case. This is why we should reject it. 
+ return false; + } + + if (threshold.equals(TimeValue.ZERO)) { + // If the policy is set to zero, admit any well-formed data + return true; + } + TimeValue tookTime = TimeValue.timeValueNanos(tookTimeNanos); + if (tookTime.compareTo(threshold) < 0) { // negative -> tookTime is shorter than threshold + return false; + } + return true; + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java new file mode 100644 index 0000000000000..99791bc08ca1a --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -0,0 +1,367 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheTierPolicy; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.common.util.iterable.Iterables; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; + +/** + * This cache spills over evicted items from the heap tier to the disk tier. All new items are first cached on heap, + * and items evicted from the on-heap cache are moved to the disk-based cache. If the disk-based cache also fills up, + * items are eventually evicted from it and removed, which results in a cache miss. + * + * @param <K> Type of key + * @param <V> Type of value + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieredSpilloverCache<K, V> implements ICache<K, V> { + + private final ICache<K, V> diskCache; + private final ICache<K, V> onHeapCache; + private final RemovalListener<K, V> removalListener; + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); + ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock()); + /** + * Maintains caching tiers in ascending order of cache latency.
+ */ + private final List<ICache<K, V>> cacheList; + private final List<CacheTierPolicy<V>> policies; + + TieredSpilloverCache(Builder<K, V> builder) { + Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null"); + Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null"); + this.removalListener = Objects.requireNonNull(builder.removalListener, "Removal listener can't be null"); + + this.onHeapCache = builder.onHeapCacheFactory.create( + new CacheConfig.Builder<K, V>().setRemovalListener(new RemovalListener<K, V>() { + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + try (ReleasableLock ignore = writeLock.acquire()) { + if (checkPolicies(notification.getValue())) { + diskCache.put(notification.getKey(), notification.getValue()); + } + } + removalListener.onRemoval(notification); + } + }) + .setKeyType(builder.cacheConfig.getKeyType()) + .setValueType(builder.cacheConfig.getValueType()) + .setSettings(builder.cacheConfig.getSettings()) + .setWeigher(builder.cacheConfig.getWeigher()) + .build(), + builder.cacheType, + builder.cacheFactories + + ); + this.diskCache = builder.diskCacheFactory.create(builder.cacheConfig, builder.cacheType, builder.cacheFactories); + this.cacheList = Arrays.asList(onHeapCache, diskCache); + + List<CacheTierPolicy<V>> builderPolicies = builder.policies; + if (builderPolicies == null) { + builderPolicies = new ArrayList<>(); + } + this.policies = builderPolicies; + } + + // Package private for testing + ICache<K, V> getOnHeapCache() { + return onHeapCache; + } + + // Package private for testing + ICache<K, V> getDiskCache() { + return diskCache; + } + + @Override + public V get(K key) { + return getValueFromTieredCache().apply(key); + } + + @Override + public void put(K key, V value) { + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + } + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + + V cacheValue = getValueFromTieredCache().apply(key); + if (cacheValue == null) { + // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. + // This is needed as there can be many requests for the same key at the same time and we only want to load + // the value once. + V value = null; + try (ReleasableLock ignore = writeLock.acquire()) { + value = onHeapCache.computeIfAbsent(key, loader); + } + return value; + } + return cacheValue; + } + + @Override + public void invalidate(K key) { + // We invalidate the key from all caches even though it is present in only one of them. + // We do this because we don't know which tier holds it. We could do a get from both tiers and check, but that would + // also trigger a hit/miss listener event, so we skip it for now. + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidate(key); + } + } + } + + @Override + public void invalidateAll() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidateAll(); + } + } + } + + /** + * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache.
+ * @return An iterable over (onHeap + disk) keys + */ + @SuppressWarnings("unchecked") + @Override + public Iterable<K> keys() { + return Iterables.concat(onHeapCache.keys(), diskCache.keys()); + } + + @Override + public long count() { + long count = 0; + for (ICache<K, V> cache : cacheList) { + count += cache.count(); + } + return count; + } + + @Override + public void refresh() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.refresh(); + } + } + } + + @Override + public void close() throws IOException { + for (ICache<K, V> cache : cacheList) { + cache.close(); + } + } + + private Function<K, V> getValueFromTieredCache() { + return key -> { + try (ReleasableLock ignore = readLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + V value = cache.get(key); + if (value != null) { + // update hit stats + return value; + } else { + // update miss stats + } + } + } + return null; + }; + } + + boolean checkPolicies(V value) { + for (CacheTierPolicy<V> policy : policies) { + if (!policy.checkData(value)) { + return false; + } + } + return true; + } + + /** + * Factory to create TieredSpilloverCache objects. + */ + public static class TieredSpilloverCacheFactory implements ICache.Factory { + + /** + * Defines cache name + */ + public static final String TIERED_SPILLOVER_CACHE_NAME = "tiered_spillover"; + + /** + * Default constructor + */ + public TieredSpilloverCacheFactory() {} + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Settings settings = config.getSettings(); + Setting<String> onHeapSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String onHeapCacheStoreName = onHeapSetting.get(settings); + if (!cacheFactories.containsKey(onHeapCacheStoreName)) { + throw new IllegalArgumentException( + "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory onHeapCacheFactory = cacheFactories.get(onHeapCacheStoreName); + + Setting<String> onDiskSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String diskCacheStoreName = onDiskSetting.get(settings); + if (!cacheFactories.containsKey(diskCacheStoreName)) { + throw new IllegalArgumentException( + "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); + return new Builder<K, V>().setDiskCacheFactory(diskCacheFactory) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setRemovalListener(config.getRemovalListener()) + .setCacheConfig(config) + .setCacheType(cacheType) + //.setPolicy(new DiskTierTookTimePolicy(settings)) + .build(); + } + + @Override + public String getCacheName() { + return TIERED_SPILLOVER_CACHE_NAME; + } + } + + /** + * Builder object for tiered spillover cache. 
+ * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> { + private ICache.Factory onHeapCacheFactory; + private ICache.Factory diskCacheFactory; + private RemovalListener<K, V> removalListener; + private CacheConfig<K, V> cacheConfig; + private CacheType cacheType; + private Map<String, ICache.Factory> cacheFactories; + private final ArrayList<CacheTierPolicy<V>> policies = new ArrayList<>(); + + /** + * Default constructor + */ + public Builder() {} + + /** + * Set onHeap cache factory + * @param onHeapCacheFactory Factory for onHeap cache. + * @return builder + */ + public Builder<K, V> setOnHeapCacheFactory(ICache.Factory onHeapCacheFactory) { + this.onHeapCacheFactory = onHeapCacheFactory; + return this; + } + + /** + * Set disk cache factory + * @param diskCacheFactory Factory for disk cache. + * @return builder + */ + public Builder<K, V> setDiskCacheFactory(ICache.Factory diskCacheFactory) { + this.diskCacheFactory = diskCacheFactory; + return this; + } + + /** + * Set removal listener for tiered cache. + * @param removalListener Removal listener + * @return builder + */ + public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + /** + * Set cache config. + * @param cacheConfig cache config. + * @return builder + */ + public Builder<K, V> setCacheConfig(CacheConfig<K, V> cacheConfig) { + this.cacheConfig = cacheConfig; + return this; + } + + /** + * Set cache type. + * @param cacheType Cache type + * @return builder + */ + public Builder<K, V> setCacheType(CacheType cacheType) { + this.cacheType = cacheType; + return this; + } + + /** + * Set cache factories + * @param cacheFactories cache factories + * @return builder + */ + public Builder<K, V> setCacheFactories(Map<String, ICache.Factory> cacheFactories) { + this.cacheFactories = cacheFactories; + return this; + } + + public Builder<K, V> setPolicy(CacheTierPolicy<V> policy) { + this.policies.add(policy); + return this; + } + + public Builder<K, V> setPolicies(List<CacheTierPolicy<V>> policies) { + this.policies.addAll(policies); + return this; + } + + /** + * Build tiered spillover cache. + * @return TieredSpilloverCache + */ + public TieredSpilloverCache<K, V> build() { + return new TieredSpilloverCache<>(this); + } + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java new file mode 100644 index 0000000000000..132c13d76328c --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Plugin for TieredSpilloverCache. 
+ */ +public class TieredSpilloverCachePlugin extends Plugin implements CachePlugin { + + /** + * Plugin name + */ + public static final String TIERED_CACHE_SPILLOVER_PLUGIN_NAME = "tieredSpilloverCachePlugin"; + + /** + * Default constructor + */ + public TieredSpilloverCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME, + new TieredSpilloverCache.TieredSpilloverCacheFactory() + ); + } + + @Override + public List<Setting<?>> getSettings() { + List<Setting<?>> settingList = new ArrayList<>(); + for (CacheType cacheType : CacheType.values()) { + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOKTIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + } + return settingList; + } + + @Override + public String getName() { + return TIERED_CACHE_SPILLOVER_PLUGIN_NAME; + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java new file mode 100644 index 0000000000000..01e882ae1d30b --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import static org.opensearch.common.settings.Setting.Property.Dynamic; +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings related to TieredSpilloverCache. + */ +public class TieredSpilloverCacheSettings { + + /** + * Setting which defines the onHeap cache store to be used in TieredSpilloverCache. + * + * Pattern: {cache_type}.tiered_spillover.onheap.store.name + * Example: indices.request.cache.tiered_spillover.onheap.store.name + */ + public static final Setting.AffixSetting<String> TIERED_SPILLOVER_ONHEAP_STORE_NAME = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".onheap.store.name", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Setting which defines the disk cache store to be used in TieredSpilloverCache. 
+ */ + public static final Setting.AffixSetting<String> TIERED_SPILLOVER_DISK_STORE_NAME = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.name", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + public static final Setting.AffixSetting<TimeValue> TIERED_SPILLOVER_DISK_TOOKTIME_THRESHOLD = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.took_time.threshold", + (key) -> Setting.timeSetting(key, TimeValue.ZERO, NodeScope, Dynamic) + ); + + /** + * Default constructor + */ + TieredSpilloverCacheSettings() {} +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java similarity index 68% rename from server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java rename to modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java index 7a4e0fa7201fd..fa2de3c14b5dc 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Package related to tiered cache enums */ -package org.opensearch.common.cache.store.enums; +/** Package related to cache tiers **/ +package org.opensearch.cache.common.tier; diff --git a/modules/cache-common/src/main/plugin-metadata/plugin-security.policy b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..12fe9f2ddb60b --- /dev/null +++ b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + permission java.lang.RuntimePermission "createClassLoader"; +}; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicyTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicyTests.java new file mode 100644 index 0000000000000..a7e261df7bb3d --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/DiskTierTookTimePolicyTests.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
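The three settings above are suffix-key (affix) settings, so their concrete keys are derived per cache type from CacheType#getSettingPrefix(); the class javadoc gives indices.request.cache.tiered_spillover.onheap.store.name as the request-cache example. A sketch of node settings that select the tier stores and the took-time threshold, assuming that prefix; the store names and threshold are illustrative values, not shipped defaults:

import org.opensearch.common.cache.CacheType;
import org.opensearch.common.settings.Settings;

public class TieredSpilloverSettingsExample {
    public static Settings requestCacheTieringSettings() {
        // e.g. "indices.request.cache" for CacheType.INDICES_REQUEST_CACHE
        String prefix = CacheType.INDICES_REQUEST_CACHE.getSettingPrefix();
        return Settings.builder()
            // Which registered ICache.Factory backs each tier (names are illustrative).
            .put(prefix + ".tiered_spillover.onheap.store.name", "example_onheap_store")
            .put(prefix + ".tiered_spillover.disk.store.name", "example_disk_store")
            // Only spill entries whose took time reaches this value; TimeValue.ZERO (the default) admits everything.
            .put(prefix + ".tiered_spillover.disk.took_time.threshold", "10ms")
            .build();
    }
}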
+ */ + +package org.opensearch.cache.common.tier; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.common.UUIDs; +import org.opensearch.common.cache.CachePolicyInfoWrapper; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.function.Function; + +public class DiskTierTookTimePolicyTests extends OpenSearchTestCase { + private final Function<BytesReference, CachePolicyInfoWrapper> transformationFunction = (data) -> { + try { + return getPolicyInfo(data); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + + private CachePolicyInfoWrapper getPolicyInfo(BytesReference data) throws IOException { + return new CachePolicyInfoWrapper(data.streamInput()); + } + + private DiskTierTookTimePolicy getTookTimePolicy() { + // dummy settings + Settings dummySettings = Settings.EMPTY; + return new DiskTierTookTimePolicy(dummySettings, transformationFunction); + } + + public void testTookTimePolicy() throws Exception { + DiskTierTookTimePolicy tookTimePolicy = getTookTimePolicy(); + + // manually set threshold for test + double threshMillis = 10; + long shortMillis = (long) (0.9 * threshMillis); + long longMillis = (long) (1.5 * threshMillis); + tookTimePolicy.setThreshold(new TimeValue((long) threshMillis)); + BytesReference shortTime = getValidPolicyInput(getQSR(), shortMillis * 1000000); + BytesReference longTime = getValidPolicyInput(getQSR(), longMillis * 1000000); + + boolean shortResult = tookTimePolicy.checkData(shortTime); + assertFalse(shortResult); + boolean longResult = tookTimePolicy.checkData(longTime); + assertTrue(longResult); + + DiskTierTookTimePolicy disabledPolicy = getTookTimePolicy(); + disabledPolicy.setThreshold(TimeValue.ZERO); + shortResult = disabledPolicy.checkData(shortTime); + assertTrue(shortResult); + longResult = disabledPolicy.checkData(longTime); + assertTrue(longResult); + } + + public void testMissingWrapper() throws Exception { + DiskTierTookTimePolicy tookTimePolicy = getTookTimePolicy(); + tookTimePolicy.setThreshold(TimeValue.ZERO); + QuerySearchResult qsr = getQSR(); + BytesStreamOutput out = new BytesStreamOutput(); + qsr.writeTo(out); + BytesReference missingWrapper = out.bytes(); + boolean allowedMissingWrapper = tookTimePolicy.checkData(missingWrapper); + assertFalse(allowedMissingWrapper); + } + + public void testNullTookTime() throws Exception { + // Null took time should always be rejected (because it might be the result of a + // BytesReference without a CachePolicyInfoWrapper in front of it) + + DiskTierTookTimePolicy zeroThreshold = getTookTimePolicy(); + 
zeroThreshold.setThreshold(TimeValue.ZERO); + DiskTierTookTimePolicy nonZeroThreshold = getTookTimePolicy(); + nonZeroThreshold.setThreshold(new TimeValue(10L)); + + Long nullTookTime = null; + CachePolicyInfoWrapper nullWrapper = new CachePolicyInfoWrapper(nullTookTime); + BytesStreamOutput out = new BytesStreamOutput(); + nullWrapper.writeTo(out); + QuerySearchResult qsr = getQSR(); + qsr.writeTo(out); + BytesReference data = out.bytes(); + + assertFalse(zeroThreshold.checkData(data)); + assertFalse(nonZeroThreshold.checkData(data)); + } + + public static QuerySearchResult getQSR() { + // package-private, also used by IndicesRequestCacheTests.java + // setup from QuerySearchResultTests.java + ShardId shardId = new ShardId("index", "uuid", randomInt()); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); + ShardSearchRequest shardSearchRequest = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + randomNonNegativeLong(), + null, + new String[0] + ); + ShardSearchContextId id = new ShardSearchContextId(UUIDs.base64UUID(), randomLong()); + QuerySearchResult result = new QuerySearchResult( + id, + new SearchShardTarget("node", shardId, null, OriginalIndices.NONE), + shardSearchRequest + ); + TopDocs topDocs = new TopDocs(new TotalHits(randomLongBetween(0, Long.MAX_VALUE), TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + result.topDocs(new TopDocsAndMaxScore(topDocs, randomBoolean() ? Float.NaN : randomFloat()), new DocValueFormat[0]); + + return result; + } + + private BytesReference getValidPolicyInput(QuerySearchResult qsr, long tookTimeNanos) throws IOException { + // When it's used in the cache, the policy will receive BytesReferences which have a CachePolicyInfoWrapper + // at the beginning of them, followed by the actual QSR. + CachePolicyInfoWrapper policyInfo = new CachePolicyInfoWrapper(tookTimeNanos); + BytesStreamOutput out = new BytesStreamOutput(); + policyInfo.writeTo(out); + qsr.writeTo(out); + return out.bytes(); + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java new file mode 100644 index 0000000000000..1172a48e97c6a --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class TieredSpilloverCachePluginTests extends OpenSearchTestCase { + + public void testGetCacheFactoryMap() { + TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(); + Map<String, ICache.Factory> map = tieredSpilloverCachePlugin.getCacheFactoryMap(); + assertNotNull(map.get(TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME)); + assertEquals(TieredSpilloverCachePlugin.TIERED_CACHE_SPILLOVER_PLUGIN_NAME, tieredSpilloverCachePlugin.getName()); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java similarity index 51% rename from server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java rename to modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index eb75244c6f8b1..e519cc046ba8a 100644 --- a/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -6,21 +6,25 @@ * compatible open source license. */ -package org.opensearch.common.cache.tier; +package org.opensearch.cache.common.tier; +import org.opensearch.common.cache.CacheTierPolicy; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.LoadAwareCacheLoader; -import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.store.OpenSearchOnHeapCache; -import org.opensearch.common.cache.store.StoreAwareCache; -import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; -import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; -import org.opensearch.common.cache.store.enums.CacheStoreType; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; @@ -31,15 +35,27 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; + public class TieredSpilloverCacheTests extends OpenSearchTestCase { public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( onHeapCacheSize, 
randomIntBetween(1, 4), - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); int numOfItems1 = randomIntBetween(1, onHeapCacheSize / 2 - 1); @@ -51,9 +67,7 @@ public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); } - assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + assertEquals(0, removalListener.evictionsMetric.count()); // Try to hit cache again with some randomization. int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); @@ -71,28 +85,56 @@ public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception cacheMiss++; } } - assertEquals(cacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(numOfItems1 + cacheMiss, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + assertEquals(0, removalListener.evictionsMetric.count()); } - public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { + public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(60, 100); int totalSize = onHeapCacheSize + diskCacheSize; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - StoreAwareCacheBuilder<String, String> cacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>().setMaximumWeightInBytes( - onHeapCacheSize * 50 - ).setWeigher((k, v) -> 50); // Will support onHeapCacheSize entries. + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the desired settings needed to create a TieredSpilloverCache object with INDICES_REQUEST_CACHE cacheType. 
+ Settings settings = Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockOnDiskCache.MockDiskCacheFactory.NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(); - StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diskCacheSize) - .setDeliberateDelay(0); + ICache<String, String> tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockOnDiskCache.MockDiskCacheFactory.NAME, + new MockOnDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + ) + ); - TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(cacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) - .build(); + TieredSpilloverCache<String, String> tieredSpilloverCache = (TieredSpilloverCache<String, String>) tieredSpilloverICache; // Put values in cache more than it's size and cause evictions from onHeap. int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); @@ -103,22 +145,158 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); } - long actualDiskCacheSize = tieredSpilloverCache.getOnDiskCache().get().count(); - assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + long actualDiskCacheSize = tieredSpilloverCache.getDiskCache().count(); + assertEquals(actualDiskCacheSize, removalListener.evictionsMetric.count()); // Evictions from onHeap equal to + // disk cache size. + + tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); + tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add); + + assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size()); + assertEquals(tieredSpilloverCache.getDiskCache().count(), diskTierKeys.size()); + } + public void testWithFactoryCreationWithOnHeapCacheNotPresent() { + int onHeapCacheSize = randomIntBetween(10, 30); + int keyValueSize = 50; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the settings without onHeap cache settings. 
+ Settings settings = Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockOnDiskCache.MockDiskCacheFactory.NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockOnDiskCache.MockDiskCacheFactory.NAME, + new MockOnDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + ) + ) + ); assertEquals( - eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count(), - eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count() + ex.getMessage(), + "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE ); - assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); + } + + public void testWithFactoryCreationWithDiskCacheNotPresent() { + int onHeapCacheSize = randomIntBetween(10, 30); + int keyValueSize = 50; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the settings without onHeap cache settings. 
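// (In the settings built below the disk store name is deliberately omitted, so the factory cannot resolve a disk tier and is expected to fail with the "No associated diskCache found" IllegalArgumentException asserted further down.)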
+ Settings settings = Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockOnDiskCache.MockDiskCacheFactory.NAME, + new MockOnDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + ) + ) + ); + assertEquals( + ex.getMessage(), + "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE + ); + } + + public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); + CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings( + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build() + ) + .build(); + + ICache.Factory mockDiskCacheFactory = new MockOnDiskCache.MockDiskCacheFactory(0, diskCacheSize); + + TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() + .setOnHeapCacheFactory(onHeapCacheFactory) + .setDiskCacheFactory(mockDiskCacheFactory) + .setCacheConfig(cacheConfig) + .setRemovalListener(removalListener) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .build(); + + // Put values in cache more than it's size and cause evictions from onHeap. + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List<String> onHeapKeys = new ArrayList<>(); + List<String> diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + long actualDiskCacheSize = tieredSpilloverCache.getDiskCache().count(); + assertEquals(actualDiskCacheSize, removalListener.evictionsMetric.count()); // Evictions from onHeap equal to + // disk cache size. 
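// A rough illustrative sketch of the spill-on-eviction behavior the assertion above relies on:
// entries evicted from the on-heap tier are reported to the removal listener and (subject to any
// configured tier policies) written into the disk tier, which is why the listener's eviction count
// matches the disk cache size here. The class below is hypothetical, not the actual
// TieredSpilloverCache wiring; it only assumes the RemovalListener, RemovalNotification,
// RemovalReason, ICache and CounterMetric types used elsewhere in this change.
class SpillToDiskOnEviction<K, V> implements RemovalListener<K, V> {
    private final ICache<K, V> diskTier; // the disk-tier cache that evicted entries fall into
    final CounterMetric evictions = new CounterMetric();

    SpillToDiskOnEviction(ICache<K, V> diskTier) {
        this.diskTier = diskTier;
    }

    @Override
    public void onRemoval(RemovalNotification<K, V> notification) {
        if (notification.getRemovalReason().equals(RemovalReason.EVICTED)) {
            evictions.inc(); // one eviction reported per entry leaving the on-heap tier
            diskTier.put(notification.getKey(), notification.getValue()); // spill the evicted entry to disk
        }
    }
}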
tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); - tieredSpilloverCache.getOnDiskCache().get().keys().forEach(diskTierKeys::add); + tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add); assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size()); - assertEquals(tieredSpilloverCache.getOnDiskCache().get().count(), diskTierKeys.size()); + assertEquals(tieredSpilloverCache.getDiskCache().count(), diskTierKeys.size()); // Try to hit cache again with some randomization. int numOfItems2 = randomIntBetween(50, 200); @@ -146,23 +324,27 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); cacheMiss++; } - // On heap cache misses would also include diskCacheHits as it means it missed onHeap cache. - assertEquals(numOfItems1 + cacheMiss + diskCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(onHeapCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(cacheMiss + numOfItems1, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); - assertEquals(diskCacheHit, eventListener.enumMap.get(CacheStoreType.DISK).hitCount.count()); } public void testComputeIfAbsentWithEvictionsFromBothTier() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( onHeapCacheSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); @@ -171,20 +353,28 @@ public void testComputeIfAbsentWithEvictionsFromBothTier() throws Exception { LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); } - assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); - assertTrue(eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count() > 0); + assertTrue(removalListener.evictionsMetric.count() > 0); } public void testGetAndCount() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; int totalSize = onHeapCacheSize + diskCacheSize; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( onHeapCacheSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); @@ -219,57 +409,51 @@ public void testGetAndCount() throws 
Exception { assertEquals(numOfItems1, tieredSpilloverCache.count()); } - public void testWithDiskTierNull() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - - StoreAwareCacheBuilder<String, String> onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>() - .setMaximumWeightInBytes(onHeapCacheSize * 20) - .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries - TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(onHeapCacheBuilder) - .setListener(eventListener) - .build(); - - int numOfItems = randomIntBetween(onHeapCacheSize + 1, onHeapCacheSize * 3); - for (int iter = 0; iter < numOfItems; iter++) { - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), loadAwareCacheLoader); - } - assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); - } - public void testPut() { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( onHeapCacheSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); String key = UUID.randomUUID().toString(); String value = UUID.randomUUID().toString(); tieredSpilloverCache.put(key, value); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).cachedCount.count()); assertEquals(1, tieredSpilloverCache.count()); } public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { int onHeapCacheSize = randomIntBetween(200, 400); int diskCacheSize = randomIntBetween(450, 800); + int keyValueSize = 50; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, + keyValueSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + (onHeapCacheSize * keyValueSize) + "b" + ) + .build(), 0 ); @@ -281,14 +465,14 @@ public boolean isLoaded() { } @Override - public String load(String key) throws Exception { + public String load(String key) { return UUID.randomUUID().toString(); } }); } assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); - assertEquals(0, tieredSpilloverCache.getOnDiskCache().get().count()); + assertEquals(0, 
tieredSpilloverCache.getDiskCache().count()); // Again try to put OnHeap cache capacity amount of new items. List<String> newKeyList = new ArrayList<>(); @@ -318,32 +502,38 @@ public String load(String key) { for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) { assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i))); } - assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); - assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnDiskCache().get().count()); + assertEquals(onHeapCacheSize, tieredSpilloverCache.getDiskCache().count()); } public void testInvalidate() { int onHeapCacheSize = 1; int diskCacheSize = 10; + int keyValueSize = 20; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( onHeapCacheSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); String key = UUID.randomUUID().toString(); String value = UUID.randomUUID().toString(); // First try to invalidate without the key present in cache. tieredSpilloverCache.invalidate(key); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); // Now try to invalidate with the key present in onHeap cache. tieredSpilloverCache.put(key, value); tieredSpilloverCache.invalidate(key); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); assertEquals(0, tieredSpilloverCache.count()); tieredSpilloverCache.put(key, value); @@ -353,20 +543,27 @@ public void testInvalidate() { assertEquals(2, tieredSpilloverCache.count()); // Again invalidate older key tieredSpilloverCache.invalidate(key); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.DISK).invalidationMetric.count()); assertEquals(1, tieredSpilloverCache.count()); } public void testCacheKeys() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(60, 100); - int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, + keyValueSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); List<String> onHeapKeys = new ArrayList<>(); @@ -388,7 +585,7 @@ public void testCacheKeys() throws Exception { List<String> actualOnHeapKeys = new ArrayList<>(); List<String> actualOnDiskKeys = new ArrayList<>(); Iterable<String> onHeapiterable = tieredSpilloverCache.getOnHeapCache().keys(); - Iterable<String> onDiskiterable = tieredSpilloverCache.getOnDiskCache().get().keys(); + Iterable<String> onDiskiterable = tieredSpilloverCache.getDiskCache().keys(); onHeapiterable.iterator().forEachRemaining(actualOnHeapKeys::add); 
onDiskiterable.iterator().forEachRemaining(actualOnDiskKeys::add); for (String onHeapKey : onHeapKeys) { @@ -414,14 +611,14 @@ public void testCacheKeys() throws Exception { } public void testRefresh() { - int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(60, 100); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, + 50, diskCacheSize, - eventListener, + removalListener, + Settings.EMPTY, 0 ); tieredSpilloverCache.refresh(); @@ -430,13 +627,22 @@ public void testRefresh() { public void testInvalidateAll() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(60, 100); + int keyValueSize = 50; int totalSize = onHeapCacheSize + diskCacheSize; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, + keyValueSize, diskCacheSize, - eventListener, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), 0 ); // Put values in cache more than it's size and cause evictions from onHeap. @@ -462,13 +668,23 @@ public void testInvalidateAll() throws Exception { public void testComputeIfAbsentConcurrently() throws Exception { int onHeapCacheSize = randomIntBetween(100, 300); int diskCacheSize = randomIntBetween(200, 400); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + Settings settings = Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(); TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, + keyValueSize, diskCacheSize, - eventListener, + removalListener, + settings, 0 ); @@ -485,7 +701,7 @@ public void testComputeIfAbsentConcurrently() throws Exception { for (int i = 0; i < numberOfSameKeys; i++) { threads[i] = new Thread(() -> { try { - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader() { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { boolean isLoaded = false; @Override @@ -494,7 +710,7 @@ public boolean isLoaded() { } @Override - public Object load(Object key) throws Exception { + public String load(String key) { isLoaded = true; return value; } @@ -525,19 +741,31 @@ public Object load(Object key) throws Exception { public void testConcurrencyForEvictionFlow() throws Exception { int diskCacheSize = randomIntBetween(450, 800); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - - StoreAwareCacheBuilder<String, String> cacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>().setMaximumWeightInBytes( - 200 - ).setWeigher((k, v) -> 150); - - 
StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diskCacheSize) - .setDeliberateDelay(500); - + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); + ICache.Factory diskCacheFactory = new MockOnDiskCache.MockDiskCacheFactory(500, diskCacheSize); + CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> 150) + .setRemovalListener(removalListener) + .setSettings( + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + 200 + "b" + ) + .build() + ) + .build(); TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(cacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setDiskCacheFactory(diskCacheFactory) + .setRemovalListener(removalListener) + .setCacheConfig(cacheConfig) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) .build(); String keyToBeEvicted = "key1"; @@ -572,7 +800,7 @@ public String load(String key) { assertBusy(() -> { assertTrue(loadAwareCacheLoader.isLoaded()); }, 100, TimeUnit.MILLISECONDS); // We wait for new key to be loaded // after which it eviction flow is // guaranteed to occur. - StoreAwareCache<String, String> onDiskCache = tieredSpilloverCache.getOnDiskCache().get(); + ICache<String, String> onDiskCache = tieredSpilloverCache.getDiskCache(); // Now on a different thread, try to get key(above one which got evicted) from tiered cache. We expect this // should return not null value as it should be present on diskCache. 
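// (Context for the race exercised here: the on-heap tier above is capped at 200 bytes with a weigher of 150 bytes per entry, so it holds a single entry; loading a second key therefore evicts "key1" to the disk tier, and the mock disk cache is constructed with a deliberate 500 ms delay, widening the window in which the concurrent get on the other thread can run.)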
@@ -589,57 +817,107 @@ public String load(String key) { countDownLatch.await(); assertNotNull(actualValue.get()); countDownLatch1.await(); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + assertEquals(1, removalListener.evictionsMetric.count()); assertEquals(1, tieredSpilloverCache.getOnHeapCache().count()); assertEquals(1, onDiskCache.count()); assertNotNull(onDiskCache.get(keyToBeEvicted)); } - class MockCacheEventListener<K, V> implements StoreAwareCacheEventListener<K, V> { + class MockCacheRemovalListener<K, V> implements RemovalListener<K, V> { + final CounterMetric evictionsMetric = new CounterMetric(); - EnumMap<CacheStoreType, TestStatsHolder> enumMap = new EnumMap<>(CacheStoreType.class); + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + evictionsMetric.inc(); + } + } + + public void testDiskTierPolicies() throws Exception { + // For policy function, allow if what it receives starts with "a" and string is even length + ArrayList<CacheTierPolicy<String>> policies = new ArrayList<>(); + policies.add(new AllowFirstLetterA()); + policies.add(new AllowEvenLengths()); + + int onHeapCacheSize = 0; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + 100, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * 50 + "b" + ) + .build(), + 0, + policies + ); - MockCacheEventListener() { - for (CacheStoreType cacheStoreType : CacheStoreType.values()) { - enumMap.put(cacheStoreType, new TestStatsHolder()); + Map<String, String> keyValuePairs = new HashMap<>(); + Map<String, Boolean> expectedOutputs = new HashMap<>(); + keyValuePairs.put("key1", "abcd"); + expectedOutputs.put("key1", true); + keyValuePairs.put("key2", "abcde"); + expectedOutputs.put("key2", false); + keyValuePairs.put("key3", "bbc"); + expectedOutputs.put("key3", false); + keyValuePairs.put("key4", "ab"); + expectedOutputs.put("key4", true); + keyValuePairs.put("key5", ""); + expectedOutputs.put("key5", false); + + LoadAwareCacheLoader<String, String> loader = new LoadAwareCacheLoader<String, String>() { + boolean isLoaded = false; + @Override + public boolean isLoaded() { + return isLoaded; } - } - @Override - public void onMiss(K key, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).missCount.inc(); - } + @Override + public String load(String key) throws Exception { + isLoaded = true; + return keyValuePairs.get(key); + } + }; - @Override - public void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification) { - if (notification.getRemovalReason().equals(RemovalReason.EVICTED)) { - enumMap.get(notification.getCacheStoreType()).evictionsMetric.inc(); - } else if (notification.getRemovalReason().equals(RemovalReason.INVALIDATED)) { - enumMap.get(notification.getCacheStoreType()).invalidationMetric.inc(); + for (String key : keyValuePairs.keySet()) { + Boolean expectedOutput = expectedOutputs.get(key); + String value = tieredSpilloverCache.computeIfAbsent(key, loader); + assertEquals(keyValuePairs.get(key), value); + String result = tieredSpilloverCache.get(key); + if (expectedOutput) { + // Should retrieve from disk tier if it was accepted + assertEquals(keyValuePairs.get(key), result); + } else { + // Should miss as heap tier 
size = 0 and the policy rejected it + assertNull(result); } } + } + private static class AllowFirstLetterA implements CacheTierPolicy<String> { @Override - public void onHit(K key, V value, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).hitCount.inc(); + public boolean checkData(String data) { + try { + return (data.charAt(0) == 'a'); + } catch (StringIndexOutOfBoundsException e) { + return false; + } } + } + private static class AllowEvenLengths implements CacheTierPolicy<String> { @Override - public void onCached(K key, V value, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).cachedCount.inc(); - } - - class TestStatsHolder { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric hitCount = new CounterMetric(); - final CounterMetric missCount = new CounterMetric(); - final CounterMetric cachedCount = new CounterMetric(); - final CounterMetric invalidationMetric = new CounterMetric(); + public boolean checkData(String data) { + return data.length() % 2 == 0; } } private LoadAwareCacheLoader<String, String> getLoadAwareCacheLoader() { - return new LoadAwareCacheLoader<String, String>() { + return new LoadAwareCacheLoader<>() { boolean isLoaded = false; @Override @@ -656,34 +934,55 @@ public boolean isLoaded() { } private TieredSpilloverCache<String, String> intializeTieredSpilloverCache( - int onHeapCacheSize, - int diksCacheSize, - StoreAwareCacheEventListener<String, String> eventListener, + int keyValueSize, + int diskCacheSize, + RemovalListener<String, String> removalListener, + Settings settings, long diskDeliberateDelay + ) { - StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diksCacheSize) - .setDeliberateDelay(diskDeliberateDelay); - StoreAwareCacheBuilder<String, String> onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>() - .setMaximumWeightInBytes(onHeapCacheSize * 20) - .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries - return new TieredSpilloverCache.Builder<String, String>().setOnHeapCacheBuilder(onHeapCacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) + return intializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null); + } + + private TieredSpilloverCache<String, String> intializeTieredSpilloverCache( + int keyValueSize, + int diskCacheSize, + RemovalListener<String, String> removalListener, + Settings settings, + long diskDeliberateDelay, + List<CacheTierPolicy<String>> policies + ) { + ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); + CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) .build(); + + ICache.Factory mockDiskCacheFactory = new MockOnDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize); + + TieredSpilloverCache.Builder<String, String> builder = new TieredSpilloverCache.Builder<String, String>() + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setRemovalListener(removalListener) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setDiskCacheFactory(mockDiskCacheFactory) + .setCacheConfig(cacheConfig); + if (policies != null) { + builder.setPolicies(policies); + } + return builder.build(); } } -class MockOnDiskCache<K, V> implements StoreAwareCache<K, V> { +class 
MockOnDiskCache<K, V> implements ICache<K, V> { Map<K, V> cache; int maxSize; - long delay; - StoreAwareCacheEventListener<K, V> eventListener; - MockOnDiskCache(int maxSize, StoreAwareCacheEventListener<K, V> eventListener, long delay) { + MockOnDiskCache(int maxSize, long delay) { this.maxSize = maxSize; - this.eventListener = eventListener; this.delay = delay; this.cache = new ConcurrentHashMap<K, V>(); } @@ -691,18 +990,12 @@ class MockOnDiskCache<K, V> implements StoreAwareCache<K, V> { @Override public V get(K key) { V value = cache.get(key); - if (value != null) { - eventListener.onHit(key, value, CacheStoreType.DISK); - } else { - eventListener.onMiss(key, CacheStoreType.DISK); - } return value; } @Override public void put(K key, V value) { if (this.cache.size() >= maxSize) { // For simplification - eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, value, RemovalReason.EVICTED, CacheStoreType.DISK)); return; } try { @@ -711,11 +1004,10 @@ public void put(K key, V value) { throw new RuntimeException(e); } this.cache.put(key, value); - eventListener.onCached(key, value, CacheStoreType.DISK); } @Override - public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) { V value = cache.computeIfAbsent(key, key1 -> { try { return loader.load(key); @@ -723,20 +1015,11 @@ public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Except throw new RuntimeException(e); } }); - if (!loader.isLoaded()) { - eventListener.onHit(key, value, CacheStoreType.DISK); - } else { - eventListener.onMiss(key, CacheStoreType.DISK); - eventListener.onCached(key, value, CacheStoreType.DISK); - } return value; } @Override public void invalidate(K key) { - if (this.cache.containsKey(key)) { - eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, null, RemovalReason.INVALIDATED, CacheStoreType.DISK)); - } this.cache.remove(key); } @@ -759,18 +1042,40 @@ public long count() { public void refresh() {} @Override - public CacheStoreType getTierType() { - return CacheStoreType.DISK; + public void close() { + + } + + public static class MockDiskCacheFactory implements Factory { + + static final String NAME = "mockDiskCache"; + final long delay; + final int maxSize; + + MockDiskCacheFactory(long delay, int maxSize) { + this.delay = delay; + this.maxSize = maxSize; + } + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + return new Builder<K, V>().setMaxSize(maxSize).setDeliberateDelay(delay).build(); + } + + @Override + public String getCacheName() { + return NAME; + } } - public static class Builder<K, V> extends StoreAwareCacheBuilder<K, V> { + public static class Builder<K, V> extends ICacheBuilder<K, V> { int maxSize; long delay; @Override - public StoreAwareCache<K, V> build() { - return new MockOnDiskCache<K, V>(maxSize, this.getEventListener(), delay); + public ICache<K, V> build() { + return new MockOnDiskCache<K, V>(this.maxSize, this.delay); } public Builder<K, V> setMaxSize(int maxSize) { diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java index c38b29502e282..6afd5c4ca75c1 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java +++ 
b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java @@ -11,12 +11,11 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.StandardValidator; import org.opensearch.geometry.utils.WellKnownText; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import java.util.Arrays; @@ -29,14 +28,14 @@ * This is the base class for all the Geo related integration tests. Use this class to add the features and settings * for the test cluster on which integration tests are running. */ -public abstract class GeoModulePluginIntegTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class GeoModulePluginIntegTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final double GEOHASH_TOLERANCE = 1E-5D; protected static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); - public GeoModulePluginIntegTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public GeoModulePluginIntegTestCase(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -47,11 +46,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Returns a collection of plugins that should be loaded on each node for doing the integration tests. As this * geo plugin is not getting packaged in a zip, we need to load it before the tests run. diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java index 7344903fd5220..9e7ce0d3c7980 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java @@ -44,8 +44,8 @@ public class MissingValueIT extends GeoModulePluginIntegTestCase { private GeoPoint bottomRight; private GeoPoint topLeft; - public MissingValueIT(Settings dynamicSettings) { - super(dynamicSettings); + public MissingValueIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java new file mode 100644 index 0000000000000..c968fb2f6c2da --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java @@ -0,0 +1,647 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.common.hash.MessageDigests; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; + +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + +/** + * Processor that generating community id flow hash for the network flow tuples, the algorithm is defined in + * <a href="https://github.com/corelight/community-id-spec">Community ID Flow Hashing</a>. + */ +public class CommunityIdProcessor extends AbstractProcessor { + public static final String TYPE = "community_id"; + // the version of the community id flow hashing algorithm + private static final String COMMUNITY_ID_HASH_VERSION = "1"; + // 0 byte for padding + private static final byte PADDING_BYTE = 0; + // the maximum code number for network protocol, ICMP message type and code as defined by IANA + private static final int IANA_COMMON_MAX_NUMBER = 255; + // the minimum code number for network protocol, ICMP message type and code as defined by IANA + private static final int IANA_COMMON_MIN_NUMBER = 0; + // the minimum seed for generating hash + private static final int MIN_SEED = 0; + // the maximum seed for generating hash + private static final int MAX_SEED = 65535; + // the minimum port number in transport layer + private static final int MIN_PORT = 0; + // the maximum port number in transport layer + private static final int MAX_PORT = 63335; + private static final String ICMP_MESSAGE_TYPE = "type"; + private static final String ICMP_MESSAGE_CODE = "code"; + private final String sourceIPField; + private final String sourcePortField; + private final String destinationIPField; + private final String destinationPortField; + private final String ianaProtocolNumberField; + private final String protocolField; + private final String icmpTypeField; + private final String icmpCodeField; + private final int seed; + private final String targetField; + private final boolean ignoreMissing; + + CommunityIdProcessor( + String tag, + String description, + String sourceIPField, + String sourcePortField, + String destinationIPField, + String destinationPortField, + String ianaProtocolNumberField, + String protocolField, + String icmpTypeField, + String icmpCodeField, + int seed, + String targetField, + boolean ignoreMissing + ) { + super(tag, description); + this.sourceIPField = sourceIPField; + this.sourcePortField = sourcePortField; + this.destinationIPField = destinationIPField; + this.destinationPortField = destinationPortField; + this.ianaProtocolNumberField = ianaProtocolNumberField; + this.protocolField = protocolField; + this.icmpTypeField = icmpTypeField; + this.icmpCodeField = icmpCodeField; + this.seed = seed; + this.targetField = targetField; + this.ignoreMissing = ignoreMissing; + } + + public String getSourceIPField() { + return sourceIPField; + } + + public String getSourcePortField() { + return sourcePortField; + } + + public String getDestinationIPField() { + return destinationIPField; + } + + public String getDestinationPortField() { + return destinationPortField; + } + + public String getIANAProtocolNumberField() { + return 
ianaProtocolNumberField; + } + + public String getProtocolField() { + return protocolField; + } + + public String getIcmpTypeField() { + return icmpTypeField; + } + + public String getIcmpCodeField() { + return icmpCodeField; + } + + public int getSeed() { + return seed; + } + + public String getTargetField() { + return targetField; + } + + public boolean isIgnoreMissing() { + return ignoreMissing; + } + + @Override + public IngestDocument execute(IngestDocument document) { + // resolve protocol firstly + Protocol protocol = resolveProtocol(document); + // exit quietly if protocol cannot be resolved and ignore_missing is true + if (protocol == null) { + return document; + } + + // resolve ip secondly, exit quietly if either source ip or destination ip cannot be resolved and ignore_missing is true + byte[] sourceIPByteArray = resolveIP(document, sourceIPField); + if (sourceIPByteArray == null) { + return document; + } + byte[] destIPByteArray = resolveIP(document, destinationIPField); + if (destIPByteArray == null) { + return document; + } + // source ip and destination ip must have same format, either ipv4 or ipv6 + if (sourceIPByteArray.length != destIPByteArray.length) { + throw new IllegalArgumentException("source ip and destination ip must have same format"); + } + + // resolve source port and destination port for transport protocols, + // exit quietly if either source port or destination port is null nor empty + Integer sourcePort = null; + Integer destinationPort = null; + if (protocol.isTransportProtocol()) { + sourcePort = resolvePort(document, sourcePortField); + if (sourcePort == null) { + return document; + } + + destinationPort = resolvePort(document, destinationPortField); + if (destinationPort == null) { + return document; + } + } + + // resolve ICMP message type and code, support both ipv4 and ipv6 + // set source port to icmp type, and set dest port to icmp code, so that we can have a generic way to handle + // all protocols + boolean isOneway = true; + final boolean isICMPProtocol = Protocol.ICMP == protocol || Protocol.ICMP_V6 == protocol; + if (isICMPProtocol) { + Integer icmpType = resolveICMP(document, icmpTypeField, ICMP_MESSAGE_TYPE); + if (icmpType == null) { + return document; + } else { + sourcePort = icmpType; + } + + // for the message types which don't have code, fetch the equivalent code from the pre-defined mapper, + // and they can be considered to two-way flow + Byte equivalentCode = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode() + ? ICMPType.getEquivalentCode(icmpType.byteValue()) + : ICMPv6Type.getEquivalentCode(icmpType.byteValue()); + if (equivalentCode != null) { + isOneway = false; + // for IPv6-ICMP, the pre-defined code is negative byte, + // we need to convert it to positive integer for later comparison + destinationPort = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode() + ? 
Integer.valueOf(equivalentCode) + : Byte.toUnsignedInt(equivalentCode); + } else { + // get icmp code from the document if we cannot get equivalent code from the pre-defined mapper + Integer icmpCode = resolveICMP(document, icmpCodeField, ICMP_MESSAGE_CODE); + if (icmpCode == null) { + return document; + } else { + destinationPort = icmpCode; + } + } + } + + assert (sourcePort != null && destinationPort != null); + boolean isLess = compareIPAndPort(sourceIPByteArray, sourcePort, destIPByteArray, destinationPort); + // swap ip and port to remove directionality in the flow tuple, smaller ip:port tuple comes first + // but for ICMP and IPv6-ICMP, if it's a one-way flow, the flow tuple is considered to be ordered + if (!isLess && (!isICMPProtocol || !isOneway)) { + byte[] byteArray = sourceIPByteArray; + sourceIPByteArray = destIPByteArray; + destIPByteArray = byteArray; + + int tempPort = sourcePort; + sourcePort = destinationPort; + destinationPort = tempPort; + } + + // generate flow hash + String digest = generateCommunityIDHash( + protocol.getProtocolCode(), + sourceIPByteArray, + destIPByteArray, + sourcePort, + destinationPort, + seed + ); + document.setFieldValue(targetField, digest); + return document; + } + + @Override + public String getType() { + return TYPE; + } + + /** + * Resolve network protocol + * @param document the ingesting document + * @return the resolved protocol, null if the resolved protocol is null and ignore_missing is true + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type. + */ + private Protocol resolveProtocol(IngestDocument document) { + Protocol protocol = null; + Integer ianaProtocolNumber = null; + String protocolName = null; + if (!Strings.isNullOrEmpty(ianaProtocolNumberField)) { + ianaProtocolNumber = document.getFieldValue(ianaProtocolNumberField, Integer.class, true); + } + if (!Strings.isNullOrEmpty(protocolField)) { + protocolName = document.getFieldValue(protocolField, String.class, true); + } + // if iana protocol number is not specified, then resolve protocol name + if (ianaProtocolNumber != null) { + if (ianaProtocolNumber >= IANA_COMMON_MIN_NUMBER + && ianaProtocolNumber <= IANA_COMMON_MAX_NUMBER + && Protocol.protocolCodeMap.containsKey(ianaProtocolNumber.byteValue())) { + protocol = Protocol.protocolCodeMap.get(ianaProtocolNumber.byteValue()); + } else { + throw new IllegalArgumentException("unsupported iana protocol number [" + ianaProtocolNumber + "]"); + } + } else if (protocolName != null) { + Protocol protocolFromName = Protocol.fromProtocolName(protocolName); + if (protocolFromName != null) { + protocol = protocolFromName; + } else { + throw new IllegalArgumentException("unsupported protocol [" + protocolName + "]"); + } + } + + // return null if protocol cannot be resolved and ignore_missing is true + if (protocol == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException( + "cannot resolve protocol by neither iana protocol number field [" + + ianaProtocolNumberField + + "] nor protocol name field [" + + protocolField + + "]" + ); + } + } + return protocol; + } + + /** + * Resolve ip address + * @param document the ingesting document + * @param fieldName the ip field to be resolved + * @return the byte array of the resolved ip + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is 
found at the provided path is not of the expected type. + */ + private byte[] resolveIP(IngestDocument document, String fieldName) { + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("both source ip field path and destination ip field path cannot be null nor empty"); + } + } + + String ipAddress = document.getFieldValue(fieldName, String.class, true); + if (Strings.isNullOrEmpty(ipAddress)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("ip address in the field [" + fieldName + "] is null or empty"); + } + } + + byte[] byteArray = InetAddresses.ipStringToBytes(ipAddress); + if (byteArray == null) { + throw new IllegalArgumentException( + "ip address [" + ipAddress + "] in the field [" + fieldName + "] is not a valid ipv4/ipv6 address" + ); + } else { + return byteArray; + } + } + + /** + * Resolve port for transport protocols + * @param document the ingesting document + * @param fieldName the port field to be resolved + * @return the resolved port number, null if the resolved port is null and ignoreMissing is true + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type. + */ + private Integer resolvePort(IngestDocument document, String fieldName) { + Integer port; + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("both source port and destination port field path cannot be null nor empty"); + } + } else { + port = document.getFieldValue(fieldName, Integer.class, true); + } + + if (port == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException( + "both source port and destination port cannot be null, but port in the field path [" + fieldName + "] is null" + ); + } + } else if (port < MIN_PORT || port > MAX_PORT) { + throw new IllegalArgumentException( + "both source port and destination port must be between 0 and 65535, but port in the field path [" + + fieldName + + "] is [" + + port + + "]" + ); + } + return port; + } + + /** + * Resolve ICMP's message type and code field + * @param document the ingesting document + * @param fieldName name of the type or the code field + * @param fieldType type or code + * @return the resolved value of the specified field, return null if ignore_missing if true and the field doesn't exist or is null, + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type. 
+ */ + private Integer resolveICMP(IngestDocument document, String fieldName, String fieldType) { + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("icmp message " + fieldType + " field path cannot be null nor empty"); + } + } + Integer fieldValue = document.getFieldValue(fieldName, Integer.class, true); + if (fieldValue == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("icmp message " + fieldType + " cannot be null"); + } + } else if (fieldValue < IANA_COMMON_MIN_NUMBER || fieldValue > IANA_COMMON_MAX_NUMBER) { + throw new IllegalArgumentException("invalid icmp message " + fieldType + " [" + fieldValue + "]"); + } else { + return fieldValue; + } + } + + /** + * + * @param protocolCode byte of the protocol number + * @param sourceIPByteArray bytes of the source ip in the network flow tuple + * @param destIPByteArray bytes of the destination ip in the network flow tuple + * @param sourcePort source port in the network flow tuple + * @param destinationPort destination port in the network flow tuple + * @param seed seed for generating hash + * @return the generated hash value, use SHA-1 + */ + private String generateCommunityIDHash( + byte protocolCode, + byte[] sourceIPByteArray, + byte[] destIPByteArray, + Integer sourcePort, + Integer destinationPort, + int seed + ) { + MessageDigest messageDigest = MessageDigests.sha1(); + messageDigest.update(intToTwoByteArray(seed)); + messageDigest.update(sourceIPByteArray); + messageDigest.update(destIPByteArray); + messageDigest.update(protocolCode); + messageDigest.update(PADDING_BYTE); + messageDigest.update(intToTwoByteArray(sourcePort)); + messageDigest.update(intToTwoByteArray(destinationPort)); + + return COMMUNITY_ID_HASH_VERSION + ":" + Base64.getEncoder().encodeToString(messageDigest.digest()); + } + + /** + * Convert an integer to two byte array + * @param val the integer which will be consumed to produce a two byte array + * @return the two byte array + */ + private byte[] intToTwoByteArray(Integer val) { + byte[] byteArray = new byte[2]; + byteArray[0] = Integer.valueOf(val >>> 8).byteValue(); + byteArray[1] = val.byteValue(); + return byteArray; + } + + /** + * Compare the ip and port, return true if the flow tuple is ordered + * @param sourceIPByteArray bytes of the source ip in the network flow tuple + * @param destIPByteArray bytes of the destination ip in the network flow tuple + * @param sourcePort source port in the network flow tuple + * @param destinationPort destination port in the network flow tuple + * @return true if sourceIP is less than destinationIP or sourceIP equals to destinationIP + * but sourcePort is less than destinationPort + */ + private boolean compareIPAndPort(byte[] sourceIPByteArray, int sourcePort, byte[] destIPByteArray, int destinationPort) { + int compareResult = compareByteArray(sourceIPByteArray, destIPByteArray); + return compareResult < 0 || compareResult == 0 && sourcePort < destinationPort; + } + + /** + * Compare two byte array which have same length + * @param byteArray1 the first byte array to compare + * @param byteArray2 the second byte array to compare + * @return 0 if each byte in both two arrays are same, a value less than 0 if byte in the first array is less than + * the byte at the same index, a value greater than 0 if byte in the first array is greater than the byte at the same index + */ + private int compareByteArray(byte[] byteArray1, byte[] byteArray2) { + assert 
(byteArray1.length == byteArray2.length); + int i = 0; + int j = 0; + while (i < byteArray1.length && j < byteArray2.length) { + int isLess = Byte.compareUnsigned(byteArray1[i], byteArray2[j]); + if (isLess == 0) { + i++; + j++; + } else { + return isLess; + } + } + return 0; + } + + /** + * Mapping ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPType { + ECHO_REPLY((byte) 0, (byte) 8), + ECHO((byte) 8, (byte) 0), + RTR_ADVERT((byte) 9, (byte) 10), + RTR_SOLICIT((byte) 10, (byte) 9), + TSTAMP((byte) 13, (byte) 14), + TSTAMP_REPLY((byte) 14, (byte) 13), + INFO((byte) 15, (byte) 16), + INFO_REPLY((byte) 16, (byte) 15), + MASK((byte) 17, (byte) 18), + MASK_REPLY((byte) 18, (byte) 17); + + private final byte type; + private final byte code; + + ICMPType(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of ICMP and derives equivalent message code + * @param type the message type of ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * Mapping IPv6-ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPv6Type { + ECHO_REQUEST((byte) 128, (byte) 129), + ECHO_REPLY((byte) 129, (byte) 128), + MLD_LISTENER_QUERY((byte) 130, (byte) 131), + MLD_LISTENER_REPORT((byte) 131, (byte) 130), + ND_ROUTER_SOLICIT((byte) 133, (byte) 134), + ND_ROUTER_ADVERT((byte) 134, (byte) 133), + ND_NEIGHBOR_SOLICIT((byte) 135, (byte) 136), + ND_NEIGHBOR_ADVERT((byte) 136, (byte) 135), + WRU_REQUEST((byte) 139, (byte) 140), + WRU_REPLY((byte) 140, (byte) 139), + HAAD_REQUEST((byte) 144, (byte) 145), + HAAD_REPLY((byte) 145, (byte) 144); + + private final byte type; + private final byte code; + + ICMPv6Type(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of IPv6-ICMP and derives equivalent message code + * @param type the message type of IPv6-ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * An enumeration of the supported network protocols + */ + enum Protocol { + ICMP((byte) 1, false), + TCP((byte) 6, true), + UDP((byte) 17, true), + ICMP_V6((byte) 58, false), + SCTP((byte) 132, true); + + private final byte protocolCode; + private final boolean isTransportProtocol; + + Protocol(int ianaNumber, boolean isTransportProtocol) { + this.protocolCode = Integer.valueOf(ianaNumber).byteValue(); + this.isTransportProtocol = isTransportProtocol; + } + + public static final Map<Byte, Protocol> protocolCodeMap = Arrays.stream(values()) + .collect(Collectors.toMap(Protocol::getProtocolCode, p -> p)); + + public static Protocol fromProtocolName(String protocolName) { + String name = protocolName.toUpperCase(Locale.ROOT); + if (name.equals("IPV6-ICMP")) { + return Protocol.ICMP_V6; + } + try { + return valueOf(name); + } catch (IllegalArgumentException e) { + return null; + } + } + + public byte getProtocolCode() { + return this.protocolCode; + } + + public boolean isTransportProtocol() { + return 
this.isTransportProtocol; + } + } + + public static class Factory implements Processor.Factory { + @Override + public CommunityIdProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + String sourceIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_ip_field"); + String sourcePortField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "source_port_field"); + String destinationIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "destination_ip_field"); + String destinationPortField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "destination_port_field" + ); + String ianaProtocolNumberField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "iana_protocol_number_field" + ); + String protocolField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "protocol_field"); + String icmpTypeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_type_field"); + String icmpCodeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_code_field"); + int seed = ConfigurationUtils.readIntProperty(TYPE, processorTag, config, "seed", 0); + if (seed < MIN_SEED || seed > MAX_SEED) { + throw newConfigurationException(TYPE, processorTag, "seed", "seed must be between 0 and 65535"); + } + + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", "community_id"); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + + return new CommunityIdProcessor( + processorTag, + description, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java new file mode 100644 index 0000000000000..dec69df275130 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; + +import java.util.Map; + +public final class CopyProcessor extends AbstractProcessor { + public static final String TYPE = "copy"; + + private final TemplateScript.Factory sourceField; + private final TemplateScript.Factory targetField; + + private final boolean ignoreMissing; + + private final boolean removeSource; + + private final boolean overrideTarget; + + CopyProcessor(String tag, String description, TemplateScript.Factory sourceField, TemplateScript.Factory targetField) { + this(tag, description, sourceField, targetField, false, false, false); + } + + CopyProcessor( + String tag, + String description, + TemplateScript.Factory sourceField, + TemplateScript.Factory targetField, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + super(tag, description); + this.sourceField = sourceField; + this.targetField = targetField; + this.ignoreMissing = ignoreMissing; + this.removeSource = removeSource; + this.overrideTarget = overrideTarget; + } + + public TemplateScript.Factory getSourceField() { + return sourceField; + } + + public TemplateScript.Factory getTargetField() { + return targetField; + } + + public boolean isIgnoreMissing() { + return ignoreMissing; + } + + public boolean isRemoveSource() { + return removeSource; + } + + public boolean isOverrideTarget() { + return overrideTarget; + } + + @Override + public IngestDocument execute(IngestDocument document) { + String source = document.renderTemplate(sourceField); + final boolean sourceFieldPathIsNullOrEmpty = Strings.isNullOrEmpty(source); + if (sourceFieldPathIsNullOrEmpty || document.hasField(source, true) == false) { + if (ignoreMissing) { + return document; + } else if (sourceFieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("source field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("source field [" + source + "] doesn't exist"); + } + } + + String target = document.renderTemplate(targetField); + if (Strings.isNullOrEmpty(target)) { + throw new IllegalArgumentException("target field path cannot be null nor empty"); + } + if (source.equals(target)) { + throw new IllegalArgumentException("source field path and target field path cannot be same"); + } + + if (overrideTarget || document.hasField(target, true) == false || document.getFieldValue(target, Object.class) == null) { + Object sourceValue = document.getFieldValue(source, Object.class); + document.setFieldValue(target, IngestDocument.deepCopy(sourceValue)); + } else { + throw new IllegalArgumentException("target field [" + target + "] already exists"); + } + + if (removeSource) { + document.removeField(source); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + + @Override + public CopyProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + String sourceField = 
ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_field"); + TemplateScript.Factory sourceFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "source_field", + sourceField, + scriptService + ); + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "target_field", + targetField, + scriptService + ); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + boolean removeSource = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "remove_source", false); + boolean overrideTarget = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override_target", false); + + return new CopyProcessor( + processorTag, + description, + sourceFieldTemplate, + targetFieldTemplate, + ignoreMissing, + removeSource, + overrideTarget + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index a2a51d968e078..0f8b248fd5af8 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -106,6 +106,9 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet processors.put(DropProcessor.TYPE, new DropProcessor.Factory()); processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory()); processors.put(CsvProcessor.TYPE, new CsvProcessor.Factory()); + processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService)); + processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory()); + processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java new file mode 100644 index 0000000000000..da87f5201db72 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.common.Nullable; +import org.opensearch.common.ValidationException; +import org.opensearch.common.regex.Regex; +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + +/** + * Processor that removes existing fields by field patterns or excluding field patterns. 
+ */ +public final class RemoveByPatternProcessor extends AbstractProcessor { + + public static final String TYPE = "remove_by_pattern"; + private final List<String> fieldPatterns; + private final List<String> excludeFieldPatterns; + + RemoveByPatternProcessor( + String tag, + String description, + @Nullable List<String> fieldPatterns, + @Nullable List<String> excludeFieldPatterns + ) { + super(tag, description); + if (fieldPatterns != null && excludeFieldPatterns != null || fieldPatterns == null && excludeFieldPatterns == null) { + throw new IllegalArgumentException("either fieldPatterns or excludeFieldPatterns must be set"); + } + if (fieldPatterns == null) { + this.fieldPatterns = null; + this.excludeFieldPatterns = new ArrayList<>(excludeFieldPatterns); + } else { + this.fieldPatterns = new ArrayList<>(fieldPatterns); + this.excludeFieldPatterns = null; + } + } + + public List<String> getFieldPatterns() { + return fieldPatterns; + } + + public List<String> getExcludeFieldPatterns() { + return excludeFieldPatterns; + } + + @Override + public IngestDocument execute(IngestDocument document) { + Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet()); + Set<String> metadataFields = document.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toSet()); + + if (fieldPatterns != null && !fieldPatterns.isEmpty()) { + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. + if (!metadataFields.contains(field)) { + final boolean matched = fieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field)); + if (matched) { + document.removeField(field); + } + } + }); + } + + if (excludeFieldPatterns != null && !excludeFieldPatterns.isEmpty()) { + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc.
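+ // a non-metadata field is removed when it matches at least one of the configured field_pattern globs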
+ if (!metadataFields.contains(field)) { + final boolean matched = excludeFieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field)); + if (!matched) { + document.removeField(field); + } + } + }); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + public Factory() {} + + @Override + public RemoveByPatternProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + final List<String> fieldPatterns = new ArrayList<>(); + final List<String> excludeFieldPatterns = new ArrayList<>(); + final Object fieldPattern = ConfigurationUtils.readOptionalObject(config, "field_pattern"); + final Object excludeFieldPattern = ConfigurationUtils.readOptionalObject(config, "exclude_field_pattern"); + + if (fieldPattern == null && excludeFieldPattern == null || fieldPattern != null && excludeFieldPattern != null) { + throw newConfigurationException( + TYPE, + processorTag, + "field_pattern", + "either field_pattern or exclude_field_pattern must be set" + ); + } + + if (fieldPattern != null) { + if (fieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> fieldPatternList = (List<String>) fieldPattern; + fieldPatterns.addAll(fieldPatternList); + } else { + fieldPatterns.add((String) fieldPattern); + } + validateFieldPatterns(processorTag, fieldPatterns, "field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, fieldPatterns, null); + } else { + if (excludeFieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> excludeFieldPatternList = (List<String>) excludeFieldPattern; + excludeFieldPatterns.addAll(excludeFieldPatternList); + } else { + excludeFieldPatterns.add((String) excludeFieldPattern); + } + validateFieldPatterns(processorTag, excludeFieldPatterns, "exclude_field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, null, excludeFieldPatterns); + } + } + + private void validateFieldPatterns(String processorTag, List<String> patterns, String patternKey) { + List<String> validationErrors = new ArrayList<>(); + for (String fieldPattern : patterns) { + if (fieldPattern.contains("#")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a '#'"); + } + if (fieldPattern.contains(":")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a ':'"); + } + if (fieldPattern.startsWith("_")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not start with '_'"); + } + if (Strings.validFileNameExcludingAstrix(fieldPattern) == false) { + validationErrors.add( + patternKey + " [" + fieldPattern + "] must not contain the following characters " + Strings.INVALID_FILENAME_CHARS + ); + } + } + + if (validationErrors.size() > 0) { + ValidationException validationException = new ValidationException(); + validationException.addValidationErrors(validationErrors); + throw newConfigurationException(TYPE, processorTag, patternKey, validationException.getMessage()); + } + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index a48cfd87b78c3..e6d151aec9be1 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; import org.opensearch.index.VersionType; import org.opensearch.ingest.AbstractProcessor; @@ -42,11 +43,15 @@ import org.opensearch.script.TemplateScript; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + /** * Processor that removes existing fields. Nothing happens if the field is not present. */ @@ -55,11 +60,28 @@ public final class RemoveProcessor extends AbstractProcessor { public static final String TYPE = "remove"; private final List<TemplateScript.Factory> fields; + private final List<TemplateScript.Factory> excludeFields; private final boolean ignoreMissing; - RemoveProcessor(String tag, String description, List<TemplateScript.Factory> fields, boolean ignoreMissing) { + RemoveProcessor( + String tag, + String description, + @Nullable List<TemplateScript.Factory> fields, + @Nullable List<TemplateScript.Factory> excludeFields, + boolean ignoreMissing + ) { super(tag, description); - this.fields = new ArrayList<>(fields); + if (fields == null && excludeFields == null || fields != null && excludeFields != null) { + throw new IllegalArgumentException("either fields or excludeFields must be set"); + } + if (fields != null) { + this.fields = new ArrayList<>(fields); + this.excludeFields = null; + } else { + this.fields = null; + this.excludeFields = new ArrayList<>(excludeFields); + } + this.ignoreMissing = ignoreMissing; } @@ -67,42 +89,76 @@ public List<TemplateScript.Factory> getFields() { return fields; } + public List<TemplateScript.Factory> getExcludeFields() { + return excludeFields; + } + @Override public IngestDocument execute(IngestDocument document) { - fields.forEach(field -> { - String path = document.renderTemplate(field); - final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); - if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { - if (ignoreMissing) { - return; - } else if (fieldPathIsNullOrEmpty) { - throw new IllegalArgumentException("field path cannot be null nor empty"); - } else { - throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + if (fields != null && !fields.isEmpty()) { + fields.forEach(field -> { + String path = document.renderTemplate(field); + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } } - } - // cannot remove _index, _version and _version_type. 
- if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) - || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) - || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { - throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); - } - // removing _id is disallowed when there's an external version specified in the request - if (path.equals(IngestDocument.Metadata.ID.getFieldName()) - && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { - String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); - if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { - Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); - throw new IllegalArgumentException( - "cannot remove metadata field [_id] when specifying external version for the document, version: " - + version - + ", version_type: " - + versionType - ); + + // cannot remove _index, _version and _version_type. + if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); } + // removing _id is disallowed when there's an external version specified in the request + if (path.equals(IngestDocument.Metadata.ID.getFieldName()) + && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { + Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); + throw new IllegalArgumentException( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType + ); + } + } + document.removeField(path); + }); + } + + if (excludeFields != null && !excludeFields.isEmpty()) { + Set<String> excludeFieldSet = new HashSet<>(); + excludeFields.forEach(field -> { + String path = document.renderTemplate(field); + // ignore the empty or null field path + if (!Strings.isNullOrEmpty(path)) { + excludeFieldSet.add(path); + } + }); + + if (!excludeFieldSet.isEmpty()) { + Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet()); + Set<String> metadataFields = document.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toSet()); + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. 
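+ // any non-metadata field that is not listed in exclude_field is removed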
+ if (!metadataFields.contains(field) && !excludeFieldSet.contains(field)) { + document.removeField(field); + } + }); } - document.removeField(path); - }); + } + return document; } @@ -127,20 +183,41 @@ public RemoveProcessor create( Map<String, Object> config ) throws Exception { final List<String> fields = new ArrayList<>(); - final Object field = ConfigurationUtils.readObject(TYPE, processorTag, config, "field"); - if (field instanceof List) { - @SuppressWarnings("unchecked") - List<String> stringList = (List<String>) field; - fields.addAll(stringList); - } else { - fields.add((String) field); + final List<String> excludeFields = new ArrayList<>(); + final Object field = ConfigurationUtils.readOptionalObject(config, "field"); + final Object excludeField = ConfigurationUtils.readOptionalObject(config, "exclude_field"); + + if (field == null && excludeField == null || field != null && excludeField != null) { + throw newConfigurationException(TYPE, processorTag, "field", "either field or exclude_field must be set"); } - final List<TemplateScript.Factory> compiledTemplates = fields.stream() - .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) - .collect(Collectors.toList()); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - return new RemoveProcessor(processorTag, description, compiledTemplates, ignoreMissing); + + if (field != null) { + if (field instanceof List) { + @SuppressWarnings("unchecked") + List<String> stringList = (List<String>) field; + fields.addAll(stringList); + } else { + fields.add((String) field); + } + List<TemplateScript.Factory> fieldCompiledTemplates = fields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, fieldCompiledTemplates, null, ignoreMissing); + } else { + if (excludeField instanceof List) { + @SuppressWarnings("unchecked") + List<String> stringList = (List<String>) excludeField; + excludeFields.addAll(stringList); + } else { + excludeFields.add((String) excludeField); + } + List<TemplateScript.Factory> excludeFieldCompiledTemplates = excludeFields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "exclude_field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, null, excludeFieldCompiledTemplates, ignoreMissing); + } } } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java new file mode 100644 index 0000000000000..5edb44b8c64f2 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CommunityIdProcessorFactoryTests extends OpenSearchTestCase { + private CommunityIdProcessor.Factory factory; + + @Before + public void init() { + factory = new CommunityIdProcessor.Factory(); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + int seed = randomIntBetween(0, 65535); + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + config.put("source_port_field", "source_port"); + config.put("destination_ip_field", "destination_ip"); + config.put("destination_port_field", "destination_port"); + config.put("iana_protocol_number_field", "iana_protocol_number"); + config.put("protocol_field", "protocol"); + config.put("icmp_type_field", "icmp_type"); + config.put("icmp_code_field", "icmp_code"); + config.put("seed", seed); + config.put("target_field", "community_id_hash"); + config.put("ignore_missing", ignoreMissing); + String processorTag = randomAlphaOfLength(10); + CommunityIdProcessor communityIDProcessor = factory.create(null, processorTag, null, config); + assertThat(communityIDProcessor.getTag(), equalTo(processorTag)); + assertThat(communityIDProcessor.getSourceIPField(), equalTo("source_ip")); + assertThat(communityIDProcessor.getSourcePortField(), equalTo("source_port")); + assertThat(communityIDProcessor.getDestinationIPField(), equalTo("destination_ip")); + assertThat(communityIDProcessor.getDestinationPortField(), equalTo("destination_port")); + assertThat(communityIDProcessor.getIANAProtocolNumberField(), equalTo("iana_protocol_number")); + assertThat(communityIDProcessor.getProtocolField(), equalTo("protocol")); + assertThat(communityIDProcessor.getIcmpTypeField(), equalTo("icmp_type")); + assertThat(communityIDProcessor.getIcmpCodeField(), equalTo("icmp_code")); + assertThat(communityIDProcessor.getSeed(), equalTo(seed)); + assertThat(communityIDProcessor.getTargetField(), equalTo("community_id_hash")); + assertThat(communityIDProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + } + + public void testCreateWithSourceIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + + config.put("source_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + } + + public void testCreateWithDestinationIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should 
have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + } + + public void testInvalidSeed() throws Exception { + Map<String, Object> config = new HashMap<>(); + int seed; + if (randomBoolean()) { + seed = -1; + } else { + seed = 65536; + } + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", "destination_ip"); + config.put("seed", seed); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchException e) { + assertThat(e.getMessage(), equalTo("[seed] seed must be between 0 and 65535")); + } + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java new file mode 100644 index 0000000000000..2bda9db80dbcc --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java @@ -0,0 +1,910 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CommunityIdProcessorTests extends OpenSearchTestCase { + + public void testResolveProtocol() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "cannot resolve protocol by neither iana protocol number field [iana_protocol_number] nor protocol name field [protocol]", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + String protocol = randomAlphaOfLength(10); + source.put("protocol", protocol); + IngestDocument ingestDocumentWithProtocol = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithProtocol = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported protocol [" + protocol + "]", + IllegalArgumentException.class, + () -> 
processorWithProtocol.execute(ingestDocumentWithProtocol) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + int ianaProtocolNumber = randomIntBetween(1000, 10000); + source.put("iana_protocol_number", ianaProtocolNumber); + IngestDocument ingestDocumentWithProtocolNumber = RandomDocumentPicks.randomIngestDocument(random(), source); + + Processor processorWithProtocolNumber = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported iana protocol number [" + ianaProtocolNumber + "]", + IllegalArgumentException.class, + () -> processorWithProtocolNumber.execute(ingestDocumentWithProtocolNumber) + ); + } + + public void testResolveIPAndPort() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", ""); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [source_ip] is null or empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourceIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourceIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + assertThrows( + "ip address in the field [source_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidSourceIP.execute(ingestDocumentWithInvalidSourceIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", ""); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP); + 
assertThat(ingestDocumentWithEmptyDestIP.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [destination_ip] is null or empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "ip address in the field [destination_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidDestIP.execute(ingestDocumentWithInvalidDestIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument normalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourceIPFieldPath = createCommunityIdProcessor( + "", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptySourceIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourceIPFieldPath.execute(normalIngestDocument) + ); + } + ignore_missing = randomBoolean(); + Processor processorWithEmptyDestIPFieldPath = createCommunityIdProcessor( + "source_ip", + "source_port", + "", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIPFieldPath.execute(normalIngestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", null); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptySourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + 
processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort); + assertThat(ingestDocumentWithEmptySourcePort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 65536); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port must be between 0 and 65535, but port in the field path [source_port] is [65536]", + IllegalArgumentException.class, + () -> processorWithInvalidSourcePort.execute(ingestDocumentWithInvalidSourcePort) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", null); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort); + assertThat(ingestDocumentWithEmptyDestPort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is null", + IllegalArgumentException.class, + () -> processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", -1); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is [-1]", + IllegalArgumentException.class, + () -> processorWithInvalidDestPort.execute(ingestDocumentWithInvalidDestPort) + ); + } + + public void testResolveICMPTypeAndCode() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + int protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), source); + String targetFieldName = randomAlphaOfLength(100); + boolean ignoreMissing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + source.put("icmp_type", null); + IngestDocument ingestDocumentWithNullType = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullType.execute(ingestDocumentWithNullType); + assertThat(ingestDocumentWithNullType.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullType.execute(ingestDocumentWithNullType) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + int icmpType; + if (randomBoolean()) { + icmpType = randomIntBetween(256, 1000); + } else { + icmpType = randomIntBetween(-100, -1); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithInvalidICMPType = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidICMPType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + false + ); + assertThrows( + "invalid icmp message type [" + icmpType + "]", + IllegalArgumentException.class, + () -> processorWithInvalidICMPType.execute(ingestDocumentWithInvalidICMPType) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithNoCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNoCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNoCode.execute(ingestDocumentWithNoCode); + assertThat(ingestDocumentWithNoCode.hasField(targetFieldName), 
equalTo(false)); + } else { + assertThrows( + "icmp message code field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNoCode.execute(ingestDocumentWithNoCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + source.put("icmp_code", null); + IngestDocument ingestDocumentWithNullCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullCode.execute(ingestDocumentWithNullCode); + assertThat(ingestDocumentWithNullCode.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message code cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullCode.execute(ingestDocumentWithNullCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + int icmpCode; + if (randomBoolean()) { + icmpCode = randomIntBetween(256, 1000); + } else { + icmpCode = randomIntBetween(-100, -1); + } + source.put("icmp_code", icmpCode); + IngestDocument ingestDocumentWithInvalidCode = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + null, + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "invalid icmp message code [" + icmpCode + "]", + IllegalArgumentException.class, + () -> processorWithInvalidCode.execute(ingestDocumentWithInvalidCode) + ); + } + + public void testTransportProtocols() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + boolean isProtocolNameSpecified = randomBoolean(); + if (isProtocolNameSpecified) { + source.put("protocol", randomFrom("tcp", "udp", "sctp")); + } else { + source.put("iana_number", randomFrom(6, 17, 132)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor; + if (isProtocolNameSpecified) { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + } else { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + randomIntBetween(0, 65535), + 
targetFieldName, + randomBoolean() + ); + } + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + String communityIDHash = ingestDocument.getFieldValue(targetFieldName, String.class); + assertThat(communityIDHash.startsWith("1:"), equalTo(true)); + } + + public void testICMP() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + boolean isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + source.put("type", randomFrom(0, 8, 9, 10, 13, 15, 17, 18)); + } else { + source.put("protocol", "ipv6-icmp"); + source.put("type", randomFrom(128, 129, 130, 131, 133, 134, 135, 136, 139, 140, 144, 145)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + // see https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml#icmp-parameters-codes-5 + source.put("type", randomIntBetween(3, 6)); + source.put("code", 0); + } else { + source.put("protocol", "ipv6-icmp"); + // see https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xhtml#icmpv6-parameters-codes-23 + source.put("type", randomIntBetween(146, 161)); + source.put("code", 0); + } + + IngestDocument ingestDocumentWithOnewayFlow = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithOnewayFlow = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + "code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processorWithOnewayFlow.execute(ingestDocumentWithOnewayFlow); + assertThat(ingestDocumentWithOnewayFlow.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocumentWithOnewayFlow.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + } + + // test that the hash result is consistent with the known value + public void testHashResult() throws Exception { + int index = randomIntBetween(0, CommunityIdHashInstance.values().length - 1); + CommunityIdHashInstance instance = CommunityIdHashInstance.values()[index]; + final boolean isTransportProtocol = instance.name().equals("TCP") + || instance.name().equals("UDP") + || instance.name().equals("SCTP"); + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", instance.getSourceIp()); + source.put("destination_ip", instance.getDestIP()); + if (isTransportProtocol) { + source.put("source_port", instance.getSourcePort()); + source.put("destination_port", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + 
boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + ignore_missing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + + // test the flow tuple in reversed direction, the hash result should be the same value + source = new HashMap<>(); + source.put("source_ip", instance.getDestIP()); + source.put("destination_ip", instance.getSourceIp()); + source.put("source_port", instance.getDestPort()); + source.put("destination_port", instance.getSourcePort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocumentWithReversedDirection = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithReversedDirection = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + randomBoolean() + ); + + processorWithReversedDirection.execute(ingestDocumentWithReversedDirection); + assertThat(ingestDocumentWithReversedDirection.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocumentWithReversedDirection.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } else { + source.put("type", instance.getSourcePort()); + source.put("code", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_number", + null, + "type", + "code", + 0, + targetFieldName, + ignore_missing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } + } + + private enum CommunityIdHashInstance { + TCP("66.35.250.204", "128.232.110.120", 6, 80, 34855, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="), + UDP("8.8.8.8", "192.168.1.52", 17, 53, 54585, "1:d/FP5EW3wiY1vCndhwleRRKHowQ="), + SCTP("192.168.170.8", "192.168.170.56", 132, 7, 7, "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs="), + ICMP("192.168.0.89", "192.168.0.1", 1, 8, 0, "1:X0snYXpgwiv9TZtqg64sgzUn6Dk="), + ICMP_V6("fe80::260:97ff:fe07:69ea", "ff02::1", 58, 134, 0, "1:pkvHqCL88/tg1k4cPigmZXUtL00="); + + private final String sourceIp; + private final String destIP; + private final int protocolNumber; + private final int sourcePort; + private final int destPort; + private final String hash; + + CommunityIdHashInstance(String sourceIp, String destIP, int protocolNumber, int sourcePort, int destPort, String hash) { + this.sourceIp = sourceIp; + this.destIP = destIP; + this.protocolNumber = protocolNumber; + this.sourcePort = sourcePort; + this.destPort = destPort; + this.hash = hash; + } + + private String getSourceIp() { + return this.sourceIp; + } + + private String getDestIP() { + return this.destIP; + } + + private int getProtocolNumber() { + return this.protocolNumber; + } + + private int getSourcePort() { + return 
this.sourcePort; + } + + private int getDestPort() { + return this.destPort; + } + + private String getHash() { + return this.hash; + } + } + + private static Processor createCommunityIdProcessor( + String sourceIPField, + String sourcePortField, + String destinationIPField, + String destinationPortField, + String ianaProtocolNumberField, + String protocolField, + String icmpTypeField, + String icmpCodeField, + int seed, + String targetField, + boolean ignoreMissing + ) { + return new CommunityIdProcessor( + randomAlphaOfLength(10), + null, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java new file mode 100644 index 0000000000000..c1ca86a49e334 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CopyProcessorFactoryTests extends OpenSearchTestCase { + + private CopyProcessor.Factory factory; + + @Before + public void init() { + factory = new CopyProcessor.Factory(TestTemplateService.instance()); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + boolean removeSource = randomBoolean(); + boolean overrideTarget = randomBoolean(); + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "source"); + config.put("target_field", "target"); + config.put("ignore_missing", ignoreMissing); + config.put("remove_source", removeSource); + config.put("override_target", overrideTarget); + String processorTag = randomAlphaOfLength(10); + CopyProcessor copyProcessor = factory.create(null, processorTag, null, config); + assertThat(copyProcessor.getTag(), equalTo(processorTag)); + assertThat(copyProcessor.getSourceField().newInstance(Collections.emptyMap()).execute(), equalTo("source")); + assertThat(copyProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); + assertThat(copyProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + assertThat(copyProcessor.isRemoveSource(), equalTo(removeSource)); + assertThat(copyProcessor.isOverrideTarget(), equalTo(overrideTarget)); + } + + public void testCreateWithSourceField() throws Exception { + Map<String, Object> config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + + config.put("source_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch 
(OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + } + + public void testCreateWithTargetField() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "source"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + + config.put("source_field", "source"); + config.put("target_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + } + + public void testInvalidMustacheTemplate() throws Exception { + CopyProcessor.Factory factory = new CopyProcessor.Factory(TestTemplateService.instance(true)); + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "{{source}}"); + config.put("target_field", "target"); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows(OpenSearchException.class, () -> factory.create(null, processorTag, null, config)); + assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); + assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java new file mode 100644 index 0000000000000..3259ba85ef340 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -0,0 +1,145 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CopyProcessorTests extends OpenSearchTestCase { + + public void testCopyExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + + Processor processorWithEmptyTarget = createCopyProcessor(sourceFieldName, "", false, false, false); + assertThrows( + "target field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyTarget.execute(ingestDocument) + ); + + Processor processorWithSameSourceAndTarget = createCopyProcessor(sourceFieldName, sourceFieldName, false, false, false); + assertThrows( + "source field path and target field path cannot be same", + IllegalArgumentException.class, + () -> processorWithSameSourceAndTarget.execute(ingestDocument) + ); + } + + public void testCopyWithIgnoreMissing() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor("non-existing-field", targetFieldName, false, false, false); + assertThrows( + "source field [non-existing-field] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + Processor processorWithEmptyFieldName = createCopyProcessor("", targetFieldName, false, false, false); + assertThrows( + "source field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyFieldName.execute(ingestDocument) + ); + + Processor processorWithIgnoreMissing = createCopyProcessor("non-existing-field", targetFieldName, true, false, false); + processorWithIgnoreMissing.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } + + public void testCopyWithRemoveSource() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + assertThat(ingestDocument.hasField(sourceFieldName), equalTo(false)); + } + + public void 
testCopyToExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + Object sourceValue = RandomDocumentPicks.randomFieldValue(random()); + String sourceFieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, sourceValue); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + assertThrows( + "target field [" + targetFieldName + "] already exists", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + // if override_target is false but target field's value is null, copy can execute successfully + String targetFieldWithNullValue = RandomDocumentPicks.addRandomField(random(), ingestDocument, null); + Processor processorWithTargetNullValue = createCopyProcessor(sourceFieldName, targetFieldWithNullValue, false, false, false); + processorWithTargetNullValue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldWithNullValue), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldWithNullValue, Object.class), sourceValue); + + Processor processorWithOverrideTargetIsTrue = createCopyProcessor(sourceFieldName, targetFieldName, false, false, true); + processorWithOverrideTargetIsTrue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + } + + @SuppressWarnings("unchecked") + private static void assertDeepCopiedObjectEquals(Object expected, Object actual) { + if (expected instanceof Map) { + Map<String, Object> expectedMap = (Map<String, Object>) expected; + Map<String, Object> actualMap = (Map<String, Object>) actual; + assertEquals(expectedMap.size(), actualMap.size()); + for (Map.Entry<String, Object> expectedEntry : expectedMap.entrySet()) { + assertDeepCopiedObjectEquals(expectedEntry.getValue(), actualMap.get(expectedEntry.getKey())); + } + } else if (expected instanceof List) { + assertArrayEquals(((List<?>) expected).toArray(), ((List<?>) actual).toArray()); + } else if (expected instanceof byte[]) { + assertArrayEquals((byte[]) expected, (byte[]) actual); + } else { + assertEquals(expected, actual); + } + } + + private static Processor createCopyProcessor( + String sourceFieldName, + String targetFieldName, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + return new CopyProcessor( + randomAlphaOfLength(10), + null, + new TestTemplateService.MockTemplateScript.Factory(sourceFieldName), + new TestTemplateService.MockTemplateScript.Factory(targetFieldName), + ignoreMissing, + removeSource, + overrideTarget + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java new file mode 100644 index 0000000000000..09ba97ebb4595 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class RemoveByPatternProcessorFactoryTests extends OpenSearchTestCase { + + private RemoveByPatternProcessor.Factory factory; + + @Before + public void init() { + factory = new RemoveByPatternProcessor.Factory(); + } + + public void testCreateFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + config3.put("field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + equalTo( + "[field_pattern] Validation Failed: " + + "1: field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "2: field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: field_pattern [#] must not contain a '#';" + + "4: field_pattern [:] must not contain a ':';" + + "5: field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreateExcludeFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("exclude_field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("exclude_field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + config3.put("exclude_field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + 
equalTo( + "[exclude_field_pattern] Validation Failed: " + + "1: exclude_field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "2: exclude_field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: exclude_field_pattern [#] must not contain a '#';" + + "4: exclude_field_pattern [:] must not contain a ':';" + + "5: exclude_field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreatePatternsFailed() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", List.of("foo*")); + config.put("exclude_field_pattern", List.of("bar*")); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", null); + config2.put("exclude_field_pattern", null); + + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java new file mode 100644 index 0000000000000..82ff93de1f44e --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RemoveByPatternProcessorTests extends OpenSearchTestCase { + + public void testRemoveWithFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("bar_1", "value"); + ingestDocument.setFieldValue("bar_2", "value"); + List<String> fieldPatterns = new ArrayList<>(); + fieldPatterns.add("foo*"); + fieldPatterns.add("_index*"); + fieldPatterns.add("_id*"); + fieldPatterns.add("_version*"); + Processor processor = new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, null); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("bar_1"), equalTo(true)); + assertThat(ingestDocument.hasField("bar_2"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testRemoveWithExcludeFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List<String> excludeFieldPatterns = new ArrayList<>(); + excludeFieldPatterns.add("foo_3*"); + Processor processorWithExcludeFieldsAndPatterns = new RemoveByPatternProcessor( + randomAlphaOfLength(10), + null, + null, + excludeFieldPatterns + ); + processorWithExcludeFieldsAndPatterns.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testCreateRemoveByPatternProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "either fieldPatterns and excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, null, null) + ); + + final List<String> fieldPatterns; + if (randomBoolean()) { + fieldPatterns = new ArrayList<>(); + } else { + fieldPatterns = List.of("foo_1*"); + } + + final List<String> excludeFieldPatterns; + if (randomBoolean()) { + excludeFieldPatterns = new ArrayList<>(); + } else { + 
excludeFieldPatterns = List.of("foo_2*"); + } + + assertThrows( + "either fieldPatterns and excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, excludeFieldPatterns) + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java index 66ca888a0d39f..6332eeafc387c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java @@ -41,6 +41,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -79,16 +80,6 @@ public void testCreateMultipleFields() throws Exception { ); } - public void testCreateMissingField() throws Exception { - Map<String, Object> config = new HashMap<>(); - try { - factory.create(null, null, null, config); - fail("factory create should have failed"); - } catch (OpenSearchParseException e) { - assertThat(e.getMessage(), equalTo("[field] required property is missing")); - } - } - public void testInvalidMustacheTemplate() throws Exception { RemoveProcessor.Factory factory = new RemoveProcessor.Factory(TestTemplateService.instance(true)); Map<String, Object> config = new HashMap<>(); @@ -98,4 +89,31 @@ public void testInvalidMustacheTemplate() throws Exception { assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); } + + public void testCreateWithExcludeField() throws Exception { + Map<String, Object> config = new HashMap<>(); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field", "field1"); + config2.put("exclude_field", "field2"); + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); + + Map<String, Object> config6 = new HashMap<>(); + config6.put("exclude_field", "exclude_field"); + RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config6); + assertThat( + removeProcessor.getExcludeFields() + .stream() + .map(template -> template.newInstance(Collections.emptyMap()).execute()) + .collect(Collectors.toList()), + equalTo(List.of("exclude_field")) + ); + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index c138ad606d2e5..7fc1d3f2f0a3c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -38,8 +38,10 @@ import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; import 
org.opensearch.ingest.TestTemplateService; +import org.opensearch.script.TemplateScript; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -57,12 +59,28 @@ public void testRemoveFields() throws Exception { randomAlphaOfLength(10), null, Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory(field)), + null, false ); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(field), equalTo(false)); } + public void testRemoveByExcludeFields() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List<TemplateScript.Factory> excludeFields = new ArrayList<>(); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + Processor processor = new RemoveProcessor(randomAlphaOfLength(10), null, null, excludeFields, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(false)); + } + public void testRemoveNonExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); @@ -183,6 +201,34 @@ public void testRemoveMetadataField() throws Exception { } } + public void testCreateRemoveProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "either fields or excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, null, null, false) + ); + + final List<TemplateScript.Factory> fields; + if (randomBoolean()) { + fields = new ArrayList<>(); + } else { + fields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + } + + final List<TemplateScript.Factory> excludeFields; + if (randomBoolean()) { + excludeFields = new ArrayList<>(); + } else { + excludeFields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + } + + assertThrows( + "either fields or excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, fields, excludeFields, false) + ); + } + public void testRemoveDocumentId() throws Exception { Map<String, Object> config = new HashMap<>(); config.put("field", IngestDocument.Metadata.ID.getFieldName()); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml index f44cc1f9f9fcf..2a816f0386667 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml @@ -36,3 +36,53 @@ - contains: { nodes.$cluster_manager.ingest.processors: { type: split } } - contains: { nodes.$cluster_manager.ingest.processors: { type: trim } } - contains: { nodes.$cluster_manager.ingest.processors: { type: uppercase } } + +--- +"Copy processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "copy processor was 
introduced in 2.12.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: copy } } + +--- +"Remove_by_pattern processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "remove_by_pattern processor was introduced in 2.12.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: remove_by_pattern } } + +--- +"Community_id processor exists": + - skip: + version: " - 2.12.99" + features: contains + reason: "community_id processor was introduced in 2.13.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + - contains: { nodes.$cluster_manager.ingest.processors: { type: community_id } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml index 6668b468f8edc..e120a865052b0 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -319,3 +319,43 @@ teardown: } - match: { docs.0.error.type: "illegal_argument_exception" } - match: { docs.0.error.reason: "cannot remove metadata field [_id] when specifying external version for the document, version: 1, version_type: external_gte" } + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/1578 +--- +"Test remove processor with exclude_field": + - skip: + version: " - 2.11.99" + reason: "exclude_field is introduced in 2.12" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "exclude_field": "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "zoo"}} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml new file mode 100644 index 0000000000000..0203b62ba67d6 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml @@ -0,0 +1,374 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test create copy processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + catch: /\[target\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "source" + } + } + ] + } + - do: + catch: /\[source\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "target_field" : "target" + } + } + ] + } + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : {
"source_field" : "source", + "target_field" : "target", + "ignore_missing" : true, + "remove_source" : true, + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test copy processor with ignore_missing": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field \[unknown\_field\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello" } } + +--- +"Test copy processor with remove_source": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "remove_source" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "hello" } } + +--- +"Test copy processor with override_target": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target field \[bar\] already exists/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + +--- +"Test copy processor with template snippets": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "bar", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target field path cannot be null nor empty/ + 
index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path and target field path cannot be same/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "bar", + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "foo", target: "bar", foo: "hello", bar: "hello" } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml new file mode 100644 index 0000000000000..397eb8f7b6033 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml @@ -0,0 +1,146 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test creating remove_by_pattern processor failed": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : "foo*", + "exclude_field_pattern" : "bar*" + } + } + ] + } + + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + } + } + ] + } + +--- +"Test remove_by_pattern processor with field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : ["foo*", "*a*b"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: {zoo: "bar" }} + +--- +"Test remove_by_pattern processor with exclude_field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "exclude_field_pattern": ["foo*", "a*b*"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { foo1: "bar", foo2: "bar", ab: "bar", aabb: "bar"}} + + +--- +"Test cannot remove metadata fields by remove_by_pattern 
processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /field\_pattern \[\_id\] must not start with \'\_\'\;/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern": "_id" + } + } + ] + } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml new file mode 100644 index 0000000000000..6de5371bb49f7 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml @@ -0,0 +1,370 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test creat community_id processor": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + catch: /\[source\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "destination_ip_field" : "dest" + } + } + ] + } + - do: + catch: /\[destination\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "src" + } + } + ] + } + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "iana_protocol_number_field" : "iana_number", + "protocol_field" : "protocol", + "icmp_type_field" : "icmp", + "icmp_code_field" : "code", + "seed" : 0, + "target_field" : "community_id", + "ignore_missing" : false + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test community_id processor with ignore_missing": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /ip address in the field \[source\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "1.1.1.1", + protocol: "tcp" + } + + - do: + catch: /ip address in the field \[dest\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "1.1.1.1", protocol: "tcp" } } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "2.2.2.2", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { dest: "2.2.2.2", protocol: "tcp" } } + +--- +"Test community_id processor for tcp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + 
ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "66.35.250.204", + dest: "128.232.110.120", + protocol: "tcp", + srcPort: 80, + destPort: 34855 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:LQU9qZlK+B5F3KDmev6m5PMibrg=" } + +--- +"Test community_id processor for udp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "8.8.8.8", + dest: "192.168.1.52", + protocol: "udp", + srcPort: 53, + destPort: 54585 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:d/FP5EW3wiY1vCndhwleRRKHowQ=" } + +--- +"Test community_id processor for sctp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.170.8", + dest: "192.168.170.56", + protocol: "sctp", + srcPort: 7, + destPort: 7 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs=" } + +--- +"Test community_id processor for icmp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.0.89", + dest: "192.168.0.1", + protocol: "icmp", + type: 8, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:X0snYXpgwiv9TZtqg64sgzUn6Dk=" } + +--- +"Test community_id processor for icmp-v6": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "fe80::260:97ff:fe07:69ea", + dest: "ff02::1", + protocol: "ipv6-icmp", + type: 134, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:pkvHqCL88/tg1k4cPigmZXUtL00=" } diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java 
b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java index c7cf27b2e6493..b27c0f9fe0b31 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java @@ -218,6 +218,7 @@ Set<Property> getProperties() { return properties; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { SpecialPermission.check(); CityResponse response = AccessController.doPrivileged( @@ -305,6 +306,7 @@ private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { SpecialPermission.check(); CountryResponse response = AccessController.doPrivileged( @@ -351,6 +353,7 @@ private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveAsnGeoData(InetAddress ipAddress) { SpecialPermission.check(); AsnResponse response = AccessController.doPrivileged( diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..82a17e2b79290 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +00759eaff8f62b38ba66a05f26ab784c268908d3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 deleted file mode 100644 index 402cc36ba3d68..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1782a69d0e83af9cc3c65db0dcd2e7e7c1e5f90e \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 8ca28a905f216..8e15488900e5f 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; @@ -57,7 +56,7 @@ import org.opensearch.search.aggregations.pipeline.SimpleValue; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.util.Arrays; @@ -80,10 +79,10 @@ import static org.hamcrest.Matchers.notNullValue; // TODO: please convert to unit tests! -public class MoreExpressionIT extends ParameterizedOpenSearchIntegTestCase { +public class MoreExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MoreExpressionIT(Settings dynamicSettings) { - super(dynamicSettings); + public MoreExpressionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -94,11 +93,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(ExpressionModulePlugin.class); diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index b1cb5356a4405..7465fa1e5ddbe 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -35,7 +35,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; @@ -43,7 +42,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -54,10 +53,10 @@ import static org.hamcrest.Matchers.containsString; //TODO: please convert to unit tests! 
-public class StoredExpressionIT extends ParameterizedOpenSearchIntegTestCase { +public class StoredExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public StoredExpressionIT(Settings dynamicSettings) { - super(dynamicSettings); + public StoredExpressionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 5629b3b4a6972..0520177b72b62 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -170,6 +170,7 @@ public String getType() { return NAME; } + @SuppressWarnings("removal") @Override public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) { // classloader created here diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java index 143ff4f5c51bd..d7be890014add 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private FieldScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java index 498c0542e9c3e..94a422503d6bd 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private NumberSortScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java index 499f94afcb6af..a1d6df80715be 100644 --- 
a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private TermsSetQueryScript.LeafFactory compile(String expression) { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index e480fbbd22ad2..f7abc220e75d8 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -37,11 +37,10 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptType; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -58,10 +57,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.Is.is; -public class MultiSearchTemplateIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiSearchTemplateIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiSearchTemplateIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiSearchTemplateIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(MustacheModulePlugin.class); diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index ec84475b70bb6..842353fdba336 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -128,6 +128,7 @@ private class MustacheExecutableScript extends TemplateScript { this.params = params; } + @SuppressWarnings("removal") @Override public String execute() { final StringWriter writer = new StringWriter(); diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java index 632fee9187eba..f18a7fb3ba1a9 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java @@ -513,6 +513,7 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, Map<String, All } } + @SuppressWarnings("removal") ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) resource::getClassLoader); return new Allowlist(loader, allowlistClasses, allowlistStatics, allowlistClassBindings, Collections.emptyList()); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java index d0af4651d2d3b..2bf70882a501b 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java @@ -501,6 +501,7 @@ private static void endLambdaClass(ClassWriter cw) { * Defines the {@link Class} for the lambda class using the same {@link Compiler.Loader} * that originally defined the class for the Painless script. 
*/ + @SuppressWarnings("removal") private static Class<?> createLambdaClass(Compiler.Loader loader, ClassWriter cw, Type lambdaClassType) { byte[] classBytes = cw.toByteArray(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java index e9edfb73c740c..257687bfb98c5 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java @@ -67,6 +67,7 @@ /** * Implementation of a ScriptEngine for the Painless language. */ +@SuppressWarnings("removal") public final class PainlessScriptEngine implements ScriptEngine { /** diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java index 260a2fc0c062c..6e3448e5eea77 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java @@ -45,10 +45,10 @@ import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue", "this-escape" }) abstract class PainlessLexer extends Lexer { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java index 40e76194f50b2..7ad5d113637c8 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java @@ -56,7 +56,7 @@ @SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) class PainlessParser extends Parser { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -337,7 +337,7 @@ public Vocabulary getVocabulary() { @Override public String getGrammarFileName() { - return "java-escape"; + return "PainlessParser.g4"; } @Override @@ -425,8 +425,8 @@ public final SourceContext source() throws RecognitionException { setState(87); _errHandler.sync(this); _la = _input.LA(1); - while (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { { setState(84); @@ -571,7 +571,7 @@ public final ParametersContext parameters() throws RecognitionException { setState(109); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(98); decltype(); @@ -1088,8 +1088,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(140); 
_errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(139); initializer(); @@ -1101,8 +1101,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(144); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(143); expression(); @@ -1114,8 +1114,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(148); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(147); afterthought(); @@ -1470,8 +1470,8 @@ public final DstatementContext dstatement() throws RecognitionException { setState(193); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(192); expression(); @@ -1661,8 +1661,8 @@ public final BlockContext block() throws RecognitionException { setState(212); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(211); dstatement(); @@ -2491,7 +2491,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); setState(269); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2545,7 +2545,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); setState(278); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2563,7 +2563,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); setState(281); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 4123168604160L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 
4123168604160L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2581,7 +2581,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); setState(284); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2861,7 +2861,7 @@ public final ExpressionContext expression() throws RecognitionException { noncondexpression(0); setState(320); _la = _input.LA(1); - if (!((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0)) { + if (!(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -3938,7 +3938,7 @@ public final PrimaryContext primary() throws RecognitionException { enterOuterAlt(_localctx, 2); { setState(400); _la = _input.LA(1); - if (!((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0)) { + if (!(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -4564,8 +4564,8 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(469); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(461); expression(); @@ -4923,8 +4923,8 @@ public final ArgumentsContext arguments() throws RecognitionException { setState(524); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(516); argument(); @@ -5104,7 +5104,7 @@ public final LambdaContext lambda() throws RecognitionException { setState(543); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(535); lamtype(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e79eda975f417..e155a890c03d1 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -2189,6 +2189,7 @@ private void generateBridgeMethod(PainlessClassBuilder painlessClassBuilder, Pai bridgeClassWriter.visitEnd(); try { + @SuppressWarnings("removal") BridgeLoader bridgeLoader = AccessController.doPrivileged(new PrivilegedAction<BridgeLoader>() { @Override public BridgeLoader run() { diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java 
b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java index 3418bcf01e19f..691e84176dce3 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java @@ -48,6 +48,7 @@ public class DocFieldsPhaseTests extends ScriptTestCase { PainlessLookup lookup = PainlessLookupBuilder.buildFromAllowlists(Allowlist.BASE_ALLOWLISTS); + @SuppressWarnings("removal") ScriptScope compile(String script) { Compiler compiler = new Compiler( MockDocTestScript.CONTEXT.instanceClazz, diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java index 5fc0a202ae45e..ab74463382aaa 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java @@ -41,7 +41,6 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testChildrenAggs() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(matchQuery("randomized", true)) diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java index 04703a65aa19d..4a6157e388777 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -76,11 +75,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleParentAgg() throws Exception { final SearchRequestBuilder searchRequest = client().prepareSearch("test") .setSize(10000) diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index c43d6352b26f8..99527c3273c4b 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ 
b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -44,7 +44,6 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.IdsQueryBuilder; @@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testMultiLevelChild() throws Exception { assertAcked( prepareCreate("test").setMapping( diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java index 39da86c7fd726..4b5470d17c100 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { ArrayList<Class<? 
extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java index 8c19c0aafe763..9c0f96cf382a6 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java @@ -41,7 +41,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -51,10 +51,10 @@ import java.util.Map; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public abstract class ParentChildTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class ParentChildTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ParentChildTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public ParentChildTestCase(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index c8763c2f3f749..01436404e8a85 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; @@ -57,7 +56,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -90,10 +89,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; -public class PercolatorQuerySearchIT extends ParameterizedOpenSearchIntegTestCase { +public class PercolatorQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public PercolatorQuerySearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public PercolatorQuerySearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -104,11 +103,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean addMockGeoShapeFieldMapper() { return false; diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java 
b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index cdc3cac1a1f06..488c2e33648e7 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -47,7 +46,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -62,14 +61,14 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -public class RankEvalRequestIT extends ParameterizedOpenSearchIntegTestCase { +public class RankEvalRequestIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; - public RankEvalRequestIT(Settings dynamicSettings) { - super(dynamicSettings); + public RankEvalRequestIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(RankEvalModulePlugin.class); diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java index b13a4d5a39a5b..02e858cb8d1f2 100644 --- a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java @@ -157,6 +157,7 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS throw new UnsupportedOperationException("URL repository doesn't support this operation"); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") private static InputStream getInputStream(URL url) throws IOException { try { diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java index 93e2e28718d51..05c6222d3d89a 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java @@ -40,6 +40,7 @@ /** * Provides access to the native method sd_notify from libsystemd. 
*/ +@SuppressWarnings("removal") class Libsystemd { static { diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..0643f16dc1052 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +44a4e095d7e047a9452d81b224905b72c830f8ae \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 deleted file mode 100644 index dde9b7c100dc7..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -147cb42a90a29501d9ca6094ea0db1d213f3076a \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..3f4d49a78791b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7f57fe7322e6d3a9b4edcc3da0b1ee0791a814ec \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 deleted file mode 100644 index b70a22e9db096..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b034dd3a975763e083c7e11b5d0f7d516ab72590 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..1f110011ca9c6 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +9929da235100f8df323cfed165b8111fb2840093 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 deleted file mode 100644 index 323f165c62790..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c405f2f7d0fc127d88dfbadd753469b2028fdf52 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..8d6bf9fa0fa1b --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +8564c86d880c6ce002250002e2fd0936cbfff61d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 deleted file mode 100644 index dd659ddf4de95..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -970e5775876c2d7e1b9af7421a4b17d96f63faf4 \ No newline at end of file diff --git 
a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..cbe4aec98fae4 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7e71777cfb5beb4ffd5b03030576d2f062eef13c \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 deleted file mode 100644 index ed0e81d8f1f75..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2421e5238e9b8484929291744d709dd743c01da1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..e1c7aecc104d0 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a597265bd6fb0a7e954e948a295d31507dd73cce \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 deleted file mode 100644 index fd8e000088180..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a23e7de4cd9ae7af285c89dc1c55e0ac3f157fd3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..eefa2809f3540 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +c9e534845bb08985d7fa21e2e71a14bc68c46089 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 deleted file mode 100644 index d0e7a3b0c751c..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d9bce1ea51db279878c51091dd9aefc7b335da4 \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle new file mode 100644 index 0000000000000..65e7daaaacf26 --- /dev/null +++ b/plugins/cache-ehcache/build.gradle @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +import org.apache.tools.ant.taskdefs.condition.Os +import org.opensearch.gradle.Architecture +import org.opensearch.gradle.OS +import org.opensearch.gradle.info.BuildParams + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'Ehcache based cache implementation.' 
+ classname 'org.opensearch.cache.EhcacheCachePlugin' +} + +versions << [ + 'ehcache' : '3.10.8' +] + +dependencies { + api "org.ehcache:ehcache:${versions.ehcache}" +} + +thirdPartyAudit { + ignoreViolations( + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$CounterCell', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin', + 'org.ehcache.impl.internal.concurrent.ThreadLocalRandomUtil', + 'org.ehcache.sizeof.impl.UnsafeSizeOf' + ) + + ignoreMissingClasses( + 'javax.cache.Cache', + 'javax.cache.Cache$Entry', + 'javax.cache.CacheException', + 'javax.cache.CacheManager', + 'javax.cache.configuration.CacheEntryListenerConfiguration', + 'javax.cache.configuration.CompleteConfiguration', + 'javax.cache.configuration.Configuration', + 'javax.cache.configuration.Factory', + 'javax.cache.configuration.OptionalFeature', + 'javax.cache.event.CacheEntryCreatedListener', + 'javax.cache.event.CacheEntryEvent', + 'javax.cache.event.CacheEntryEventFilter', + 'javax.cache.event.CacheEntryExpiredListener', + 'javax.cache.event.CacheEntryListener', + 'javax.cache.event.CacheEntryRemovedListener', + 'javax.cache.event.CacheEntryUpdatedListener', + 'javax.cache.event.EventType', + 'javax.cache.expiry.Duration', + 'javax.cache.expiry.EternalExpiryPolicy', + 'javax.cache.expiry.ExpiryPolicy', + 'javax.cache.integration.CacheLoader', + 'javax.cache.integration.CacheLoaderException', + 'javax.cache.integration.CacheWriter', + 'javax.cache.integration.CacheWriterException', + 'javax.cache.integration.CompletionListener', + 'javax.cache.management.CacheMXBean', + 'javax.cache.management.CacheStatisticsMXBean', + 'javax.cache.processor.EntryProcessor', + 'javax.cache.processor.EntryProcessorResult', + 'javax.cache.processor.MutableEntry', + 'javax.cache.spi.CachingProvider', + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.Marshaller', + 'javax.xml.bind.Unmarshaller', + 'javax.xml.bind.annotation.XmlElement', + 'javax.xml.bind.annotation.XmlRootElement', + 'javax.xml.bind.annotation.XmlSchema', + 'javax.xml.bind.annotation.adapters.XmlAdapter', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.ServiceReference', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.Marker', + 'org.slf4j.event.Level' + ) +} + +tasks.named("bundlePlugin").configure { + from('config/cache-ehcache') { + into 'config' + } +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. + systemProperty 'tests.security.manager', 'false' +} diff --git a/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 new file mode 100644 index 0000000000000..dee07e9238ebf --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 @@ -0,0 +1 @@ +f0d50ede46609db78413ca7f4250d348a597b101 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
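A note on the recurring @SuppressWarnings("removal") additions earlier in this change (MustacheScriptEngine, AllowlistLoader, LambdaBootstrap, PainlessScriptEngine, PainlessLookupBuilder, DocFieldsPhaseTests, URLBlobContainer, Libsystemd): they silence warnings from Security Manager-era APIs such as java.security.AccessController, which are deprecated for removal since JDK 17 (JEP 411). The sketch below is a hypothetical illustration of that pattern, not code from this change; the class and method names are invented for the example.

import java.security.AccessController;
import java.security.PrivilegedAction;

// Hypothetical illustration of the call sites this change annotates: a class-loader
// lookup routed through AccessController.doPrivileged, with the "removal" warning
// suppressed because the API is deprecated for removal (JEP 411, JDK 17+).
public class PrivilegedLookupExample {
    @SuppressWarnings("removal")
    static ClassLoader loaderOf(Class<?> clazz) {
        return AccessController.doPrivileged((PrivilegedAction<ClassLoader>) clazz::getClassLoader);
    }

    public static void main(String[] args) {
        System.out.println(loaderOf(PrivilegedLookupExample.class));
    }
}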
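The remainder of this change adds the new cache-ehcache plugin. The EhcacheDiskCacheSettings class introduced below declares affix settings of the form {cache_type}.ehcache_disk.<suffix>, resolved per cache type. The following is a minimal, hypothetical sketch (not part of this change) of how such settings might be supplied; the "indices.requests.cache" prefix and the path are assumptions for illustration, since the concrete prefix comes from CacheType#getSettingPrefix(), which this diff does not show.

import org.opensearch.common.settings.Settings;

// Minimal sketch of supplying the {cache_type}.ehcache_disk.* settings declared in
// EhcacheDiskCacheSettings. The "indices.requests.cache" prefix and the /tmp path
// are assumptions for illustration only.
public class EhcacheDiskSettingsSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("indices.requests.cache.ehcache_disk.storage.path", "/tmp/opensearch/request_cache")
            .put("indices.requests.cache.ehcache_disk.max_size_in_bytes", 1073741824L)
            .put("indices.requests.cache.ehcache_disk.segments", 16)
            .put("indices.requests.cache.ehcache_disk.concurrency", 1)
            .build();
        System.out.println(settings.keySet());
    }
}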
diff --git a/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt new file mode 100644 index 0000000000000..1dbd38242cc98 --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt @@ -0,0 +1,5 @@ +Ehcache V3 +Copyright 2014-2023 Terracotta, Inc. + +The product includes software from the Apache Commons Lang project, +under the Apache License 2.0 (see: org.ehcache.impl.internal.classes.commonslang) diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java new file mode 100644 index 0000000000000..ceda96e4a7d7d --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.CACHE_TYPE_MAP; + +/** + * Ehcache based cache plugin. + */ +public class EhcacheCachePlugin extends Plugin implements CachePlugin { + + private static final String EHCACHE_CACHE_PLUGIN = "EhcachePlugin"; + + /** + * Default constructor to avoid javadoc related failures. + */ + public EhcacheCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME, new EhcacheDiskCache.EhcacheDiskCacheFactory()); + } + + @Override + public List<Setting<?>> getSettings() { + List<Setting<?>> settingList = new ArrayList<>(); + for (Map.Entry<CacheType, Map<String, Setting<?>>> entry : CACHE_TYPE_MAP.entrySet()) { + for (Map.Entry<String, Setting<?>> entry1 : entry.getValue().entrySet()) { + settingList.add(entry1.getValue()); + } + } + return settingList; + } + + @Override + public String getName() { + return EHCACHE_CACHE_PLUGIN; + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java new file mode 100644 index 0000000000000..837fd6b268ce6 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -0,0 +1,222 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings related to ehcache disk cache. 
+ */ +public class EhcacheDiskCacheSettings { + + /** + * Ehcache disk write minimum threads for its pool + * + * Setting pattern: {cache_type}.ehcache_disk.min_threads + */ + + public static final Setting.AffixSetting<Integer> DISK_WRITE_MINIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".min_threads", + (key) -> Setting.intSetting(key, 2, 1, 5, NodeScope) + ); + + /** + * Ehcache disk write maximum threads for its pool + * + * Setting pattern: {cache_type}.ehcache_disk.max_threads + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_MAXIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_threads", + (key) -> Setting.intSetting(key, 2, 1, 20, NodeScope) + ); + + /** + * Not to be confused with the number of disk segments; this is different. Defines + * distinct write queues created for the disk store where a group of segments share a write queue. This is + * implemented with ehcache using a partitioned thread pool executor. By default all segments share a single write + * queue, i.e. write concurrency is 1. Check OffHeapDiskStoreConfiguration and DiskWriteThreadPool. + * + * Default is 1 within ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_CONCURRENCY_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".concurrency", + (key) -> Setting.intSetting(key, 1, 1, 3, NodeScope) + ); + + /** + * Defines how many segments the disk cache is separated into. A higher number achieves greater concurrency but + * will hold that many file pointers. + * + * Default value is 16 within Ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_SEGMENTS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".segments", + (key) -> Setting.intSetting(key, 16, 1, 32, NodeScope) + ); + + /** + * Storage path for disk cache. + */ + public static final Setting.AffixSetting<String> DISK_STORAGE_PATH_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".storage.path", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache alias. + */ + public static final Setting.AffixSetting<String> DISK_CACHE_ALIAS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".alias", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache expire after access setting. + */ + public static final Setting.AffixSetting<TimeValue> DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".expire_after_access", + (key) -> Setting.positiveTimeSetting(key, TimeValue.MAX_VALUE, NodeScope) + ); + + /** + * Disk cache max size setting. + */ + public static final Setting.AffixSetting<Long> DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes", + (key) -> Setting.longSetting(key, 1073741824L, NodeScope) + ); + + /** + * Disk cache listener mode setting. 
+ */ + public static final Setting.AffixSetting<Boolean> DISK_CACHE_LISTENER_MODE_SYNC_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".is_event_listener_sync", + (key) -> Setting.boolSetting(key, false, NodeScope) + ); + + /** + * Key for disk segment. + */ + public static final String DISK_SEGMENT_KEY = "disk_segment"; + /** + * Key for max size. + */ + public static final String DISK_MAX_SIZE_IN_BYTES_KEY = "max_size_in_bytes"; + /** + * Key for expire after access. + */ + public static final String DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY = "disk_cache_expire_after_access_key"; + /** + * Key for cache alias. + */ + public static final String DISK_CACHE_ALIAS_KEY = "disk_cache_alias"; + /** + * Key for disk segment. + */ + public static final String DISK_SEGMENTS_KEY = "disk_segments"; + /** + * Key for disk write concurrency. + */ + public static final String DISK_WRITE_CONCURRENCY_KEY = "disk_write_concurrency"; + /** + * Key for max threads. + */ + public static final String DISK_WRITE_MAXIMUM_THREADS_KEY = "disk_write_max_threads"; + /** + * Key for min threads. + */ + public static final String DISK_WRITE_MIN_THREADS_KEY = "disk_write_min_threads"; + /** + * Key for storage path. + */ + public static final String DISK_STORAGE_PATH_KEY = "disk_storage_path"; + /** + * Key for listener mode + */ + public static final String DISK_LISTENER_MODE_SYNC_KEY = "disk_listener_mode"; + + /** + * Map of key to setting. + */ + private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of( + DISK_SEGMENT_KEY, + DISK_SEGMENTS_SETTING, + DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY, + DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING, + DISK_CACHE_ALIAS_KEY, + DISK_CACHE_ALIAS_SETTING, + DISK_WRITE_CONCURRENCY_KEY, + DISK_WRITE_CONCURRENCY_SETTING, + DISK_WRITE_MAXIMUM_THREADS_KEY, + DISK_WRITE_MAXIMUM_THREADS_SETTING, + DISK_WRITE_MIN_THREADS_KEY, + DISK_WRITE_MINIMUM_THREADS_SETTING, + DISK_STORAGE_PATH_KEY, + DISK_STORAGE_PATH_SETTING, + DISK_MAX_SIZE_IN_BYTES_KEY, + DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING, + DISK_LISTENER_MODE_SYNC_KEY, + DISK_CACHE_LISTENER_MODE_SYNC_SETTING + ); + + /** + * Map to store desired settings for a cache type. + */ + public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap(); + + /** + * Used to form concrete setting for cache types and return desired map + * @return map of cacheType and associated settings. + */ + private static final Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() { + Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + Map<String, Setting<?>> settingMap = new HashMap<>(); + for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) { + settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix())); + } + cacheTypeMap.put(cacheType, settingMap); + } + return cacheTypeMap; + } + + /** + * Fetches setting list for a combination of cache type and store name. 
+ * @param cacheType cache type + * @return settings + */ + public static final Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) { + Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType); + if (cacheTypeSettings == null) { + throw new IllegalArgumentException( + "No settings exist for cache store name: " + + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + + " associated with " + + "cache type: " + + cacheType + ); + } + return cacheTypeSettings; + } + + /** + * Default constructor. Added to fix javadocs. + */ + public EhcacheDiskCacheSettings() {} +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java similarity index 66% rename from server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java rename to plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java index c3222ca3ffb62..f9be1c3dbf826 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Package related to tiered cache listeners */ -package org.opensearch.common.cache.store.listeners; +/** Base package for cache plugin */ +package org.opensearch.cache; diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java new file mode 100644 index 0000000000000..ddfd5b838e927 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -0,0 +1,597 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cache.store.disk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.io.File; +import java.time.Duration; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +import org.ehcache.Cache; +import org.ehcache.CachePersistenceException; +import org.ehcache.PersistentCacheManager; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.PooledExecutionServiceConfigurationBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.event.CacheEvent; +import org.ehcache.event.CacheEventListener; +import org.ehcache.event.EventType; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.impl.config.store.disk.OffHeapDiskStoreConfiguration; +import org.ehcache.spi.loaderwriter.CacheLoadingException; +import org.ehcache.spi.loaderwriter.CacheWritingException; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_ALIAS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_SEGMENT_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_CONCURRENCY_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MAXIMUM_THREADS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MIN_THREADS_KEY; + +/** + * This variant of disk cache uses Ehcache underneath. + * @param <K> Type of key. + * @param <V> Type of value. + * + * @opensearch.experimental + * + */ +@ExperimentalApi +public class EhcacheDiskCache<K, V> implements ICache<K, V> { + + private static final Logger logger = LogManager.getLogger(EhcacheDiskCache.class); + + // Unique id associated with this cache. 
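+ // Appended to the thread pool aliases below so that pool names remain unique when several cache instances exist in the same JVM.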
+ private final static String UNIQUE_ID = UUID.randomUUID().toString(); + private final static String THREAD_POOL_ALIAS_PREFIX = "ehcachePool"; + private final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB + + // A Cache manager can create many caches. + private final PersistentCacheManager cacheManager; + + // Disk cache + private Cache<K, V> cache; + private final long maxWeightInBytes; + private final String storagePath; + private final Class<K> keyType; + private final Class<V> valueType; + private final TimeValue expireAfterAccess; + private final EhCacheEventListener<K, V> ehCacheEventListener; + private final String threadPoolAlias; + private final Settings settings; + private final RemovalListener<K, V> removalListener; + private final CacheType cacheType; + private final String diskCacheAlias; + // TODO: Move count to stats once those changes are ready. + private final CounterMetric entries = new CounterMetric(); + + /** + * Used in computeIfAbsent to synchronize loading of a given key. This is needed as ehcache doesn't provide a + * computeIfAbsent method. + */ + Map<K, CompletableFuture<Tuple<K, V>>> completableFutureMap = new ConcurrentHashMap<>(); + + private EhcacheDiskCache(Builder<K, V> builder) { + this.keyType = Objects.requireNonNull(builder.keyType, "Key type shouldn't be null"); + this.valueType = Objects.requireNonNull(builder.valueType, "Value type shouldn't be null"); + this.expireAfterAccess = Objects.requireNonNull(builder.getExpireAfterAcess(), "ExpireAfterAccess value shouldn't " + "be null"); + this.maxWeightInBytes = builder.getMaxWeightInBytes(); + if (this.maxWeightInBytes <= MINIMUM_MAX_SIZE_IN_BYTES) { + throw new IllegalArgumentException("Ehcache Disk tier cache size should be greater than " + MINIMUM_MAX_SIZE_IN_BYTES); + } + this.cacheType = Objects.requireNonNull(builder.cacheType, "Cache type shouldn't be null"); + if (builder.diskCacheAlias == null || builder.diskCacheAlias.isBlank()) { + this.diskCacheAlias = "ehcacheDiskCache#" + this.cacheType; + } else { + this.diskCacheAlias = builder.diskCacheAlias; + } + this.storagePath = builder.storagePath; + if (this.storagePath == null || this.storagePath.isBlank()) { + throw new IllegalArgumentException("Storage path shouldn't be null or empty"); + } + if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { + this.threadPoolAlias = THREAD_POOL_ALIAS_PREFIX + "DiskWrite#" + UNIQUE_ID; + } else { + this.threadPoolAlias = builder.threadPoolAlias; + } + this.settings = Objects.requireNonNull(builder.getSettings(), "Settings objects shouldn't be null"); + this.cacheManager = buildCacheManager(); + Objects.requireNonNull(builder.getRemovalListener(), "Removal listener can't be null"); + this.removalListener = builder.getRemovalListener(); + this.ehCacheEventListener = new EhCacheEventListener<K, V>(builder.getRemovalListener()); + this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); + } + + private Cache<K, V> buildCache(Duration expireAfterAccess, Builder<K, V> builder) { + try { + return this.cacheManager.createCache( + this.diskCacheAlias, + CacheConfigurationBuilder.newCacheConfigurationBuilder( + this.keyType, + this.valueType, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(K key, V value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(K key, Supplier<? 
extends V> value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate(K key, Supplier<? extends V> oldValue, V newValue) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_CONCURRENCY_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + ) + ) + ); + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } + } + + private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder<K, V> builder) { + CacheEventListenerConfigurationBuilder configurationBuilder = CacheEventListenerConfigurationBuilder.newEventListenerConfiguration( + this.ehCacheEventListener, + EventType.EVICTED, + EventType.EXPIRED, + EventType.REMOVED, + EventType.UPDATED, + EventType.CREATED + ).unordered(); + if (builder.isEventListenerModeSync) { + return configurationBuilder.synchronous(); + } else { + return configurationBuilder.asynchronous(); + } + } + + // Package private for testing + Map<K, CompletableFuture<Tuple<K, V>>> getCompletableFutureMap() { + return completableFutureMap; + } + + @SuppressForbidden(reason = "Ehcache uses File.io") + private PersistentCacheManager buildCacheManager() { + // In case we use multiple ehCaches, we can define this cache manager at a global level. + return CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true); + } + + @Override + public V get(K key) { + if (key == null) { + throw new IllegalArgumentException("Key passed to ehcache disk cache was null."); + } + V value; + try { + value = cache.get(key); + } catch (CacheLoadingException ex) { + throw new OpenSearchException("Exception occurred while trying to fetch item from ehcache disk cache"); + } + return value; + } + + /** + * Puts the item into cache. + * @param key Type of key. + * @param value Type of value. + */ + @Override + public void put(K key, V value) { + try { + cache.put(key, value); + } catch (CacheWritingException ex) { + throw new OpenSearchException("Exception occurred while put item to ehcache disk cache"); + } + } + + /** + * Computes the value using loader in case key is not present, otherwise fetches it. + * @param key Type of key + * @param loader loader to load the value in case key is missing + * @return value + * @throws Exception when either internal get or put calls fail. + */ + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // Ehache doesn't provide any computeIfAbsent function. 
Exposes putIfAbsent but that works differently and is + // not performant in case there are multiple concurrent request for same key. Below is our own custom + // implementation of computeIfAbsent on top of ehcache. Inspired by OpenSearch Cache implementation. + V value = cache.get(key); + if (value == null) { + value = compute(key, loader); + } + return value; + } + + private V compute(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // A future that returns a pair of key/value. + CompletableFuture<Tuple<K, V>> completableFuture = new CompletableFuture<>(); + // Only one of the threads will succeed putting a future into map for the same key. + // Rest will fetch existing future. + CompletableFuture<Tuple<K, V>> future = completableFutureMap.putIfAbsent(key, completableFuture); + // Handler to handle results post processing. Takes a tuple<key, value> or exception as an input and returns + // the value. Also before returning value, puts the value in cache. + BiFunction<Tuple<K, V>, Throwable, V> handler = (pair, ex) -> { + V value = null; + if (pair != null) { + cache.put(pair.v1(), pair.v2()); + value = pair.v2(); // Returning a value itself assuming that a next get should return the same. Should + // be safe to assume if we got no exception and reached here. + } + completableFutureMap.remove(key); // Remove key from map as not needed anymore. + return value; + }; + CompletableFuture<V> completableValue; + if (future == null) { + future = completableFuture; + completableValue = future.handle(handler); + V value; + try { + value = loader.load(key); + } catch (Exception ex) { + future.completeExceptionally(ex); + throw new ExecutionException(ex); + } + if (value == null) { + NullPointerException npe = new NullPointerException("loader returned a null value"); + future.completeExceptionally(npe); + throw new ExecutionException(npe); + } else { + future.complete(new Tuple<>(key, value)); + } + + } else { + completableValue = future.handle(handler); + } + V value; + try { + value = completableValue.get(); + if (future.isCompletedExceptionally()) { + future.get(); // call get to force the exception to be thrown for other concurrent callers + throw new IllegalStateException("Future completed exceptionally but no error thrown"); + } + } catch (InterruptedException ex) { + throw new IllegalStateException(ex); + } + return value; + } + + /** + * Invalidate the item. + * @param key key to be invalidated. + */ + @Override + public void invalidate(K key) { + try { + cache.remove(key); + } catch (CacheWritingException ex) { + // Handle + throw new RuntimeException(ex); + } + + } + + @Override + public void invalidateAll() {} + + /** + * Provides a way to iterate over disk cache keys. + * @return Iterable + */ + @Override + public Iterable<K> keys() { + return () -> new EhCacheKeyIterator<>(cache.iterator()); + } + + /** + * Gives the current count of keys in disk cache. + * @return current count of keys + */ + @Override + public long count() { + return entries.count(); + } + + @Override + public void refresh() { + // TODO: ehcache doesn't provide a way to refresh a cache. + } + + @Override + public void close() { + cacheManager.removeCache(this.diskCacheAlias); + cacheManager.close(); + try { + cacheManager.destroyCache(this.diskCacheAlias); + } catch (CachePersistenceException e) { + throw new OpenSearchException("Exception occurred while destroying ehcache and associated data", e); + } + } + + /** + * This iterator wraps ehCache iterator and only iterates over its keys. 
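+ * Note that {@code remove()} is not overridden, so removal through this iterator is not supported.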
+ * @param <K> Type of key + */ + class EhCacheKeyIterator<K> implements Iterator<K> { + + Iterator<Cache.Entry<K, V>> iterator; + + EhCacheKeyIterator(Iterator<Cache.Entry<K, V>> iterator) { + this.iterator = iterator; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public K next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return iterator.next().getKey(); + } + } + + /** + * Wrapper over Ehcache original listener to listen to desired events and notify desired subscribers. + * @param <K> Type of key + * @param <V> Type of value + */ + class EhCacheEventListener<K, V> implements CacheEventListener<K, V> { + + private final RemovalListener<K, V> removalListener; + + EhCacheEventListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + } + + @Override + public void onEvent(CacheEvent<? extends K, ? extends V> event) { + switch (event.getType()) { + case CREATED: + entries.inc(); + // this.eventListener.onCached(event.getKey(), event.getNewValue(), CacheStoreType.DISK); + assert event.getOldValue() == null; + break; + case EVICTED: + this.removalListener.onRemoval(new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.EVICTED)); + entries.dec(); + assert event.getNewValue() == null; + break; + case REMOVED: + entries.dec(); + this.removalListener.onRemoval(new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.EXPLICIT)); + assert event.getNewValue() == null; + break; + case EXPIRED: + this.removalListener.onRemoval( + new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.INVALIDATED) + ); + entries.dec(); + assert event.getNewValue() == null; + break; + case UPDATED: + break; + default: + break; + } + } + } + + /** + * Factory to create an ehcache disk cache. + */ + public static class EhcacheDiskCacheFactory implements ICache.Factory { + + /** + * Ehcache disk cache name. + */ + public static final String EHCACHE_DISK_CACHE_NAME = "ehcache_disk"; + + /** + * Default constructor. + */ + public EhcacheDiskCacheFactory() {} + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + return new Builder<K, V>().setStoragePath((String) settingList.get(DISK_STORAGE_PATH_KEY).get(settings)) + .setDiskCacheAlias((String) settingList.get(DISK_CACHE_ALIAS_KEY).get(settings)) + .setIsEventListenerModeSync((Boolean) settingList.get(DISK_LISTENER_MODE_SYNC_KEY).get(settings)) + .setCacheType(cacheType) + .setKeyType((config.getKeyType())) + .setValueType(config.getValueType()) + .setRemovalListener(config.getRemovalListener()) + .setExpireAfterAccess((TimeValue) settingList.get(DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY).get(settings)) + .setMaximumWeightInBytes((Long) settingList.get(DISK_MAX_SIZE_IN_BYTES_KEY).get(settings)) + .setSettings(settings) + .build(); + } + + @Override + public String getCacheName() { + return EHCACHE_DISK_CACHE_NAME; + } + } + + /** + * Builder object to build Ehcache disk tier. 
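+ * <p> + * A minimal usage sketch, mirroring how the plugin's tests construct the cache (the storage path and sizes are illustrative, and {@code removalListener} is assumed to exist): + * <pre>{@code + * ICache<String, String> cache = new EhcacheDiskCache.Builder<String, String>() + *     .setKeyType(String.class) + *     .setValueType(String.class) + *     .setCacheType(CacheType.INDICES_REQUEST_CACHE) + *     .setStoragePath("/path/to/disk_cache") + *     .setSettings(Settings.EMPTY) + *     .setExpireAfterAccess(TimeValue.MAX_VALUE) + *     .setMaximumWeightInBytes(1024 * 101) // must exceed the 100KB minimum enforced by the constructor + *     .setRemovalListener(removalListener) + *     .build(); + * }</pre>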
+ * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + private CacheType cacheType; + private String storagePath; + + private String threadPoolAlias; + + private String diskCacheAlias; + + // Provides capability to make ehCache event listener to run in sync mode. Used for testing too. + private boolean isEventListenerModeSync; + + private Class<K> keyType; + + private Class<V> valueType; + + /** + * Default constructor. Added to fix javadocs. + */ + public Builder() {} + + /** + * Sets the desired cache type. + * @param cacheType cache type + * @return builder + */ + public Builder<K, V> setCacheType(CacheType cacheType) { + this.cacheType = cacheType; + return this; + } + + /** + * Sets the key type of value. + * @param keyType type of key + * @return builder + */ + public Builder<K, V> setKeyType(Class<K> keyType) { + this.keyType = keyType; + return this; + } + + /** + * Sets the class type of value. + * @param valueType type of value + * @return builder + */ + public Builder<K, V> setValueType(Class<V> valueType) { + this.valueType = valueType; + return this; + } + + /** + * Desired storage path for disk cache. + * @param storagePath path for disk cache + * @return builder + */ + public Builder<K, V> setStoragePath(String storagePath) { + this.storagePath = storagePath; + return this; + } + + /** + * Thread pool alias for the cache. + * @param threadPoolAlias alias + * @return builder + */ + public Builder<K, V> setThreadPoolAlias(String threadPoolAlias) { + this.threadPoolAlias = threadPoolAlias; + return this; + } + + /** + * Cache alias + * @param diskCacheAlias disk cache alias + * @return builder + */ + public Builder<K, V> setDiskCacheAlias(String diskCacheAlias) { + this.diskCacheAlias = diskCacheAlias; + return this; + } + + /** + * Determines whether event listener is triggered async/sync. + * @param isEventListenerModeSync mode sync + * @return builder + */ + public Builder<K, V> setIsEventListenerModeSync(boolean isEventListenerModeSync) { + this.isEventListenerModeSync = isEventListenerModeSync; + return this; + } + + @Override + public EhcacheDiskCache<K, V> build() { + return new EhcacheDiskCache<>(this); + } + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java new file mode 100644 index 0000000000000..79f8eec2f3f4c --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** + * Base package for disk cache related stuff. + */ +package org.opensearch.cache.store.disk; diff --git a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..40007eea62dba --- /dev/null +++ b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant { + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + permission java.lang.RuntimePermission "createClassLoader"; +}; + diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java new file mode 100644 index 0000000000000..538a45456ddc3 --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class EhcachePluginTests extends OpenSearchTestCase { + + private EhcacheCachePlugin ehcacheCachePlugin = new EhcacheCachePlugin(); + + public void testGetCacheStoreTypeMap() { + Map<String, ICache.Factory> factoryMap = ehcacheCachePlugin.getCacheFactoryMap(); + assertNotNull(factoryMap); + assertNotNull(factoryMap.get(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME)); + } +} diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java new file mode 100644 index 0000000000000..d5f5fbb9293bc --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -0,0 +1,505 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.store.disk; + +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Phaser; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.hamcrest.CoreMatchers.instanceOf; + +public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { + + private static final int CACHE_SIZE_IN_BYTES = 1024 * 101; + + public void testBasicGetAndPut() throws IOException { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + } + + ehcacheTest.close(); + } + } + + public void testBasicGetAndPutUsingFactory() throws IOException { + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) { + ICache.Factory ehcacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory(); + ICache<String, String> ehcacheTest = ehcacheFactory.create( + new CacheConfig.Builder<String, String>().setValueType(String.class) + .setKeyType(String.class) + .setRemovalListener(removalListener) + .setSettings( + 
Settings.builder() + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_MAX_SIZE_IN_BYTES_KEY) + .getKey(), + CACHE_SIZE_IN_BYTES + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_STORAGE_PATH_KEY) + .getKey(), + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_LISTENER_MODE_SYNC_KEY) + .getKey(), + true + ) + .build() + ) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of() + ); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + } + + ehcacheTest.close(); + } + } + + public void testConcurrentPut() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + ehcacheTest.put(entry.getKey(), entry.getValue()); + countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel puts above. 
+ countDownLatch.await(); // Wait for all threads to finish + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testEhcacheParallelGets() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + assertEquals(keyValueMap.size(), ehcacheTest.count()); + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + assertEquals(entry.getValue(), ehcacheTest.get(entry.getKey())); + countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel puts above. 
+ countDownLatch.await(); // Wait for all threads to finish + ehcacheTest.close(); + } + } + + public void testEhcacheKeyIterator() throws Exception { + Settings settings = Settings.builder().build(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(new MockRemovalListener<>()) + .build(); + + int randomKeys = randomIntBetween(2, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + Iterator<String> keys = ehcacheTest.keys().iterator(); + int keysCount = 0; + while (keys.hasNext()) { + String key = keys.next(); + keysCount++; + assertNotNull(ehcacheTest.get(key)); + } + assertEquals(keysCount, randomKeys); + ehcacheTest.close(); + } + } + + public void testEvictions() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + // Generate a string with 100 characters + String value = generateRandomString(100); + + // Trying to generate more than 100kb to cause evictions. 
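+ // 1000 entries with ~100-character values comfortably exceed the roughly 101KB capacity configured above, so most puts + // evict an older entry. The exact eviction count asserted below is an observed value that depends on ehcache's + // per-entry on-disk overhead, not a documented guarantee.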
+ for (int i = 0; i < 1000; i++) { + String key = "Key" + i; + ehcacheTest.put(key, value); + } + assertEquals(660, removalListener.evictionMetric.count()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrently() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = 2;// randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + String value = "dummy"; + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different request with the same key concurrently. Verify value is only loaded once. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) { + isLoaded = true; + return value; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + assertEquals(value, ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + int numberOfTimesValueLoaded = 0; + for (int i = 0; i < numberOfRequest; i++) { + if (loadAwareCacheLoaderList.get(i).isLoaded()) { + numberOfTimesValueLoaded++; + } + } + assertEquals(1, numberOfTimesValueLoaded); + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + assertEquals(1, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrentlyAndThrowsException() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new 
Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different request with the same key concurrently. Loader throws exception. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + throw new RuntimeException("Exception"); + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentWithNullValueLoading() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different request with the same key concurrently. Loader returns null.
+ for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + return null; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader); + } catch (Exception ex) { + assertThat(ex.getCause(), instanceOf(NullPointerException.class)); + } + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + private static String generateRandomString(int length) { + String characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + StringBuilder randomString = new StringBuilder(length); + + for (int i = 0; i < length; i++) { + int index = (int) (randomDouble() * characters.length()); + randomString.append(characters.charAt(index)); + } + + return randomString.toString(); + } + + static class MockRemovalListener<K, V> implements RemovalListener<K, V> { + + CounterMetric evictionMetric = new CounterMetric(); + + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + evictionMetric.inc(); + } + } +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java index 5b026c30017ca..f3d0f278c7ce7 100644 --- a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java @@ -19,6 +19,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
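+ * The {@code removal} suppression added in this change (here and in several other classes below) covers these security-manager APIs, which are deprecated for removal in recent JDKs.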
*/ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index ad622e68f5ccb..a4b733ec7d894 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -296,6 +296,7 @@ private static SSLContext getSSLContext() throws Exception { * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK */ + @SuppressWarnings("removal") private static String getProtocol() { if (Runtime.version().compareTo(Version.parse("12")) < 0) { return "TLSv1.2"; diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 1bac80e576199..6e21feca7f5fb 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -112,6 +112,7 @@ private static String getRequiredSetting(Settings settings, Setting<String> sett return value; } + @SuppressWarnings("removal") @Override public HostedServiceGetDetailedResponse getServiceDetails() { SpecialPermission.check(); diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java index c6605002c4462..0125ae4d19c3e 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
*/ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java index b4ed613c0d8dd..9518fac442111 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java @@ -89,6 +89,7 @@ public static void startHttp() throws Exception { httpServer.start(); } + @SuppressWarnings("removal") @Before public void setup() { // redirect EC2 metadata service to httpServer @@ -116,6 +117,7 @@ public void testNetworkHostEc2() throws IOException { /** * Test for network.host: _ec2_ */ + @SuppressWarnings("removal") public void testNetworkHostUnableToResolveEc2() { // redirect EC2 metadata service to unknown location AccessController.doPrivileged( diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index c8b52d3afcd45..85efcc43fd65a 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.34.1" + api "com.google.oauth-client:google-oauth-client:1.35.0" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 deleted file mode 100644 index a8434bd380761..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..a52e79088c7ca --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 @@ -0,0 +1 @@ +2f52003156e40ba8be5f349a2716a77428896e69 \ No newline at end of file diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java index 1401f7ca26ce6..c46bfedbd8507 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java @@ -48,6 +48,7 @@ * {@code connect}. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
*/ +@SuppressWarnings("removal") public final class Access { private Access() {} diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index baa3464d0a98e..222443efcb214 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -17,7 +17,7 @@ opensearchplugin { } dependencies { - implementation 'org.apache.shiro:shiro-core:1.11.0' + implementation 'org.apache.shiro:shiro-core:1.13.0' // Needed for shiro implementation "org.slf4j:slf4j-api:${versions.slf4j}" @@ -28,7 +28,7 @@ dependencies { implementation 'org.passay:passay:1.6.3' - implementation "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" + implementation "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" testImplementation project(path: ':modules:transport-netty4') // for http testImplementation project(path: ':plugins:transport-nio') // for http diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 2e96c404bef98..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 deleted file mode 100644 index 67c33e15ec689..0000000000000 --- a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -033a70c87e91968a299f1ee00f4e95050312346d \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 new file mode 100644 index 0000000000000..25bd4d9acd166 --- /dev/null +++ b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 @@ -0,0 +1 @@ +7e542e3d614b197bf10005e98e19f9f19cb943e7 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt +++ b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 22db73ad86796..6da34c4c9caf2 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -71,9 +71,9 @@ dependencies { api "org.apache.pdfbox:fontbox:${versions.pdfbox}" api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.bouncycastle:bcmail-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcmail-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 46010d64015ad..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23d8bcad6b57912e4633ca9955926ffcdf3c5c71 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..f71659316b8cd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +f2bb8aa55dc901ee8b8aae7d1007c03592d65e03 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index a843d972ac681..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ee440dfa1c557c1cc0c46b5dadf5ef3896ccebb \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..05a8b2d5729bd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +ed953791ba0229747dd0fd9911e3d76a462acfd3 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt similarity index 100% rename from 
plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 2e96c404bef98..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index ce7ceb5e3d776..fe783e5ddb675 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -112,6 +112,7 @@ final class TikaImpl { /** * parses with tika, throwing any exception hit while parsing the document */ + @SuppressWarnings("removal") static String parse(final byte content[], final Metadata metadata, final int limit) throws TikaException, IOException { // check that its not unprivileged code like a script SpecialPermission.check(); @@ -136,6 +137,7 @@ static String parse(final byte content[], final Metadata metadata, final int lim // apply additional containment for parsers, this is intersected with the current permissions // its hairy, but worth it so we don't have some XML flaw reading random crap from the FS + @SuppressWarnings("removal") private static final AccessControlContext RESTRICTED_CONTEXT = new AccessControlContext( new ProtectionDomain[] { new ProtectionDomain(null, getRestrictedPermissions()) } ); diff --git a/plugins/query-insights/build.gradle b/plugins/query-insights/build.gradle new file mode 100644 index 0000000000000..eabbd395bd3bd --- /dev/null +++ b/plugins/query-insights/build.gradle @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +opensearchplugin { + description 'OpenSearch Query Insights Plugin.' + classname 'org.opensearch.plugin.insights.QueryInsightsPlugin' +} + +dependencies { +} diff --git a/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java new file mode 100644 index 0000000000000..04e715444f50a --- /dev/null +++ b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java @@ -0,0 +1,274 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Transport Action tests for Query Insights Plugin + */ + +@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) +public class QueryInsightsPluginTransportIT extends OpenSearchIntegTestCase { + + private final int TOTAL_NUMBER_OF_NODES = 2; + private final int TOTAL_SEARCH_REQUESTS = 5; + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Arrays.asList(QueryInsightsPlugin.class); + } + + /** + * Test Query Insights Plugin is installed + */ + public void testQueryInsightPluginInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.insights.QueryInsightsPlugin")) + ); + } + + /** + * Test get top queries when feature disabled + */ + public void testGetTopQueriesWhenFeatureDisabled() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + } + + /** + * Test update top query record when feature enabled + */ + public void testUpdateRecordWhenFeatureDisabledThenEnabled() throws ExecutionException, InterruptedException { + Settings commonSettings = Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "false").build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true").build() + ); + 
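// Re-enable the latency collector through a dynamic cluster settings update; the follow-up TopQueries request below should then succeed on every node. +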
assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + TopQueriesRequest request2 = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response2 = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request2).actionGet(); + Assert.assertEquals(0, response2.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response2.getNodes().size()); + for (int i = 0; i < TOTAL_NUMBER_OF_NODES; i++) { + Assert.assertEquals(0, response2.getNodes().get(i).getTopQueriesRecord().size()); + } + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries when feature enabled + */ + public void testGetTopQueriesWhenFeatureEnabled() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + // Sleep to wait for queue drained to top queries store + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(TOTAL_SEARCH_REQUESTS, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small top n size + */ + public void testGetTopQueriesWithSmallTopN() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "1") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + 
prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(2, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small window size + */ + public void testGetTopQueriesWithSmallWindowSize() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "1m") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Thread.sleep(6000); + internalCluster().stopAllNodes(); + } +} diff --git a/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java new file mode 100644 index 0000000000000..57dea6ad8d5ff --- /dev/null +++ b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java @@ -0,0 +1,107 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +/** + * Rest Action tests for Query Insights + */ +public class TopQueriesRestIT extends OpenSearchRestTestCase { + + /** + * test Query Insights is installed + * @throws IOException IOException + */ + @SuppressWarnings("unchecked") + public void testQueryInsightsPluginInstalled() throws IOException { + Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); + Response response = client().performRequest(request); + List<Object> pluginsList = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + response.getEntity().getContent() + ).list(); + Assert.assertTrue( + pluginsList.stream().map(o -> (Map<String, Object>) o).anyMatch(plugin -> plugin.get("component").equals("query-insights")) + ); + } + + /** + * test enabling top queries + * @throws IOException IOException + */ + public void testTopQueriesResponses() throws IOException { + // Enable Top N Queries feature + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(defaultTopQueriesSettings()); + Response response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Create documents for search + request = new Request("POST", "/my-index-0/_doc"); + request.setJsonEntity(createDocumentsBody()); + response = client().performRequest(request); + + Assert.assertEquals(201, response.getStatusLine().getStatusCode()); + + // Do Search + request = new Request("GET", "/my-index-0/_search?size=20&pretty"); + request.setJsonEntity(searchBody()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Get Top Queries + request = new Request("GET", "/_insights/top_queries?pretty"); + response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + String top_requests = new String(response.getEntity().getContent().readAllBytes(), StandardCharsets.UTF_8); + Assert.assertTrue(top_requests.contains("top_queries")); + Assert.assertEquals(2, top_requests.split("searchType", -1).length - 1); + } + + private String defaultTopQueriesSettings() { + return "{\n" + + " \"persistent\" : {\n" + + " \"search.top_n_queries.latency.enabled\" : \"true\",\n" + + " \"search.top_n_queries.latency.window_size\" : \"600s\",\n" + + " \"search.top_n_queries.latency.top_n_size\" : 5\n" + + " }\n" + + "}"; + } + + private String createDocumentsBody() { + return "{\n" + + " \"@timestamp\": \"2099-11-15T13:12:00\",\n" + + " \"message\": \"this is document 1\",\n" + + " \"user\": {\n" + + " \"id\": \"cyji\"\n" + + " }\n" + + "}"; 
+ } + + private String searchBody() { + return "{}"; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java new file mode 100644 index 0000000000000..4d7e0d486068a --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.rules.transport.top_queries.TransportTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; +import org.opensearch.script.ScriptService; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +/** + * Plugin class for Query Insights. 
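+ * <p>
+ * A minimal usage sketch, using the setting keys and endpoint exercised by the REST tests in this change:
+ * enable collection with
+ * <pre>
+ * PUT /_cluster/settings
+ * { "persistent": { "search.top_n_queries.latency.enabled": "true" } }
+ * </pre>
+ * then read the collected results with {@code GET /_insights/top_queries}.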
+ */ +public class QueryInsightsPlugin extends Plugin implements ActionPlugin { + /** + * Default constructor + */ + public QueryInsightsPlugin() {} + + @Override + public Collection<Object> createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<RepositoriesService> repositoriesServiceSupplier + ) { + // create top n queries service + final QueryInsightsService queryInsightsService = new QueryInsightsService(threadPool); + return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService)); + } + + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(final Settings settings) { + return List.of( + new ScalingExecutorBuilder( + QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR, + 1, + Math.min((OpenSearchExecutors.allocatedProcessors(settings) + 1) / 2, QueryInsightsSettings.MAX_THREAD_COUNT), + TimeValue.timeValueMinutes(5) + ) + ); + } + + @Override + public List<RestHandler> getRestHandlers( + final Settings settings, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<DiscoveryNodes> nodesInCluster + ) { + return List.of(new RestTopQueriesAction()); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionPlugin.ActionHandler<>(TopQueriesAction.INSTANCE, TransportTopQueriesAction.class)); + } + + @Override + public List<Setting<?>> getSettings() { + return List.of( + // Settings for top N queries + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java new file mode 100644 index 0000000000000..705273f52a567 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.listener; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; + +/** + * The listener for query insights services. + * It forwards query-related data to the appropriate query insights stores, + * either for each request or for each phase. + * + * @opensearch.internal + */ +public final class QueryInsightsListener extends SearchRequestOperationsListener { + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + private static final Logger log = LogManager.getLogger(QueryInsightsListener.class); + + private final QueryInsightsService queryInsightsService; + + /** + * Constructor for QueryInsightsListener + * + * @param clusterService The Node's cluster service. 
+ * @param queryInsightsService The topQueriesByLatencyService associated with this listener + */ + @Inject + public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) { + this.queryInsightsService = queryInsightsService; + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(TOP_N_LATENCY_QUERIES_ENABLED, v -> this.setEnableTopQueries(MetricType.LATENCY, v)); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + TOP_N_LATENCY_QUERIES_SIZE, + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setTopNSize(v), + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateTopNSize(v) + ); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + TOP_N_LATENCY_QUERIES_WINDOW_SIZE, + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setWindowSize(v), + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateWindowSize(v) + ); + this.setEnableTopQueries(MetricType.LATENCY, clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_ENABLED)); + this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) + .setTopNSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_SIZE)); + this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) + .setWindowSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_WINDOW_SIZE)); + } + + /** + * Enable or disable top queries insights collection for {@link MetricType} + * This function will enable or disable the corresponding listeners + * and query insights services. + * + * @param metricType {@link MetricType} + * @param enabled boolean + */ + public void setEnableTopQueries(final MetricType metricType, final boolean enabled) { + boolean isAllMetricsDisabled = !queryInsightsService.isEnabled(); + this.queryInsightsService.enableCollection(metricType, enabled); + if (!enabled) { + // disable QueryInsightsListener only if all metrics collections are disabled now. + if (!queryInsightsService.isEnabled()) { + super.setEnabled(false); + this.queryInsightsService.stop(); + } + } else { + super.setEnabled(true); + // restart QueryInsightsListener only if none of metrics collections is enabled before. 
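+            // stop() cancels any previously scheduled drain task and start() re-schedules it, picking up the now-enabled collection (see QueryInsightsService#doStart and #doStop).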
+ if (isAllMetricsDisabled) { + this.queryInsightsService.stop(); + this.queryInsightsService.start(); + } + } + + } + + @Override + public boolean isEnabled() { + return super.isEnabled(); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) { + final SearchRequest request = context.getRequest(); + try { + Map<MetricType, Number> measurements = new HashMap<>(); + if (queryInsightsService.isCollectionEnabled(MetricType.LATENCY)) { + measurements.put( + MetricType.LATENCY, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos()) + ); + } + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT)); + attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS)); + attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards()); + attributes.put(Attribute.INDICES, request.indices()); + attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap()); + SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes); + queryInsightsService.addRecord(record); + } catch (Exception e) { + log.error(String.format(Locale.ROOT, "fail to ingest query insight data, error: %s", e)); + } + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java new file mode 100644 index 0000000000000..3cb9cacf7fd1c --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Listeners for Query Insights + */ +package org.opensearch.plugin.insights.core.listener; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java new file mode 100644 index 0000000000000..525ca0d4a3d33 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; + +/** + * Service responsible for gathering, analyzing, storing and exporting + * information related to search queries + * + * @opensearch.internal + */ +public class QueryInsightsService extends AbstractLifecycleComponent { + /** + * The internal OpenSearch thread pool that execute async processing and exporting tasks + */ + private final ThreadPool threadPool; + + /** + * Services to capture top n queries for different metric types + */ + private final Map<MetricType, TopQueriesService> topQueriesServices; + + /** + * Flags for enabling insight data collection for different metric types + */ + private final Map<MetricType, Boolean> enableCollect; + + /** + * The internal thread-safe queue to ingest the search query data and subsequently forward to processors + */ + private final LinkedBlockingQueue<SearchQueryRecord> queryRecordsQueue; + + /** + * Holds a reference to delayed operation {@link Scheduler.Cancellable} so it can be cancelled when + * the service closed concurrently. + */ + protected volatile Scheduler.Cancellable scheduledFuture; + + /** + * Constructor of the QueryInsightsService + * + * @param threadPool The OpenSearch thread pool to run async tasks + */ + @Inject + public QueryInsightsService(final ThreadPool threadPool) { + enableCollect = new HashMap<>(); + queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY); + topQueriesServices = new HashMap<>(); + for (MetricType metricType : MetricType.allMetricTypes()) { + enableCollect.put(metricType, false); + topQueriesServices.put(metricType, new TopQueriesService(metricType)); + } + this.threadPool = threadPool; + } + + /** + * Ingest the query data into in-memory stores + * + * @param record the record to ingest + */ + public boolean addRecord(final SearchQueryRecord record) { + boolean shouldAdd = false; + for (Map.Entry<MetricType, TopQueriesService> entry : topQueriesServices.entrySet()) { + if (!enableCollect.get(entry.getKey())) { + continue; + } + List<SearchQueryRecord> currentSnapshot = entry.getValue().getTopQueriesCurrentSnapshot(); + // skip add to top N queries store if the incoming record is smaller than the Nth record + if (currentSnapshot.size() < entry.getValue().getTopNSize() + || SearchQueryRecord.compare(record, currentSnapshot.get(0), entry.getKey()) > 0) { + shouldAdd = true; + break; + } + } + if (shouldAdd) { + return queryRecordsQueue.offer(record); + } + return false; + } + + /** + * Drain the queryRecordsQueue into internal stores and services + */ + public void drainRecords() { + final List<SearchQueryRecord> records = new ArrayList<>(); + queryRecordsQueue.drainTo(records); + records.sort(Comparator.comparingLong(SearchQueryRecord::getTimestamp)); + for (MetricType metricType : MetricType.allMetricTypes()) { + if (enableCollect.get(metricType)) { + // ingest the records into topQueriesService + 
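// records were sorted by timestamp above, so consumeRecords can bucket them into last-window and current-window groups before rotating +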
topQueriesServices.get(metricType).consumeRecords(records); + } + } + } + + /** + * Get the top queries service based on metricType + * @param metricType {@link MetricType} + * @return {@link TopQueriesService} + */ + public TopQueriesService getTopQueriesService(final MetricType metricType) { + return topQueriesServices.get(metricType); + } + + /** + * Set flag to enable or disable Query Insights data collection + * + * @param metricType {@link MetricType} + * @param enable Flag to enable or disable Query Insights data collection + */ + public void enableCollection(final MetricType metricType, final boolean enable) { + this.enableCollect.put(metricType, enable); + this.topQueriesServices.get(metricType).setEnabled(enable); + } + + /** + * Get if the Query Insights data collection is enabled for a MetricType + * + * @param metricType {@link MetricType} + * @return if the Query Insights data collection is enabled + */ + public boolean isCollectionEnabled(final MetricType metricType) { + return this.enableCollect.get(metricType); + } + + /** + * Check if query insights service is enabled + * + * @return if query insights service is enabled + */ + public boolean isEnabled() { + for (MetricType t : MetricType.allMetricTypes()) { + if (isCollectionEnabled(t)) { + return true; + } + } + return false; + } + + @Override + protected void doStart() { + if (isEnabled()) { + scheduledFuture = threadPool.scheduleWithFixedDelay( + this::drainRecords, + QueryInsightsSettings.QUERY_RECORD_QUEUE_DRAIN_INTERVAL, + QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR + ); + } + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java new file mode 100644 index 0000000000000..d2c30cbdf98e7 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java @@ -0,0 +1,282 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.PriorityQueue; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Service responsible for gathering and storing top N queries + * with high latency or resource usage + * + * @opensearch.internal + */ +public class TopQueriesService { + private boolean enabled; + /** + * The metric type to measure top n queries + */ + private final MetricType metricType; + private int topNSize; + /** + * The window size to keep the top n queries + */ + private TimeValue windowSize; + /** + * The current window start timestamp + */ + private long windowStart; + /** + * The internal thread-safe store that holds the top n queries insight data + */ + private final PriorityQueue<SearchQueryRecord> topQueriesStore; + + /** + * The AtomicReference of a snapshot of the current window top queries for getters to consume + */ + private final AtomicReference<List<SearchQueryRecord>> topQueriesCurrentSnapshot; + + /** + * The AtomicReference of a snapshot of the last window top queries for getters to consume + */ + private final AtomicReference<List<SearchQueryRecord>> topQueriesHistorySnapshot; + + TopQueriesService(final MetricType metricType) { + this.enabled = false; + this.metricType = metricType; + this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE; + this.windowSize = QueryInsightsSettings.DEFAULT_WINDOW_SIZE; + this.windowStart = -1L; + topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType)); + topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>()); + topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>()); + } + + /** + * Set the top N size for TopQueriesService service. 
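+     * The value itself is validated separately through {@link #validateTopNSize(int)} when the corresponding cluster setting is updated.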
+     *
+     * @param topNSize the top N size to set
+     */
+    public void setTopNSize(final int topNSize) {
+        this.topNSize = topNSize;
+    }
+
+    /**
+     * Get the currently configured top N size
+     *
+     * @return the top N size
+     */
+    public int getTopNSize() {
+        return topNSize;
+    }
+
+    /**
+     * Validate the top N size based on the internal constraints
+     *
+     * @param size the wanted top N size
+     */
+    public void validateTopNSize(final int size) {
+        if (size > QueryInsightsSettings.MAX_N_SIZE) {
+            throw new IllegalArgumentException(
+                "Top N size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be smaller than max top N size ["
+                    + QueryInsightsSettings.MAX_N_SIZE
+                    + "], was ("
+                    + size
+                    + " > "
+                    + QueryInsightsSettings.MAX_N_SIZE
+                    + ")"
+            );
+        }
+    }
+
+    /**
+     * Set the enabled flag for the service
+     * @param enabled boolean
+     */
+    public void setEnabled(final boolean enabled) {
+        this.enabled = enabled;
+    }
+
+    /**
+     * Set the window size for the top N queries service
+     *
+     * @param windowSize window size to set
+     */
+    public void setWindowSize(final TimeValue windowSize) {
+        this.windowSize = windowSize;
+        // reset the window start time since the window size has changed
+        this.windowStart = -1L;
+    }
+
+    /**
+     * Validate the window size based on internal constraints.
+     *
+     * @param windowSize the window size to validate
+     */
+    public void validateWindowSize(final TimeValue windowSize) {
+        if (windowSize.compareTo(QueryInsightsSettings.MAX_WINDOW_SIZE) > 0
+            || windowSize.compareTo(QueryInsightsSettings.MIN_WINDOW_SIZE) < 0) {
+            throw new IllegalArgumentException(
+                "Window size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be between ["
+                    + QueryInsightsSettings.MIN_WINDOW_SIZE
+                    + ","
+                    + QueryInsightsSettings.MAX_WINDOW_SIZE
+                    + "]"
+                    + ", was ("
+                    + windowSize
+                    + ")"
+            );
+        }
+        if (!(QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES.contains(windowSize) || windowSize.getMinutes() % 60 == 0)) {
+            throw new IllegalArgumentException(
+                "Window size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be a multiple of 1 hour, or one of "
+                    + QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES
+                    + ", was ("
+                    + windowSize
+                    + ")"
+            );
+        }
+    }
+
+    /**
+     * Get all top queries records that are in the current top N queries store.
+     * Optionally include the top N records from the last window.
+     *
+     * By default, return the records in sorted order.
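+     * Sorting is in descending order of the measured metric, so the most expensive queries come first.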
+ * + * @param includeLastWindow if the top N queries from the last window should be included + * @return List of the records that are in the query insight store + * @throws IllegalArgumentException if query insight is disabled in the cluster + */ + public List<SearchQueryRecord> getTopQueriesRecords(final boolean includeLastWindow) throws IllegalArgumentException { + if (!enabled) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Cannot get top n queries for [%s] when it is not enabled.", metricType.toString()) + ); + } + // read from window snapshots + final List<SearchQueryRecord> queries = new ArrayList<>(topQueriesCurrentSnapshot.get()); + if (includeLastWindow) { + queries.addAll(topQueriesHistorySnapshot.get()); + } + return Stream.of(queries) + .flatMap(Collection::stream) + .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1) + .collect(Collectors.toList()); + } + + /** + * Consume records to top queries stores + * + * @param records a list of {@link SearchQueryRecord} + */ + void consumeRecords(final List<SearchQueryRecord> records) { + final long currentWindowStart = calculateWindowStart(System.currentTimeMillis()); + List<SearchQueryRecord> recordsInLastWindow = new ArrayList<>(); + List<SearchQueryRecord> recordsInThisWindow = new ArrayList<>(); + for (SearchQueryRecord record : records) { + // skip the records that does not have the corresponding measurement + if (!record.getMeasurements().containsKey(metricType)) { + continue; + } + if (record.getTimestamp() < currentWindowStart) { + recordsInLastWindow.add(record); + } else { + recordsInThisWindow.add(record); + } + } + // add records in last window, if there are any, to the top n store + addToTopNStore(recordsInLastWindow); + // rotate window and reset window start if necessary + rotateWindowIfNecessary(currentWindowStart); + // add records in current window, if there are any, to the top n store + addToTopNStore(recordsInThisWindow); + // update the current window snapshot for getters to consume + final List<SearchQueryRecord> newSnapShot = new ArrayList<>(topQueriesStore); + newSnapShot.sort((a, b) -> SearchQueryRecord.compare(a, b, metricType)); + topQueriesCurrentSnapshot.set(newSnapShot); + } + + private void addToTopNStore(final List<SearchQueryRecord> records) { + topQueriesStore.addAll(records); + // remove top elements for fix sizing priority queue + while (topQueriesStore.size() > topNSize) { + topQueriesStore.poll(); + } + } + + /** + * Reset the current window and rotate the data to history snapshot for top n queries, + * This function would be invoked zero time or only once in each consumeRecords call + * + * @param newWindowStart the new windowStart to set to + */ + private void rotateWindowIfNecessary(final long newWindowStart) { + // reset window if the current window is outdated + if (windowStart < newWindowStart) { + final List<SearchQueryRecord> history = new ArrayList<>(); + // rotate the current window to history store only if the data belongs to the last window + if (windowStart == newWindowStart - windowSize.getMillis()) { + history.addAll(topQueriesStore); + } + topQueriesHistorySnapshot.set(history); + topQueriesStore.clear(); + topQueriesCurrentSnapshot.set(new ArrayList<>()); + windowStart = newWindowStart; + } + } + + /** + * Calculate the window start for the given timestamp + * + * @param timestamp the given timestamp to calculate window start + */ + private long calculateWindowStart(final long timestamp) { + final LocalDateTime currentTime = 
LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.of("UTC")); + LocalDateTime windowStartTime = currentTime.truncatedTo(ChronoUnit.HOURS); + while (!windowStartTime.plusMinutes(windowSize.getMinutes()).isAfter(currentTime)) { + windowStartTime = windowStartTime.plusMinutes(windowSize.getMinutes()); + } + return windowStartTime.toInstant(ZoneOffset.UTC).getEpochSecond() * 1000; + } + + /** + * Get the current top queries snapshot from the AtomicReference. + * + * @return a list of {@link SearchQueryRecord} + */ + public List<SearchQueryRecord> getTopQueriesCurrentSnapshot() { + return topQueriesCurrentSnapshot.get(); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java new file mode 100644 index 0000000000000..5068f28234f6d --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Service Classes for Query Insights + */ +package org.opensearch.plugin.insights.core.service; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java new file mode 100644 index 0000000000000..04d1f9bfff7e1 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Base Package of Query Insights + */ +package org.opensearch.plugin.insights; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java new file mode 100644 index 0000000000000..9b6b5856f7d27 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions, Requests and Responses for Query Insights + */ +package org.opensearch.plugin.insights.rules.action; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java new file mode 100644 index 0000000000000..26cff82aae52e --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.io.IOException; +import java.util.List; + +/** + * Holds all top queries records by resource usage or latency on a node + * Mainly used in the top N queries node response workflow. + * + * @opensearch.internal + */ +public class TopQueries extends BaseNodeResponse implements ToXContentObject { + /** The store to keep the top queries records */ + private final List<SearchQueryRecord> topQueriesRecords; + + /** + * Create the TopQueries Object from StreamInput + * @param in A {@link StreamInput} object. + * @throws IOException IOException + */ + public TopQueries(final StreamInput in) throws IOException { + super(in); + topQueriesRecords = in.readList(SearchQueryRecord::new); + } + + /** + * Create the TopQueries Object + * @param node A node that is part of the cluster. + * @param searchQueryRecords A list of SearchQueryRecord associated in this TopQueries. + */ + public TopQueries(final DiscoveryNode node, final List<SearchQueryRecord> searchQueryRecords) { + super(node); + topQueriesRecords = searchQueryRecords; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + if (topQueriesRecords != null) { + for (SearchQueryRecord record : topQueriesRecords) { + record.toXContent(builder, params); + } + } + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(topQueriesRecords); + + } + + /** + * Get all top queries records + * + * @return the top queries records in this node response + */ + public List<SearchQueryRecord> getTopQueriesRecord() { + return topQueriesRecords; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java new file mode 100644 index 0000000000000..b8ed69fa5692b --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.ActionType; + +/** + * Transport action for cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesAction extends ActionType<TopQueriesResponse> { + + /** + * The TopQueriesAction Instance. 
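+     * Wired up in {@code QueryInsightsPlugin#getActions()} together with {@code TransportTopQueriesAction}.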
+ */ + public static final TopQueriesAction INSTANCE = new TopQueriesAction(); + /** + * The name of this Action + */ + public static final String NAME = "cluster:admin/opensearch/insights/top_queries"; + + private TopQueriesAction() { + super(NAME, TopQueriesResponse::new); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java new file mode 100644 index 0000000000000..3bdff2c403161 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.plugin.insights.rules.model.MetricType; + +import java.io.IOException; + +/** + * A request to get cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesRequest extends BaseNodesRequest<TopQueriesRequest> { + + final MetricType metricType; + + /** + * Constructor for TopQueriesRequest + * + * @param in A {@link StreamInput} object. + * @throws IOException if the stream cannot be deserialized. + */ + public TopQueriesRequest(final StreamInput in) throws IOException { + super(in); + this.metricType = MetricType.readFromStream(in); + } + + /** + * Get top queries from nodes based on the nodes ids specified. + * If none are passed, cluster level top queries will be returned. + * + * @param metricType {@link MetricType} + * @param nodesIds the nodeIds specified in the request + */ + public TopQueriesRequest(final MetricType metricType, final String... nodesIds) { + super(nodesIds); + this.metricType = metricType; + } + + /** + * Get the type of requested metrics + */ + public MetricType getMetricType() { + return metricType; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(metricType.toString()); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java new file mode 100644 index 0000000000000..2e66bb7f77baf --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java @@ -0,0 +1,143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Transport response for cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesResponse extends BaseNodesResponse<TopQueries> implements ToXContentFragment { + + private static final String CLUSTER_LEVEL_RESULTS_KEY = "top_queries"; + private final MetricType metricType; + private final int top_n_size; + + /** + * Constructor for TopQueriesResponse. + * + * @param in A {@link StreamInput} object. + * @throws IOException if the stream cannot be deserialized. + */ + public TopQueriesResponse(final StreamInput in) throws IOException { + super(in); + top_n_size = in.readInt(); + metricType = in.readEnum(MetricType.class); + } + + /** + * Constructor for TopQueriesResponse + * + * @param clusterName The current cluster name + * @param nodes A list that contains top queries results from all nodes + * @param failures A list that contains FailedNodeException + * @param top_n_size The top N size to return to the user + * @param metricType the {@link MetricType} to be returned in this response + */ + public TopQueriesResponse( + final ClusterName clusterName, + final List<TopQueries> nodes, + final List<FailedNodeException> failures, + final int top_n_size, + final MetricType metricType + ) { + super(clusterName, nodes, failures); + this.top_n_size = top_n_size; + this.metricType = metricType; + } + + @Override + protected List<TopQueries> readNodesFrom(final StreamInput in) throws IOException { + return in.readList(TopQueries::new); + } + + @Override + protected void writeNodesTo(final StreamOutput out, final List<TopQueries> nodes) throws IOException { + out.writeList(nodes); + out.writeLong(top_n_size); + out.writeEnum(metricType); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + final List<TopQueries> results = getNodes(); + postProcess(results); + builder.startObject(); + toClusterLevelResult(builder, params, results); + return builder.endObject(); + } + + @Override + public String toString() { + try { + final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + this.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return builder.toString(); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } + + /** + * Post process the top queries results to add customized attributes + * + * @param results the top queries results + */ + private void postProcess(final List<TopQueries> results) { + for (TopQueries topQueries : results) { + final String nodeId = topQueries.getNode().getId(); + for (SearchQueryRecord record : topQueries.getTopQueriesRecord()) { + 
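// tag each record with the id of the node that reported it, so cluster-level output keeps per-node provenance +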
record.addAttribute(Attribute.NODE_ID, nodeId); + } + } + } + + /** + * Merge top n queries results from nodes into cluster level results in XContent format. + * + * @param builder XContent builder + * @param params serialization parameters + * @param results top queries results from all nodes + * @throws IOException if an error occurs + */ + private void toClusterLevelResult(final XContentBuilder builder, final Params params, final List<TopQueries> results) + throws IOException { + final List<SearchQueryRecord> all_records = results.stream() + .map(TopQueries::getTopQueriesRecord) + .flatMap(Collection::stream) + .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1) + .limit(top_n_size) + .collect(Collectors.toList()); + builder.startArray(CLUSTER_LEVEL_RESULTS_KEY); + for (SearchQueryRecord record : all_records) { + record.toXContent(builder, params); + } + builder.endArray(); + } + +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java similarity index 55% rename from server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java rename to plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java index 04c0825787b66..3cc7900e5ce7d 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java @@ -6,15 +6,7 @@ * compatible open source license. */ -package org.opensearch.common.cache.store.enums; - /** - * Cache store types in tiered cache. - * - * @opensearch.internal + * Transport Actions, Requests and Responses for Top N Queries */ -public enum CacheStoreType { - - ON_HEAP, - DISK; -} +package org.opensearch.plugin.insights.rules.action.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java new file mode 100644 index 0000000000000..c1d17edf9ff14 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Locale; + +/** + * Valid attributes for a search query record + * + * @opensearch.internal + */ +public enum Attribute { + /** + * The search query type + */ + SEARCH_TYPE, + /** + * The search query source + */ + SOURCE, + /** + * Total shards queried + */ + TOTAL_SHARDS, + /** + * The indices involved + */ + INDICES, + /** + * The per phase level latency map for a search query + */ + PHASE_LATENCY_MAP, + /** + * The node id for this request + */ + NODE_ID; + + /** + * Read an Attribute from a StreamInput + * + * @param in the StreamInput to read from + * @return Attribute + * @throws IOException IOException + */ + static Attribute readFromStream(final StreamInput in) throws IOException { + return Attribute.valueOf(in.readString().toUpperCase(Locale.ROOT)); + } + + /** + * Write Attribute to a StreamOutput + * + * @param out the StreamOutput to write + * @param attribute the Attribute to write + * @throws IOException IOException + */ + static void writeTo(final StreamOutput out, final Attribute attribute) throws IOException { + out.writeString(attribute.toString()); + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java new file mode 100644 index 0000000000000..cdd090fbf4804 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Valid metric types for a search query record + * + * @opensearch.internal + */ +public enum MetricType implements Comparator<Number> { + /** + * Latency metric type + */ + LATENCY, + /** + * CPU usage metric type + */ + CPU, + /** + * JVM heap usage metric type + */ + JVM; + + /** + * Read a MetricType from a StreamInput + * + * @param in the StreamInput to read from + * @return MetricType + * @throws IOException IOException + */ + public static MetricType readFromStream(final StreamInput in) throws IOException { + return fromString(in.readString()); + } + + /** + * Create MetricType from String + * + * @param metricType the String representation of MetricType + * @return MetricType + */ + public static MetricType fromString(final String metricType) { + return MetricType.valueOf(metricType.toUpperCase(Locale.ROOT)); + } + + /** + * Write MetricType to a StreamOutput + * + * @param out the StreamOutput to write + * @param metricType the MetricType to write + * @throws IOException IOException + */ + static void writeTo(final StreamOutput out, final MetricType metricType) throws IOException { + out.writeString(metricType.toString()); + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + + /** + * Get all valid metrics + * + * @return A set of String that contains all valid metrics + */ + public static Set<MetricType> allMetricTypes() { + return Arrays.stream(values()).collect(Collectors.toSet()); + } + + /** + * Compare two numbers based on the metric type + * + * @param a the first Number to be compared. + * @param b the second Number to be compared. + * @return a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second + */ + public int compare(final Number a, final Number b) { + switch (this) { + case LATENCY: + return Long.compare(a.longValue(), b.longValue()); + case JVM: + case CPU: + return Double.compare(a.doubleValue(), b.doubleValue()); + } + return -1; + } + + /** + * Parse a value with the correct type based on MetricType + * + * @param o the generic object to parse + * @return {@link Number} + */ + Number parseValue(final Object o) { + switch (this) { + case LATENCY: + return (Long) o; + case JVM: + case CPU: + return (Double) o; + default: + return (Number) o; + } + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java new file mode 100644 index 0000000000000..060711edb5580 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * SearchQueryRecord represents a minimal atomic record stored in the Query Insight Framework, + * which contains extensive information related to a search query. + * + * @opensearch.internal + */ +public class SearchQueryRecord implements ToXContentObject, Writeable { + private final long timestamp; + private final Map<MetricType, Number> measurements; + private final Map<Attribute, Object> attributes; + + /** + * Constructor of SearchQueryRecord + * + * @param in the StreamInput to read the SearchQueryRecord from + * @throws IOException IOException + * @throws ClassCastException ClassCastException + */ + public SearchQueryRecord(final StreamInput in) throws IOException, ClassCastException { + this.timestamp = in.readLong(); + measurements = new HashMap<>(); + in.readMap(MetricType::readFromStream, StreamInput::readGenericValue) + .forEach(((metricType, o) -> measurements.put(metricType, metricType.parseValue(o)))); + this.attributes = in.readMap(Attribute::readFromStream, StreamInput::readGenericValue); + } + + /** + * Constructor of SearchQueryRecord + * + * @param timestamp The timestamp of the query. + * @param measurements A list of Measurement associated with this query + * @param attributes A list of Attributes associated with this query + */ + public SearchQueryRecord(final long timestamp, Map<MetricType, Number> measurements, final Map<Attribute, Object> attributes) { + if (measurements == null) { + throw new IllegalArgumentException("Measurements cannot be null"); + } + this.measurements = measurements; + this.attributes = attributes; + this.timestamp = timestamp; + } + + /** + * Returns the observation time of the metric. + * + * @return the observation time in milliseconds + */ + public long getTimestamp() { + return timestamp; + } + + /** + * Returns the measurement associated with the specified name. + * + * @param name the name of the measurement + * @return the measurement object, or null if not found + */ + public Number getMeasurement(final MetricType name) { + return measurements.get(name); + } + + /** + * Returns a map of all the measurements associated with the metric. + * + * @return a map of measurement names to measurement objects + */ + public Map<MetricType, Number> getMeasurements() { + return measurements; + } + + /** + * Returns a map of the attributes associated with the metric. 
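+     * The returned map is the record's live attribute map; additional attributes can be attached later via {@link #addAttribute(Attribute, Object)}.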
+ * + * @return a map of attribute keys to attribute values + */ + public Map<Attribute, Object> getAttributes() { + return attributes; + } + + /** + * Add an attribute to this record + * + * @param attribute attribute to add + * @param value the value associated with the attribute + */ + public void addAttribute(final Attribute attribute, final Object value) { + attributes.put(attribute, value); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("timestamp", timestamp); + for (Map.Entry<Attribute, Object> entry : attributes.entrySet()) { + builder.field(entry.getKey().toString(), entry.getValue()); + } + for (Map.Entry<MetricType, Number> entry : measurements.entrySet()) { + builder.field(entry.getKey().toString(), entry.getValue()); + } + return builder.endObject(); + } + + /** + * Write a SearchQueryRecord to a StreamOutput + * + * @param out the StreamOutput to write + * @throws IOException IOException + */ + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeLong(timestamp); + out.writeMap(measurements, (stream, metricType) -> MetricType.writeTo(out, metricType), StreamOutput::writeGenericValue); + out.writeMap(attributes, (stream, attribute) -> Attribute.writeTo(out, attribute), StreamOutput::writeGenericValue); + } + + /** + * Compare two SearchQueryRecord, based on the given MetricType + * + * @param a the first SearchQueryRecord to compare + * @param b the second SearchQueryRecord to compare + * @param metricType the MetricType to compare on + * @return 0 if the first SearchQueryRecord is numerically equal to the second SearchQueryRecord; + * -1 if the first SearchQueryRecord is numerically less than the second SearchQueryRecord; + * 1 if the first SearchQueryRecord is numerically greater than the second SearchQueryRecord. + */ + public static int compare(final SearchQueryRecord a, final SearchQueryRecord b, final MetricType metricType) { + return metricType.compare(a.getMeasurement(metricType), b.getMeasurement(metricType)); + } + + /** + * Check if a SearchQueryRecord is deep equal to another record + * + * @param o the other SearchQueryRecord record + * @return true if two records are deep equal, false otherwise. + */ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SearchQueryRecord)) { + return false; + } + final SearchQueryRecord other = (SearchQueryRecord) o; + return timestamp == other.getTimestamp() + && measurements.equals(other.getMeasurements()) + && attributes.size() == other.getAttributes().size(); + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, measurements, attributes); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java new file mode 100644 index 0000000000000..c59ec1550f54b --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Data Models for Query Insight Records + */ +package org.opensearch.plugin.insights.rules.model; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java new file mode 100644 index 0000000000000..3787f05f65552 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Query Insights + */ +package org.opensearch.plugin.insights.rules.resthandler; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java new file mode 100644 index 0000000000000..6aa511c626ab1 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestResponseListener; + +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_QUERIES_BASE_URI; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Rest action to get Top N queries by certain metric type + * + * @opensearch.api + */ +public class RestTopQueriesAction extends BaseRestHandler { + /** The metric types that are allowed in top N queries */ + static final Set<String> ALLOWED_METRICS = MetricType.allMetricTypes().stream().map(MetricType::toString).collect(Collectors.toSet()); + + /** + * Constructor for RestTopQueriesAction + */ + public RestTopQueriesAction() {} + + @Override + public List<Route> routes() { + return List.of( + new Route(GET, TOP_QUERIES_BASE_URI), + new Route(GET, String.format(Locale.ROOT, "%s/{nodeId}", TOP_QUERIES_BASE_URI)) + ); + } + + @Override + public String getName() { + return "query_insights_top_queries_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + 
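+        // Translate the REST parameters (node ids, metric type, timeout) into a transport-level TopQueriesRequest before dispatching it.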
final TopQueriesRequest topQueriesRequest = prepareRequest(request); + topQueriesRequest.timeout(request.param("timeout")); + + return channel -> client.execute(TopQueriesAction.INSTANCE, topQueriesRequest, topQueriesResponse(channel)); + } + + static TopQueriesRequest prepareRequest(final RestRequest request) { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + final String metricType = request.param("type", MetricType.LATENCY.toString()); + if (!ALLOWED_METRICS.contains(metricType)) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "request [%s] contains invalid metric type [%s]", request.path(), metricType) + ); + } + return new TopQueriesRequest(MetricType.fromString(metricType), nodesIds); + } + + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } + + private RestResponseListener<TopQueriesResponse> topQueriesResponse(final RestChannel channel) { + return new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final TopQueriesResponse response) throws Exception { + return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); + } + }; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java new file mode 100644 index 0000000000000..087cf7d765f8c --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Top N Queries + */ +package org.opensearch.plugin.insights.rules.resthandler.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java new file mode 100644 index 0000000000000..f3a1c70b9af57 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions for Query Insights. + */ +package org.opensearch.plugin.insights.rules.transport; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java new file mode 100644 index 0000000000000..ddf614211bc41 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.transport.top_queries; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +/** + * Transport action for cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TransportTopQueriesAction extends TransportNodesAction< + TopQueriesRequest, + TopQueriesResponse, + TransportTopQueriesAction.NodeRequest, + TopQueries> { + + private final QueryInsightsService queryInsightsService; + + /** + * Create the TransportTopQueriesAction Object + + * @param threadPool The OpenSearch thread pool to run async tasks + * @param clusterService The clusterService of this node + * @param transportService The TransportService of this node + * @param queryInsightsService The topQueriesByLatencyService associated with this Transport Action + * @param actionFilters the action filters + */ + @Inject + public TransportTopQueriesAction( + final ThreadPool threadPool, + final ClusterService clusterService, + final TransportService transportService, + final QueryInsightsService queryInsightsService, + final ActionFilters actionFilters + ) { + super( + TopQueriesAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + TopQueriesRequest::new, + NodeRequest::new, + ThreadPool.Names.GENERIC, + TopQueries.class + ); + this.queryInsightsService = queryInsightsService; + } + + @Override + protected TopQueriesResponse newResponse( + final TopQueriesRequest topQueriesRequest, + final List<TopQueries> responses, + final List<FailedNodeException> failures + ) { + if (topQueriesRequest.getMetricType() == MetricType.LATENCY) { + return new TopQueriesResponse( + clusterService.getClusterName(), + responses, + failures, + clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE), + MetricType.LATENCY + ); + } else { + throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", topQueriesRequest.getMetricType())); + } + } + + @Override + protected NodeRequest newNodeRequest(final TopQueriesRequest request) { + return new NodeRequest(request); + } + + @Override + protected TopQueries newNodeResponse(final StreamInput in) throws IOException { + return new TopQueries(in); + } + + @Override + protected TopQueries nodeOperation(final NodeRequest nodeRequest) { + final TopQueriesRequest request = nodeRequest.request; + if (request.getMetricType() == 
MetricType.LATENCY) { + return new TopQueries( + clusterService.localNode(), + queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(true) + ); + } else { + throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", request.getMetricType())); + } + + } + + /** + * Inner Node Top Queries Request + * + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + + final TopQueriesRequest request; + + /** + * Create the NodeResponse object from StreamInput + * + * @param in the StreamInput to read the object + * @throws IOException IOException + */ + public NodeRequest(StreamInput in) throws IOException { + super(in); + request = new TopQueriesRequest(in); + } + + /** + * Create the NodeResponse object from a TopQueriesRequest + * @param request the TopQueriesRequest object + */ + public NodeRequest(final TopQueriesRequest request) { + this.request = request; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java new file mode 100644 index 0000000000000..54da0980deff8 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions for Top N Queries. + */ +package org.opensearch.plugin.insights.rules.transport.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java new file mode 100644 index 0000000000000..52cc1fbde790f --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.settings; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * Settings for Query Insights Plugin + * + * @opensearch.api + * @opensearch.experimental + */ +public class QueryInsightsSettings { + /** + * Executors settings + */ + public static final String QUERY_INSIGHTS_EXECUTOR = "query_insights_executor"; + /** + * Max number of thread + */ + public static final int MAX_THREAD_COUNT = 5; + /** + * Max number of requests for the consumer to collect at one time + */ + public static final int QUERY_RECORD_QUEUE_CAPACITY = 1000; + /** + * Time interval for record queue consumer to run + */ + public static final TimeValue QUERY_RECORD_QUEUE_DRAIN_INTERVAL = new TimeValue(5, TimeUnit.SECONDS); + /** + * Default Values and Settings + */ + public static final TimeValue MAX_WINDOW_SIZE = new TimeValue(1, TimeUnit.DAYS); + /** + * Minimal window size + */ + public static final TimeValue MIN_WINDOW_SIZE = new TimeValue(1, TimeUnit.MINUTES); + /** + * Valid window sizes + */ + public static final Set<TimeValue> VALID_WINDOW_SIZES_IN_MINUTES = new HashSet<>( + Arrays.asList( + new TimeValue(1, TimeUnit.MINUTES), + new TimeValue(5, TimeUnit.MINUTES), + new TimeValue(10, TimeUnit.MINUTES), + new TimeValue(30, TimeUnit.MINUTES) + ) + ); + + /** Default N size for top N queries */ + public static final int MAX_N_SIZE = 100; + /** Default window size in seconds to keep the top N queries with latency data in query insight store */ + public static final TimeValue DEFAULT_WINDOW_SIZE = new TimeValue(60, TimeUnit.SECONDS); + /** Default top N size to keep the data in query insight store */ + public static final int DEFAULT_TOP_N_SIZE = 3; + /** + * Query Insights base uri + */ + public static final String PLUGINS_BASE_URI = "/_insights"; + + /** + * Settings for Top Queries + * + */ + public static final String TOP_QUERIES_BASE_URI = PLUGINS_BASE_URI + "/top_queries"; + /** Default prefix for top N queries feature */ + public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries"; + /** Default prefix for top N queries by latency feature */ + public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency"; + /** + * Boolean setting for enabling top queries by latency. + */ + public static final Setting<Boolean> TOP_N_LATENCY_QUERIES_ENABLED = Setting.boolSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Int setting to define the top n size for top queries by latency. + */ + public static final Setting<Integer> TOP_N_LATENCY_QUERIES_SIZE = Setting.intSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".top_n_size", + DEFAULT_TOP_N_SIZE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Time setting to define the window size in seconds for top queries by latency. 
+ */ + public static final Setting<TimeValue> TOP_N_LATENCY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".window_size", + DEFAULT_WINDOW_SIZE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Default constructor + */ + public QueryInsightsSettings() {} +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java new file mode 100644 index 0000000000000..f3152bbf966cb --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Settings for Query Insights Plugin + */ +package org.opensearch.plugin.insights.settings; diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java new file mode 100644 index 0000000000000..273b69e483e8c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.rest.RestHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class QueryInsightsPluginTests extends OpenSearchTestCase { + + private QueryInsightsPlugin queryInsightsPlugin; + + private final Client client = mock(Client.class); + private ClusterService clusterService; + private final ThreadPool threadPool = mock(ThreadPool.class); + + @Before + public void setup() { + queryInsightsPlugin = new QueryInsightsPlugin(); + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
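+        // Registering these dynamic settings lets the ClusterService constructed below validate and apply updates to them.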
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + + clusterService = new ClusterService(settings, clusterSettings, threadPool); + + } + + public void testGetSettings() { + assertEquals( + Arrays.asList( + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ), + queryInsightsPlugin.getSettings() + ); + } + + public void testCreateComponent() { + List<Object> components = (List<Object>) queryInsightsPlugin.createComponents( + client, + clusterService, + threadPool, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertEquals(2, components.size()); + assertTrue(components.get(0) instanceof QueryInsightsService); + assertTrue(components.get(1) instanceof QueryInsightsListener); + } + + public void testGetExecutorBuilders() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + List<ExecutorBuilder<?>> executorBuilders = queryInsightsPlugin.getExecutorBuilders(settings); + assertEquals(1, executorBuilders.size()); + assertTrue(executorBuilders.get(0) instanceof ScalingExecutorBuilder); + } + + public void testGetRestHandlers() { + List<RestHandler> components = queryInsightsPlugin.getRestHandlers(Settings.EMPTY, null, null, null, null, null, null); + assertEquals(1, components.size()); + assertTrue(components.get(0) instanceof RestTopQueriesAction); + } + + public void testGetActions() { + List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> components = queryInsightsPlugin.getActions(); + assertEquals(1, components.size()); + assertTrue(components.get(0).getAction() instanceof TopQueriesAction); + } + +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java new file mode 100644 index 0000000000000..870ef5b9c8be9 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java @@ -0,0 +1,189 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.Maps; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.OpenSearchTestCase.buildNewFakeTransportAddress; +import static org.opensearch.test.OpenSearchTestCase.random; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween; +import static org.opensearch.test.OpenSearchTestCase.randomArray; +import static org.opensearch.test.OpenSearchTestCase.randomDouble; +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; +import static org.opensearch.test.OpenSearchTestCase.randomLong; +import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +final public class QueryInsightsTestUtils { + + public QueryInsightsTestUtils() {} + + public static List<SearchQueryRecord> generateQueryInsightRecords(int count) { + return generateQueryInsightRecords(count, count, System.currentTimeMillis(), 0); + } + + /** + * Creates a List of random Query Insight Records for testing purpose + */ + public static List<SearchQueryRecord> generateQueryInsightRecords(int lower, int upper, long startTimeStamp, long interval) { + List<SearchQueryRecord> records = new ArrayList<>(); + int countOfRecords = randomIntBetween(lower, upper); + long timestamp = startTimeStamp; + for (int i = 0; i < countOfRecords; ++i) { + Map<MetricType, Number> measurements = Map.of( + MetricType.LATENCY, + randomLongBetween(1000, 10000), + MetricType.CPU, + randomDouble(), + MetricType.JVM, + randomDouble() + ); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + int countOfPhases = randomIntBetween(2, 5); + for (int j = 0; j < countOfPhases; ++j) { + phaseLatencyMap.put(randomAlphaOfLengthBetween(5, 10), randomLong()); + } + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + attributes.put(Attribute.SOURCE, "{\"size\":20}"); + attributes.put(Attribute.TOTAL_SHARDS, randomIntBetween(1, 100)); + attributes.put(Attribute.INDICES, randomArray(1, 3, Object[]::new, () -> randomAlphaOfLengthBetween(5, 10))); + attributes.put(Attribute.PHASE_LATENCY_MAP, phaseLatencyMap); + + records.add(new SearchQueryRecord(timestamp, measurements, attributes)); + timestamp += interval; + } + return records; + } + + public static TopQueries createRandomTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + 
VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = generateQueryInsightRecords(10); + + return new TopQueries(node, records); + } + + public static TopQueries createFixedTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = new ArrayList<>(); + records.add(createFixedSearchQueryRecord()); + + return new TopQueries(node, records); + } + + public static SearchQueryRecord createFixedSearchQueryRecord() { + long timestamp = 1706574180000L; + Map<MetricType, Number> measurements = Map.of(MetricType.LATENCY, 1L); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + + return new SearchQueryRecord(timestamp, measurements, attributes); + } + + public static void compareJson(ToXContent param1, ToXContent param2) throws IOException { + if (param1 == null || param2 == null) { + assertNull(param1); + assertNull(param2); + return; + } + + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + + XContentBuilder param2Builder = jsonBuilder(); + param2.toXContent(param2Builder, params); + + assertEquals(param1Builder.toString(), param2Builder.toString()); + } + + @SuppressWarnings("unchecked") + public static boolean checkRecordsEquals(List<SearchQueryRecord> records1, List<SearchQueryRecord> records2) { + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!records1.get(i).equals(records2.get(i))) { + return false; + } + Map<Attribute, Object> attributes1 = records1.get(i).getAttributes(); + Map<Attribute, Object> attributes2 = records2.get(i).getAttributes(); + for (Map.Entry<Attribute, Object> entry : attributes1.entrySet()) { + Attribute attribute = entry.getKey(); + Object value = entry.getValue(); + if (!attributes2.containsKey(attribute)) { + return false; + } + if (value instanceof Object[] && !Arrays.deepEquals((Object[]) value, (Object[]) attributes2.get(attribute))) { + return false; + } else if (value instanceof Map + && !Maps.deepEquals((Map<Object, Object>) value, (Map<Object, Object>) attributes2.get(attribute))) { + return false; + } + } + } + return true; + } + + public static boolean checkRecordsEqualsWithoutOrder( + List<SearchQueryRecord> records1, + List<SearchQueryRecord> records2, + MetricType metricType + ) { + Set<SearchQueryRecord> set2 = new TreeSet<>((a, b) -> SearchQueryRecord.compare(a, b, metricType)); + set2.addAll(records2); + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!set2.contains(records1.get(i))) { + return false; + } + } + return true; + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java new file mode 100644 index 0000000000000..f340950017a5c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions 
made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.core.service.TopQueriesService; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Unit Tests for {@link QueryInsightsListener}. + */ +public class QueryInsightsListenerTests extends OpenSearchTestCase { + private final SearchRequestContext searchRequestContext = mock(SearchRequestContext.class); + private final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class); + private final SearchRequest searchRequest = mock(SearchRequest.class); + private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class); + private final TopQueriesService topQueriesService = mock(TopQueriesService.class); + private ClusterService clusterService; + + @Before + public void setup() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + clusterService = new ClusterService(settings, clusterSettings, null); + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); + } + + public void testOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 
10; + + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext); + + verify(queryInsightsService, times(1)).addRecord(any()); + } + + public void testConcurrentOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 10; + + final List<QueryInsightsListener> searchListenersList = new ArrayList<>(); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + int numRequests = 50; + Thread[] threads = new Thread[numRequests]; + Phaser phaser = new Phaser(numRequests + 1); + CountDownLatch countDownLatch = new CountDownLatch(numRequests); + + for (int i = 0; i < numRequests; i++) { + searchListenersList.add(new QueryInsightsListener(clusterService, queryInsightsService)); + } + + for (int i = 0; i < numRequests; i++) { + int finalI = i; + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + QueryInsightsListener thisListener = searchListenersList.get(finalI); + thisListener.onRequestEnd(searchPhaseContext, searchRequestContext); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + verify(queryInsightsService, times(numRequests)).addRecord(any()); + } + + public void testSetEnabled() { + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, true); + assertTrue(queryInsightsListener.isEnabled()); + + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.JVM)).thenReturn(false); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false); + assertFalse(queryInsightsListener.isEnabled()); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java new file mode 100644 index 0000000000000..c29b48b9690d1 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import static org.mockito.Mockito.mock; + +/** + * Unit Tests for {@link QueryInsightsService}. + */ +public class QueryInsightsServiceTests extends OpenSearchTestCase { + private final ThreadPool threadPool = mock(ThreadPool.class); + private QueryInsightsService queryInsightsService; + + @Before + public void setup() { + queryInsightsService = new QueryInsightsService(threadPool); + queryInsightsService.enableCollection(MetricType.LATENCY, true); + queryInsightsService.enableCollection(MetricType.CPU, true); + queryInsightsService.enableCollection(MetricType.JVM, true); + } + + public void testAddRecordToLimitAndDrain() { + SearchQueryRecord record = QueryInsightsTestUtils.generateQueryInsightRecords(1, 1, System.currentTimeMillis(), 0).get(0); + for (int i = 0; i < QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY; i++) { + assertTrue(queryInsightsService.addRecord(record)); + } + // exceed capacity + assertFalse(queryInsightsService.addRecord(record)); + queryInsightsService.drainRecords(); + assertEquals( + QueryInsightsSettings.DEFAULT_TOP_N_SIZE, + queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size() + ); + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java new file mode 100644 index 0000000000000..060df84a89485 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.cluster.coordination.DeterministicTaskQueue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Unit Tests for {@link QueryInsightsService}. 
+ */ +public class TopQueriesServiceTests extends OpenSearchTestCase { + private TopQueriesService topQueriesService; + + @Before + public void setup() { + topQueriesService = new TopQueriesService(MetricType.LATENCY); + topQueriesService.setTopNSize(Integer.MAX_VALUE); + topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE)); + topQueriesService.setEnabled(true); + } + + public void testIngestQueryDataWithLargeWindow() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.consumeRecords(records); + assertTrue( + QueryInsightsTestUtils.checkRecordsEqualsWithoutOrder( + topQueriesService.getTopQueriesRecords(false), + records, + MetricType.LATENCY + ) + ); + } + + public void testRollingWindows() { + List<SearchQueryRecord> records; + // Create 5 records at Now - 10 minutes to make sure they belong to the last window + records = QueryInsightsTestUtils.generateQueryInsightRecords(5, 5, System.currentTimeMillis() - 1000 * 60 * 10, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(0, topQueriesService.getTopQueriesRecords(true).size()); + + // Create 10 records at now + 1 minute, to make sure they belong to the current window + records = QueryInsightsTestUtils.generateQueryInsightRecords(10, 10, System.currentTimeMillis() + 1000 * 60, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(10, topQueriesService.getTopQueriesRecords(true).size()); + } + + public void testSmallNSize() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.setTopNSize(1); + topQueriesService.consumeRecords(records); + assertEquals(1, topQueriesService.getTopQueriesRecords(false).size()); + } + + public void testValidateTopNSize() { + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); + } + + public void testGetTopQueriesWhenNotEnabled() { + topQueriesService.setEnabled(false); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); + } + + public void testValidateWindowSize() { + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MAX_WINDOW_SIZE.getSeconds() + 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MIN_WINDOW_SIZE.getSeconds() - 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(2, TimeUnit.DAYS)); }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(7, TimeUnit.MINUTES)); }); + } + + private static void runUntilTimeoutOrFinish(DeterministicTaskQueue deterministicTaskQueue, long duration) { + final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + duration; + while (deterministicTaskQueue.getCurrentTimeMillis() < endTime + && (deterministicTaskQueue.hasRunnableTasks() || deterministicTaskQueue.hasDeferredTasks())) { + if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { + deterministicTaskQueue.advanceTime(); + } else if (deterministicTaskQueue.hasRunnableTasks()) { + deterministicTaskQueue.runRandomTask(); + } + } + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java new file mode 100644 index 0000000000000..619fd4b33a3dc --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Granular tests for the {@link TopQueriesRequest} class. + */ +public class TopQueriesRequestTests extends OpenSearchTestCase { + + /** + * Check that we can set the metric type + */ + public void testSetMetricType() throws Exception { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY, randomAlphaOfLength(5)); + TopQueriesRequest deserializedRequest = roundTripRequest(request); + assertEquals(request.getMetricType(), deserializedRequest.getMetricType()); + } + + /** + * Serialize and deserialize a request. + * @param request A request to serialize. + * @return The deserialized, "round-tripped" request. + */ + private static TopQueriesRequest roundTripRequest(TopQueriesRequest request) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesRequest(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java new file mode 100644 index 0000000000000..eeee50d3da703 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Granular tests for the {@link TopQueriesResponse} class. 
+ */ +public class TopQueriesResponseTests extends OpenSearchTestCase { + + /** + * Check serialization and deserialization + */ + public void testSerialize() throws Exception { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + TopQueriesResponse deserializedResponse = roundTripResponse(response); + assertEquals(response.toString(), deserializedResponse.toString()); + } + + public void testToXContent() throws IOException { + char[] expectedXcontent = + "{\"top_queries\":[{\"timestamp\":1706574180000,\"node_id\":\"node_for_top_queries_test\",\"search_type\":\"query_then_fetch\",\"latency\":1}]}" + .toCharArray(); + TopQueries topQueries = QueryInsightsTestUtils.createFixedTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + char[] xContent = BytesReference.bytes(response.toXContent(builder, ToXContent.EMPTY_PARAMS)).utf8ToString().toCharArray(); + Arrays.sort(expectedXcontent); + Arrays.sort(xContent); + + assertEquals(Arrays.hashCode(expectedXcontent), Arrays.hashCode(xContent)); + } + + /** + * Serialize and deserialize a TopQueriesResponse. + * @param response A response to serialize. + * @return The deserialized, "round-tripped" response. + */ + private static TopQueriesResponse roundTripResponse(TopQueriesResponse response) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesResponse(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java new file mode 100644 index 0000000000000..7db08b53ad1df --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +/** + * Tests for {@link TopQueries}. 
+ */ +public class TopQueriesTests extends OpenSearchTestCase { + + public void testTopQueries() throws IOException { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + topQueries.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + TopQueries readTopQueries = new TopQueries(in); + assertTrue( + QueryInsightsTestUtils.checkRecordsEquals(topQueries.getTopQueriesRecord(), readTopQueries.getTopQueriesRecord()) + ); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java new file mode 100644 index 0000000000000..793d5878e2300 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Granular tests for the {@link SearchQueryRecord} class. + */ +public class SearchQueryRecordTests extends OpenSearchTestCase { + + /** + * Check that the serialization, deserialization and equals functions are working as expected + */ + public void testSerializationAndEquals() throws Exception { + List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + List<SearchQueryRecord> copiedRecords = new ArrayList<>(); + for (SearchQueryRecord record : records) { + copiedRecords.add(roundTripRecord(record)); + } + assertTrue(QueryInsightsTestUtils.checkRecordsEquals(records, copiedRecords)); + + } + + public void testAllMetricTypes() { + Set<MetricType> allMetrics = MetricType.allMetricTypes(); + Set<MetricType> expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.JVM)); + assertEquals(expected, allMetrics); + } + + public void testCompare() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(0, SearchQueryRecord.compare(record1, record2, MetricType.LATENCY)); + } + + public void testEqual() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(record1, record2); + } + + /** + * Serialize and deserialize a SearchQueryRecord. + * @param record A SearchQueryRecord to serialize. + * @return The deserialized, "round-tripped" record.
+ */ + private static SearchQueryRecord roundTripRecord(SearchQueryRecord record) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + record.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new SearchQueryRecord(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java new file mode 100644 index 0000000000000..ac19fa2a7348f --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction.ALLOWED_METRICS; + +public class RestTopQueriesActionTests extends OpenSearchTestCase { + + public void testEmptyNodeIdsValidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertEquals(0, actual.nodesIds().length); + } + + public void testNodeIdsValid() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + String[] nodes = randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(5, 10)); + params.put("nodeId", String.join(",", nodes)); + + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertArrayEquals(nodes, actual.nodesIds()); + } + + public void testInValidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomAlphaOfLengthBetween(5, 10).toUpperCase(Locale.ROOT)); + + RestRequest restRequest = buildRestRequest(params); + Exception exception = assertThrows(IllegalArgumentException.class, () -> { RestTopQueriesAction.prepareRequest(restRequest); }); + assertEquals( + String.format(Locale.ROOT, "request [/_insights/top_queries] contains invalid metric type [%s]", params.get("type")), + exception.getMessage() + ); + } + + public void testGetRoutes() { + RestTopQueriesAction action = new RestTopQueriesAction(); + List<RestHandler.Route> routes = action.routes(); + assertEquals(2, routes.size()); + assertEquals("query_insights_top_queries_action", action.getName()); + } + + private FakeRestRequest buildRestRequest(Map<String, String> params) { + return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_insights/top_queries") + .withParams(params) + .build(); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java new file mode 100644 index 0000000000000..a5f36b6e8cce0 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.transport.top_queries; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class TransportTopQueriesActionTests extends OpenSearchTestCase { + + private final ThreadPool threadPool = mock(ThreadPool.class); + + private final Settings.Builder settingsBuilder = Settings.builder(); + private final Settings settings = settingsBuilder.build(); + private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + private final TransportService transportService = mock(TransportService.class); + private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class); + private final ActionFilters actionFilters = mock(ActionFilters.class); + private final TransportTopQueriesAction transportTopQueriesAction = new TransportTopQueriesAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + private final DummyParentAction dummyParentAction = new DummyParentAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + + class DummyParentAction extends TransportTopQueriesAction { + public DummyParentAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + QueryInsightsService topQueriesByLatencyService, + ActionFilters actionFilters + ) { + super(threadPool, clusterService, transportService, topQueriesByLatencyService, actionFilters); + } + + public TopQueriesResponse createNewResponse() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + return newResponse(request, List.of(), List.of()); + } + } + + @Before + public void setup() { + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + } + + public void testNewResponse() { + TopQueriesResponse response = dummyParentAction.createNewResponse(); + assertNotNull(response); + } + +} diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 0232fc58f9357..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 deleted file mode 100644 index 3b6cd3524d978..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b126ceba61275f38297cacd5ea0cd6d3addee04 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..114d77a1bb95f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9d01e814971f2..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50a2d899a8f8a68daed1a9b6d7750184310cc45f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5a4bde479eb38 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 987b524aedc98..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 deleted file mode 100644 index 5eaf96739ed72..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 new file mode 100644 index 0000000000000..c30a99a2338b4 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 @@ -0,0 +1 @@ +3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 deleted file mode 100644 index 091125169c696..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 new file mode 100644 index 0000000000000..ab3171cd02b73 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 @@ -0,0 +1 @@ +c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights 
reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 986720ec431fe..1ba16422c9214 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -39,6 +39,8 @@ import com.azure.storage.common.implementation.Constants; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; @@ -188,6 +190,7 @@ protected String requestUniqueId(final HttpExchange exchange) { @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHandler { + private static final Logger testLogger = LogManager.getLogger(AzureHTTPStatsCollectorHandler.class); private static final Pattern listPattern = Pattern.compile("GET /[a-zA-Z0-9]+\\??.+"); private static final Pattern getPattern = Pattern.compile("GET /[^?/]+/[^?/]+\\??.*"); @@ -197,6 +200,7 @@ private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { @Override protected void maybeTrack(String request, Headers headers) { + testLogger.info(request, headers); if (getPattern.matcher(request).matches()) { trackRequest("GetBlob"); } else if (Regex.simpleMatch("HEAD /*/*", request)) { diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index b60701ba5e533..74edd4f3eb23c 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -389,6 +389,7 @@ private static class NioThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; + @SuppressWarnings("removal") NioThreadFactory() { SecurityManager s = System.getSecurityManager(); group = (s != null) ? 
s.getThreadGroup() : Thread.currentThread().getThreadGroup(); diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java index 0fbe9797f726f..a206c3b883870 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java @@ -49,6 +49,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1bef5146f1db9..0ddcf0f6dddca 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -54,19 +54,19 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:2.35.0' - api 'com.google.api:gax-httpjson:0.103.1' + api 'com.google.api:gax-httpjson:2.42.0' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.25.1' + api 'com.google.api.grpc:proto-google-common-protos:2.33.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" - api 'com.google.cloud:google-cloud-core:2.5.10' + api 'com.google.cloud:google-cloud-core:2.30.0' api 'com.google.cloud:google-cloud-core-http:2.23.0' api 'com.google.cloud:google-cloud-storage:1.113.1' @@ -78,7 +78,7 @@ dependencies { api 'com.google.http-client:google-http-client:1.43.3' api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.43.3' + api 'com.google.http-client:google-http-client-jackson2:1.44.1' api 'com.google.oauth-client:google-oauth-client:1.34.1' @@ -206,7 +206,10 @@ thirdPartyAudit { // commons-logging provided dependencies 'javax.jms.Message', 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' + 'javax.servlet.ServletContextListener', + // Bump for gax 2.42.0 + 'com.google.api.gax.rpc.EndpointContext', + 'com.google.api.gax.rpc.RequestMutator' ) } diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 deleted file mode 100644 index 11315004e233d..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -041d99172fda933bc879bdfd8de9420c5c34107e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 new file mode 100644 index 0000000000000..672506572ed4d --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 @@ -0,0 +1 @@ +4db06bc31c2fb34b0490362e8666c20fdc1fb3f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 new file mode 100644 index 0000000000000..10f8f90df108f --- /dev/null +++ 
b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 @@ -0,0 +1 @@ +b48ea27cbdccd5f225d8a35ea28e2cd01c25918b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 deleted file mode 100644 index 34c3dc6805500..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d979bfe28551eb78cddae9282833ede147a9331 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 deleted file mode 100644 index 8380b9fb770b5..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689da86469d19a01c726c8c24477b95c8a834bbe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 deleted file mode 100644 index cd065dabb8e8a..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb90049537b621e39610a110c58ce0b914ee3cc5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 new file mode 100644 index 0000000000000..746e4e99fd881 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 @@ -0,0 +1 @@ +644e11df1cec6d38a63a9a06a701e48c398b87d0 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java new file mode 100644 index 0000000000000..5002ab9a2e704 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.gcs; + +import com.google.auth.oauth2.GoogleCredentials; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.IOException; + +/** + * This class facilitates to fetch Application Default Credentials + * see <a href="https://cloud.google.com/docs/authentication/application-default-credentials">How Application Default Credentials works</a> + */ +public class GoogleApplicationDefaultCredentials { + private static final Logger logger = LogManager.getLogger(GoogleApplicationDefaultCredentials.class); + + public GoogleCredentials get() { + GoogleCredentials credentials = null; + try { + credentials = SocketAccess.doPrivilegedIOException(GoogleCredentials::getApplicationDefault); + } catch (IOException e) { + logger.error("Failed to retrieve \"Application Default Credentials\"", e); + } + return credentials; + } +} diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index e15b37f209c5f..620f8e98d5f20 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -106,6 +106,7 @@ class GoogleCloudStorageRetryingInputStream extends InputStream { currentStream = openStream(); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "need access to storage client") private static com.google.api.services.storage.Storage getStorage(Storage client) { return AccessController.doPrivileged((PrivilegedAction<com.google.api.services.storage.Storage>) () -> { diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index c9ebb3acaf3e5..83a4146c99b99 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -36,6 +36,7 @@ import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.ServiceOptions; import com.google.cloud.http.HttpTransportOptions; @@ -70,6 +71,16 @@ public class GoogleCloudStorageService { */ private volatile Map<String, Storage> clientCache = emptyMap(); + final private GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials; + + public GoogleCloudStorageService() { + this.googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + } + + public GoogleCloudStorageService(GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials) { + this.googleApplicationDefaultCredentials = googleApplicationDefaultCredentials; + } + /** * Refreshes the client settings and clears the client cache. 
Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed @@ -213,10 +224,11 @@ StorageOptions createStorageOptions( storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } if (clientSettings.getCredential() == null) { - logger.warn( - "\"Application Default Credentials\" are not supported out of the box." - + " Additional file system permissions have to be granted to the plugin." - ); + logger.info("\"Application Default Credentials\" will be in use"); + final GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + if (credentials != null) { + storageOptionsBuilder.setCredentials(credentials); + } } else { ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); // override token server URI diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 35127d6ea4060..f8c451749480b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -48,6 +48,7 @@ * needs {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access * in {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java index a531555debefb..58e412684ed5a 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -33,8 +33,10 @@ package org.opensearch.repositories.gcs; import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -42,30 +44,38 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; +import java.io.IOException; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; import java.security.KeyPair; import java.security.KeyPairGenerator; import java.util.Base64; import java.util.Locale; import java.util.UUID; +import org.mockito.Mockito; + import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class GoogleCloudStorageServiceTests extends OpenSearchTestCase { + final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String endpoint = randomFrom("http://", 
"https://") + + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + public void testClientInitializer() throws Exception { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final String endpoint = randomFrom("http://", "https://") - + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") - + ":" - + randomIntBetween(1, 65535); - final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put( GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), @@ -82,31 +92,35 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - final GoogleCloudStorageService service = new GoogleCloudStorageService(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + final GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> service.client("another_client", "repo", statsCollector) ); - assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + MatcherAssert.assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( new Setting<?>[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) } ); final Storage storage = service.client(clientName, "repo", statsCollector); - assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); - assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); - assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); - assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); - assertThat( + MatcherAssert.assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + MatcherAssert.assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); + MatcherAssert.assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); + MatcherAssert.assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); + MatcherAssert.assertThat( ((HttpTransportOptions) 
storage.getOptions().getTransportOptions()).getConnectTimeout(), Matchers.is((int) connectTimeValue.millis()) ); - assertThat( + MatcherAssert.assertThat( ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), Matchers.is((int) readTimeValue.millis()) ); - assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); + MatcherAssert.assertThat(storage.getOptions().getCredentials(), Matchers.instanceOf(Credentials.class)); } public void testReinitClientSettings() throws Exception { @@ -122,33 +136,33 @@ public void testReinitClientSettings() throws Exception { final GoogleCloudStorageService storageService = plugin.storageService; GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final Storage client11 = storageService.client("gcs1", "repo1", statsCollector); - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); final Storage client12 = storageService.client("gcs2", "repo2", statsCollector); - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // client 3 is missing final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs3", "repo3", statsCollector) ); - assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); + MatcherAssert.assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); // update client settings plugin.reload(settings2); // old client 1 not changed - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); // new client 1 is changed final Storage client21 = storageService.client("gcs1", "repo1", statsCollector); - assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); + MatcherAssert.assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); // old client 2 not changed - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // new client2 is gone final IllegalArgumentException e2 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs2", "repo2", statsCollector) ); - assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); + MatcherAssert.assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); // client 3 emerged final Storage client23 = storageService.client("gcs3", "repo3", statsCollector); - assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); + MatcherAssert.assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); } } @@ -193,4 +207,72 @@ public void testToTimeout() { assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); assertEquals(0, GoogleCloudStorageService.toTimeout(TimeValue.MINUS_ONE).intValue()); } + + /** + * The following method test the Google Application Default Credential instead of + * using service account file. + * Considered use of JUnit Mocking due to static method GoogleCredentials.getApplicationDefault + * and avoiding environment variables to set which later use GCE. 
+ * @throws Exception + */ + public void testApplicationDefaultCredential() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + assertNotNull(storageOptions); + assertEquals(storageOptions.getCredentials().toString(), mockGoogleCredentials.toString()); + } + + /** + * The application default credential throws exception when there are + * no Environment Variables provided or Google Compute Engine is not running + * @throws Exception + */ + public void testApplicationDefaultCredentialsWhenNoSettingProvided() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleCloudStorageService service = new GoogleCloudStorageService(); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + assertNotNull(storageOptions); + assertNull(storageOptions.getCredentials()); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * The application default credential throws IOException when it is + * used without GoogleCloudStorageService + */ + public void testDefaultCredentialsThrowsExceptionWithoutGCStorageService() { + GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + assertNull(credentials); + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * This is a helper method to provide GCS Client settings without credentials + * @return GoogleCloudStorageClientSettings + * @throws URISyntaxException + */ + private GoogleCloudStorageClientSettings getGCSClientSettingsWithoutCredentials() throws URISyntaxException { + return new GoogleCloudStorageClientSettings( + null, + endpoint, + projectIdName, + connectTimeValue, + readTimeValue, + applicationName, + new URI(""), + new ProxySettings(Proxy.Type.DIRECT, null, 0, null, null) + ); + } + } diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9110503f67304..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d75246285e5fac6f6dad47e387ed4f46f36e521d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 new file mode 100644 index 
0000000000000..0e3595fecb0d2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java index 119d060374be2..af49cd3c579e6 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java @@ -52,6 +52,7 @@ import java.util.Collections; import java.util.Map; +@SuppressWarnings("removal") public final class HdfsPlugin extends Plugin implements RepositoryPlugin { // initialize some problematic classes with elevated privileges diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index f0ffec5713c1d..4b38e62b2525a 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -254,6 +254,7 @@ private static String getHostName() { } } + @SuppressWarnings("removal") @Override protected HdfsBlobStore createBlobStore() { // initialize our blobstore using elevated privileges. diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 07d1d29eecfc4..5a27eb937ff9c 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -57,6 +57,7 @@ * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. 
*/ +@SuppressWarnings("removal") class HdfsSecurityContext { private static final Permission[] SIMPLE_AUTH_PERMISSIONS; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index d0b63f17e3887..89ba8d51cf7f7 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -62,6 +62,7 @@ /** * Integration test that runs against an HA-Enabled HDFS instance */ +@SuppressWarnings("removal") public class HaHdfsFailoverTestSuiteIT extends OpenSearchRestTestCase { public void testHAFailoverWithRepository() throws Exception { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 3a6eb0e205ccb..5f7454df4ecfc 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -64,6 +64,7 @@ @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsBlobStoreContainerTests extends OpenSearchTestCase { + @SuppressWarnings("removal") private FileContext createTestContext() { FileContext fileContext; try { diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 deleted file mode 100644 index 522d85a3bf12e..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -700fdbabab44709b0eccffe8f91c4226a5787356 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..6b9a35acb2c20 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 3a55fcb0bdbcd..25f361b40636e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -514,7 +514,7 @@ private static List<ListObjectsV2Response> executeListing( for (ListObjectsV2Response listObjectsV2Response : listObjectsIterable) { results.add(listObjectsV2Response); totalObjects += listObjectsV2Response.contents().size(); - if (limit != -1 && totalObjects > limit) { + if (limit != -1 && totalObjects >= limit) { break; } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 4fda0ee95a3ec..e44f408e6dd12 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -201,6 +201,12 @@ final class S3ClientSettings { key -> Setting.intSetting(key, 500, Property.NodeScope) ); + static final Setting.AffixSetting<Integer> MAX_SYNC_CONNECTIONS_SETTING = Setting.affixKeySetting( + PREFIX, + "max_sync_connections", + key -> Setting.intSetting(key, 500, Property.NodeScope) + ); + /** Connection acquisition timeout for new connections to S3. */ static final Setting.AffixSetting<TimeValue> CONNECTION_ACQUISITION_TIMEOUT = Setting.affixKeySetting( PREFIX, @@ -284,10 +290,13 @@ final class S3ClientSettings { /** The connection TTL for the s3 client */ final int connectionTTLMillis; - /** The max number of connections for the s3 client */ + /** The max number of connections for the s3 async client */ final int maxConnections; - /** The connnection acquisition timeout for the s3 async client */ + /** The max number of connections for the s3 sync client */ + final int maxSyncConnections; + + /** The connnection acquisition timeout for the s3 sync and async client */ final int connectionAcquisitionTimeoutMillis; /** The number of retries to use for the s3 client. 
*/ @@ -318,6 +327,7 @@ private S3ClientSettings( int connectionTimeoutMillis, int connectionTTLMillis, int maxConnections, + int maxSyncConnections, int connectionAcquisitionTimeoutMillis, int maxRetries, boolean throttleRetries, @@ -336,6 +346,7 @@ private S3ClientSettings( this.connectionTimeoutMillis = connectionTimeoutMillis; this.connectionTTLMillis = connectionTTLMillis; this.maxConnections = maxConnections; + this.maxSyncConnections = maxSyncConnections; this.connectionAcquisitionTimeoutMillis = connectionAcquisitionTimeoutMillis; this.maxRetries = maxRetries; this.throttleRetries = throttleRetries; @@ -386,6 +397,9 @@ S3ClientSettings refine(Settings repositorySettings) { ).millis() ); final int newMaxConnections = Math.toIntExact(getRepoSettingOrDefault(MAX_CONNECTIONS_SETTING, normalizedSettings, maxConnections)); + final int newMaxSyncConnections = Math.toIntExact( + getRepoSettingOrDefault(MAX_SYNC_CONNECTIONS_SETTING, normalizedSettings, maxConnections) + ); final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean newPathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); @@ -433,6 +447,7 @@ S3ClientSettings refine(Settings repositorySettings) { newConnectionTimeoutMillis, newConnectionTTLMillis, newMaxConnections, + newMaxSyncConnections, newConnectionAcquisitionTimeoutMillis, newMaxRetries, newThrottleRetries, @@ -563,6 +578,7 @@ static S3ClientSettings getClientSettings(final Settings settings, final String Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TIMEOUT_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TTL_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, MAX_CONNECTIONS_SETTING)), + Math.toIntExact(getConfigValue(settings, clientName, MAX_SYNC_CONNECTIONS_SETTING)), Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_ACQUISITION_TIMEOUT).millis()), getConfigValue(settings, clientName, MAX_RETRIES_SETTING), getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING), diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 24387fb98a425..fe81da31432f4 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -279,6 +279,8 @@ protected PasswordAuthentication getPasswordAuthentication() { } clientBuilder.socketTimeout(Duration.ofMillis(clientSettings.readTimeoutMillis)); + clientBuilder.maxConnections(clientSettings.maxSyncConnections); + clientBuilder.connectionAcquisitionTimeout(Duration.ofMillis(clientSettings.connectionAcquisitionTimeoutMillis)); return clientBuilder; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java index 4888764dbc720..f88aa46e61806 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. 
This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 8e1926d40302f..f84d953baae8e 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -75,6 +75,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("removal") @SuppressForbidden(reason = "test requires to set a System property to allow insecure settings when running in IDE") public class RepositoryCredentialsTests extends OpenSearchSingleNodeTestCase implements ConfigPathSupport { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 58ad290a31e85..2b45e9cfe2d4b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -916,6 +916,15 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitLessThanPageSize() testListBlobsByPrefixInLexicographicOrder(2, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } + /** + * Test the boundary value at page size to ensure + * unnecessary calls are not made to S3 by fetching the next page. 
+ * @throws IOException + */ + public void testListBlobsByPrefixInLexicographicOrderWithLimitEqualToPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(5, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSize() throws IOException { testListBlobsByPrefixInLexicographicOrder(8, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index f27c8387b6e45..b47749553aeba 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -74,6 +74,8 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.connectionTimeoutMillis, is(10 * 1000)); assertThat(defaultSettings.connectionTTLMillis, is(5 * 1000)); assertThat(defaultSettings.maxConnections, is(500)); + assertThat(defaultSettings.maxSyncConnections, is(500)); + assertThat(defaultSettings.connectionAcquisitionTimeoutMillis, is(15 * 60 * 1000)); assertThat(defaultSettings.maxRetries, is(3)); assertThat(defaultSettings.throttleRetries, is(true)); } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 9be83e30c3183..735cbd92b691a 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -16,7 +16,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Opentelemetry based telemetry implementation.' classname 'org.opensearch.telemetry.OTelTelemetryPlugin' - hasClientJar = true + hasClientJar = false } dependencies { diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 deleted file mode 100644 index 2c038aad4b934..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a5c081d8f877225732efe13908f350029c811709 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..e81b44b9e057f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.35.0.jar.sha1 @@ -0,0 +1 @@ +388c49986bc20f3b4bea58470eb16decd230c2db \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 deleted file mode 100644 index 3243f524432eb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5f8bb68084ea5709a27e935907b1bb49d0bd049 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..0054417ef7b30 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.35.0.jar.sha1 @@ -0,0 +1 @@ +6e2aa0e28c5069121cf11b2c93225942358f1423 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 
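For reference, a minimal sketch of node settings exercising the sync-connection pool introduced above. The "max_connections" and "max_sync_connections" suffixes come from the affix settings in this change; the "s3.client." prefix, the client name "default", and the numeric values are illustrative assumptions, not part of the diff.

import org.opensearch.common.settings.Settings;

class S3SyncConnectionSettingsSketch {
    // Hypothetical node settings for the "default" S3 client; the repository-s3
    // plugin resolves these keys through the affix settings shown above, and
    // S3Service now applies maxSyncConnections (plus the acquisition timeout)
    // to the synchronous client builder.
    static Settings exampleNodeSettings() {
        return Settings.builder()
            .put("s3.client.default.max_connections", 500)      // async client pool (existing setting)
            .put("s3.client.default.max_sync_connections", 200) // sync client pool (added in this change)
            .build();
    }
}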
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 deleted file mode 100644 index 1d7da47286ae0..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3643061da474061ffa7f2036a58a7a0d40212276 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..27f96d15d6a70 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.35.0.jar.sha1 @@ -0,0 +1 @@ +d0784bf59b74a2dc369551cc6d200e243ce8cca0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 deleted file mode 100644 index 3fab0e47adcbe..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab56c7223112fac13a66e3f667c5fc666f4a3707 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..dc21252f19d11 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.35.0.jar.sha1 @@ -0,0 +1 @@ +5f038bc2b9a1f415c8f74c4a35e0d92fae64c430 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 deleted file mode 100644 index f93cf7a63bfad..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5752d171cd08ac84f9273258a315bc5f97e1187e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..088f2475af00f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.35.0.jar.sha1 @@ -0,0 +1 @@ +1a7baebfbc6c569163bc74a5add9819cc411d582 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 deleted file mode 100644 index 2fc33b62aee54..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6b41cd66a385d513b58b6617f20b701435b64abd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..438e431f1a7d4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.35.0.jar.sha1 @@ -0,0 +1 @@ +5506cb34a43fb733564a2aee47763d34cada9a7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 deleted file mode 100644 index 99f758b047aa2..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9346006cead763247a786b5cabf3e1ae3c88eadb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..7fb0f09d60c0f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.35.0.jar.sha1 @@ -0,0 +1 @@ +d5abbce20bf88dff97b9ec7104bf13d163042f30 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 deleted file mode 100644 index 705a342a684c4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fab56e187e3fb3c70c18223184d53a76500114ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..8ec097d471e16 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.35.0-alpha.jar.sha1 @@ -0,0 +1 @@ +72720d7880110d02aad6d69066cc0311c568f17d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 deleted file mode 100644 index 31818695cc774..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -504de8cc7dc68e84c8c7c2757522d934e9c50d35 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..7c9200f50e438 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.35.0.jar.sha1 @@ -0,0 +1 @@ +82fe6a227fb3148aae2e61978cf77f7005a66bca \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 deleted file mode 100644 index 3cf3080a98bd9..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -454c7a6afab864de9f0c166246f28f16aaa824c1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..9fd80da7597c2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.35.0.jar.sha1 @@ -0,0 +1 @@ +5d2bc29d8f2ef2cf5a2239ac6990a2c89118456d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 deleted file mode 100644 index 41b0dca07556e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b054760243906af0a327a8f5bd99adc2826ccd88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..d4dc7528c83d6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.35.0.jar.sha1 @@ -0,0 +1 @@ +da2122051bd95d3a36bf34f72f1b0dd9b105fd1f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 deleted file mode 100644 index 2f71fd5cc780a..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bff24f085193e105d4e23e3db27bf81ccb3d830e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..7e1b206d42ba4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.35.0.jar.sha1 @@ -0,0 +1 @@ +97942849d51081e766a29646175b752bb79d7ce0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 deleted file mode 100644 index f0060b8a0f78f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d80ad3210fa890a856a1d04379d134ab44a09501 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..47125e70aa884 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.35.0.jar.sha1 @@ -0,0 +1 @@ +861b6a3c43a15ca3782f1fa17b024b9afa4b3ea6 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java index bcdcb657c4f42..e77e69d121036 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -15,6 +15,7 @@ import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.OpenSearchIntegTestCase; @@ -53,10 +54,13 @@ public void testSanityChecksWhenMetricsDisabled() throws Exception { Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); counter.add(1.0); + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "1"); + Thread.sleep(2000); assertTrue(metricsRegistry instanceof NoopMetricsRegistry); assertTrue(counter instanceof NoopCounter); + assertTrue(histogram instanceof NoopHistogram); } } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java index 
ed341595d327d..1b8f694709a9c 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -23,6 +23,7 @@ import java.util.stream.Collectors; import io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramPointData; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { @@ -92,6 +93,31 @@ public void testUpDownCounter() throws Exception { assertEquals(-1.0, value, 0.0); } + public void testHistogram() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "ms"); + histogram.record(2.0); + histogram.record(1.0); + histogram.record(3.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + ImmutableExponentialHistogramPointData histogramPointData = ((ImmutableExponentialHistogramPointData) ((ArrayList) exporter + .getFinishedMetricItems() + .stream() + .filter(a -> a.getName().contains("test-histogram")) + .collect(Collectors.toList()) + .get(0) + .getExponentialHistogramData() + .getPoints()).get(0)); + assertEquals(6.0, histogramPointData.getSum(), 0.0); + assertEquals(3.0, histogramPointData.getMax(), 0.0); + assertEquals(1.0, histogramPointData.getMin(), 0.0); + } + @After public void reset() { InMemorySingletonMetricsExporter.INSTANCE.reset(); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index 297ae8873636f..000fd09d43c18 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -53,7 +53,9 @@ public List<Setting<?>> getSettings() { OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, - OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING + OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY ); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java index 8e23f724b4570..95ce6918fcb70 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java @@ -13,15 +13,21 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; +import
org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.List; import io.opentelemetry.exporter.logging.LoggingMetricExporter; import io.opentelemetry.exporter.logging.LoggingSpanExporter; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; /** * OTel specific telemetry settings. @@ -66,7 +72,7 @@ private OTelTelemetrySettings() {} /** * Span Exporter type setting. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "removal" }) public static final Setting<Class<SpanExporter>> OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING = new Setting<>( "telemetry.otel.tracer.span.exporter.class", LoggingSpanExporter.class.getName(), @@ -90,7 +96,7 @@ private OTelTelemetrySettings() {} /** * Metrics Exporter type setting. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "removal" }) public static final Setting<Class<MetricExporter>> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>( "telemetry.otel.metrics.exporter.class", LoggingMetricExporter.class.getName(), @@ -110,4 +116,40 @@ private OTelTelemetrySettings() {} Setting.Property.NodeScope, Setting.Property.Final ); + + /** + * Samplers orders setting. + */ + @SuppressWarnings("unchecked") + public static final Setting<List<Class<Sampler>>> OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS = Setting.listSetting( + "telemetry.otel.tracer.span.sampler.classes", + Arrays.asList(ProbabilisticTransportActionSampler.class.getName(), ProbabilisticSampler.class.getName()), + sampler -> { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<Class<Sampler>>) () -> { + final ClassLoader loader = OTelSamplerFactory.class.getClassLoader(); + return (Class<Sampler>) loader.loadClass(sampler); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load sampler class: " + sampler, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Probability of action based sampler + */ + public static final Setting<Double> TRACER_SAMPLER_ACTION_PROBABILITY = Setting.doubleSetting( + "telemetry.tracer.action.sampler.probability", + 0.001d, + 0.000d, + 1.00d, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java new file mode 100644 index 0000000000000..73bb0d8adff62 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
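For reference, a minimal sketch of node settings exercising the two settings registered above. The setting keys are the ones declared in this change; the sampler class list simply restates the defaults, and the probability value mirrors the default of 0.001, so this is illustrative rather than required configuration.

import org.opensearch.common.settings.Settings;

class OTelSamplerSettingsSketch {
    // Hypothetical node settings: order of the sampler classes determines the
    // order of the chain built by OTelSamplerFactory (first class is the head).
    static Settings exampleNodeSettings() {
        return Settings.builder()
            .putList(
                "telemetry.otel.tracer.span.sampler.classes",
                "org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler",
                "org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler"
            )
            .put("telemetry.tracer.action.sampler.probability", 0.001d)
            .build();
    }
}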
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleHistogram; + +/** + * OTel aware implementation of {@link Histogram} + */ +class OTelHistogram implements Histogram { + + private final DoubleHistogram otelDoubleHistogram; + + /** + * Constructor + * @param otelDoubleCounter delegate histogram. + */ + public OTelHistogram(DoubleHistogram otelDoubleCounter) { + this.otelDoubleHistogram = otelDoubleCounter; + } + + @Override + public void record(double value) { + otelDoubleHistogram.record(value); + } + + @Override + public void record(double value, Tags tags) { + otelDoubleHistogram.record(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java index 6160e5106c041..82ae2cdd198b2 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -17,6 +17,7 @@ import java.security.PrivilegedAction; import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleHistogram; import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; @@ -42,6 +43,7 @@ public OTelMetricsTelemetry(RefCountedReleasable<OpenTelemetrySdk> openTelemetry this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } + @SuppressWarnings("removal") @Override public Counter createCounter(String name, String description, String unit) { DoubleCounter doubleCounter = AccessController.doPrivileged( @@ -54,6 +56,7 @@ public Counter createCounter(String name, String description, String unit) { return new OTelCounter(doubleCounter); } + @SuppressWarnings("removal") @Override public Counter createUpDownCounter(String name, String description, String unit) { DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged( @@ -66,6 +69,23 @@ public Counter createUpDownCounter(String name, String description, String unit) { return new OTelUpDownCounter(doubleUpDownCounter); } + /** + * Creates the OTel Histogram. In {@link org.opensearch.telemetry.tracing.OTelResourceProvider} + * we can configure the bucketing/aggregation strategy through a view. The default strategy configured + * is the {@link io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation}. + * @param name name of the histogram. + * @param description any description about the metric. + * @param unit unit of the metric.
+ * @return histogram + */ + @Override + public Histogram createHistogram(String name, String description, String unit) { + DoubleHistogram doubleHistogram = AccessController.doPrivileged( + (PrivilegedAction<DoubleHistogram>) () -> otelMeter.histogramBuilder(name).setUnit(unit).setDescription(description).build() + ); + return new OTelHistogram(doubleHistogram); + } + @Override public void close() throws IOException { meterProvider.close(); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java index ef5a31e4003ca..9c548044484fd 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java @@ -51,6 +51,7 @@ public static MetricExporter create(Settings settings) { return metricExporter; } + @SuppressWarnings("removal") private static MetricExporter instantiateExporter(Class<MetricExporter> exporterProviderClass) { try { // Check we ourselves are not being called by unprivileged code. diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index 14a19f122c17b..475fc09d04bff 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -12,7 +12,7 @@ import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; -import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; import org.opensearch.telemetry.tracing.sampler.RequestSampler; import java.security.AccessController; @@ -23,8 +23,12 @@ import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; import io.opentelemetry.context.propagation.ContextPropagators; import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.View; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; @@ -40,6 +44,7 @@ * This class encapsulates all OpenTelemetry related resources */ public final class OTelResourceProvider { + private OTelResourceProvider() {} /** @@ -48,13 +53,14 @@ private OTelResourceProvider() {} * @param settings cluster settings * @return OpenTelemetrySdk instance */ + @SuppressWarnings("removal") public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) { return AccessController.doPrivileged( (PrivilegedAction<OpenTelemetrySdk>) () -> get( settings, OTelSpanExporterFactory.create(settings), ContextPropagators.create(W3CTraceContextPropagator.getInstance()), - Sampler.parentBased(new 
RequestSampler(new ProbabilisticSampler(telemetrySettings))) + Sampler.parentBased(new RequestSampler(OTelSamplerFactory.create(telemetrySettings, settings))) ) ); } @@ -91,6 +97,10 @@ private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resou .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) .build() ) + .registerView( + InstrumentSelector.builder().setType(InstrumentType.HISTOGRAM).build(), + View.builder().setAggregation(Base2ExponentialHistogramAggregation.getDefault()).build() + ) .build(); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java index da7ce5c47d9ca..e9d7e78882c7d 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java @@ -51,6 +51,7 @@ public static SpanExporter create(Settings settings) { return spanExporter; } + @SuppressWarnings("removal") private static SpanExporter instantiateSpanExporter(Class<SpanExporter> spanExporterProviderClass) { try { // Check we ourselves are not being called by unprivileged code. diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java new file mode 100644 index 0000000000000..b9d5c07a40cd8 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.ListIterator; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +/** + * Factory class that creates the configured OTel {@link Sampler} instances. + */ +public class OTelSamplerFactory { + + /** + * Logger instance for logging messages related to the OTelSamplerFactory. + */ + private static final Logger logger = LogManager.getLogger(OTelSamplerFactory.class); + + /** + * Base constructor. + */ + private OTelSamplerFactory() { + + } + + /** + * Creates the chain of {@link Sampler} instances based on the OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS value. + * + * @param telemetrySettings TelemetrySettings. + * @param settings the settings + * @return the head of the configured sampler chain.
+ */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings) { + List<Class<Sampler>> samplersNameList = OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.get(settings); + ListIterator<Class<Sampler>> li = samplersNameList.listIterator(samplersNameList.size()); + + Sampler fallbackSampler = null; + + // Iterating samplers list in reverse order to create chain of sampler + while (li.hasPrevious()) { + Class<Sampler> samplerName = li.previous(); + fallbackSampler = instantiateSampler(samplerName, telemetrySettings, settings, fallbackSampler); + } + + return fallbackSampler; + } + + private static Sampler instantiateSampler( + Class<Sampler> samplerClassName, + TelemetrySettings telemetrySettings, + Settings settings, + Sampler fallbackSampler + ) { + try { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + + return AccessController.doPrivileged((PrivilegedExceptionAction<Sampler>) () -> { + try { + // Define the method type which receives TelemetrySettings & Sampler as arguments + MethodType methodType = MethodType.methodType(Sampler.class, TelemetrySettings.class, Settings.class, Sampler.class); + + return (Sampler) MethodHandles.publicLookup() + .findStatic(samplerClassName, "create", methodType) + .invokeExact(telemetrySettings, settings, fallbackSampler); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create method exist in [" + samplerClassName + "]", e.getCause()); + } else { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClassName + "]", e.getCause()); + } + } + }); + } catch (Exception e) { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClassName + "]", e.getCause()); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java index 774070aa39df6..d7fe92b1f3495 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing.sampler; +import org.opensearch.common.settings.Settings; import org.opensearch.telemetry.TelemetrySettings; import java.util.List; @@ -18,14 +19,18 @@ import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; import io.opentelemetry.sdk.trace.samplers.SamplingResult; /** - * ProbabilisticSampler implements a head-based sampling strategy based on provided settings. + * ProbabilisticSampler implements a probability sampling strategy based on configured sampling ratio. */ public class ProbabilisticSampler implements Sampler { private Sampler defaultSampler; private final TelemetrySettings telemetrySettings; + private final Settings settings; + private final Sampler fallbackSampler; + private double samplingRatio; /** @@ -33,21 +38,24 @@ public class ProbabilisticSampler implements Sampler { * * @param telemetrySettings Telemetry settings. 
*/ - public ProbabilisticSampler(TelemetrySettings telemetrySettings) { + private ProbabilisticSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); this.samplingRatio = telemetrySettings.getSamplingProbability(); this.defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + this.fallbackSampler = fallbackSampler; } - Sampler getSampler() { - double newSamplingRatio = telemetrySettings.getSamplingProbability(); - if (isSamplingRatioChanged(newSamplingRatio)) { - synchronized (this) { - this.samplingRatio = newSamplingRatio; - defaultSampler = Sampler.traceIdRatioBased(samplingRatio); - } - } - return defaultSampler; + /** + * Create probabilistic sampler. + * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticSampler(telemetrySettings, settings, fallbackSampler); } private boolean isSamplingRatioChanged(double newSamplingRatio) { @@ -67,7 +75,19 @@ public SamplingResult shouldSample( Attributes attributes, List<LinkData> parentLinks ) { - return getSampler().shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + double newSamplingRatio = telemetrySettings.getSamplingProbability(); + if (isSamplingRatioChanged(newSamplingRatio)) { + synchronized (this) { + this.samplingRatio = newSamplingRatio; + defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + } + } + final SamplingResult result = defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } else { + return result; + } } @Override diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java new file mode 100644 index 0000000000000..93a8edaaaa760 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
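For reference, a minimal sketch of how the samplers introduced in this change compose. The factory and constructor calls mirror the wiring shown in OTelResourceProvider above; the wrapper class, method, and parameter names here are assumptions for illustration only.

import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.TelemetrySettings;
import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory;
import org.opensearch.telemetry.tracing.sampler.RequestSampler;

import io.opentelemetry.sdk.trace.samplers.Sampler;

class SamplerChainSketch {
    // With the default class list, create(...) returns ProbabilisticTransportActionSampler
    // with ProbabilisticSampler as its fallback; RequestSampler sits in front, honors an
    // explicit "trace" attribute, and otherwise delegates to that chain.
    static Sampler rootSampler(TelemetrySettings telemetrySettings, Settings settings) {
        Sampler chain = OTelSamplerFactory.create(telemetrySettings, settings);
        return Sampler.parentBased(new RequestSampler(chain));
    }
}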
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.List; +import java.util.Objects; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; + +/** + * ProbabilisticTransportActionSampler sampler samples request with action based on defined probability + */ +public class ProbabilisticTransportActionSampler implements Sampler { + + private final Sampler fallbackSampler; + private Sampler actionSampler; + private final TelemetrySettings telemetrySettings; + private final Settings settings; + private double actionSamplingRatio; + + /** + * Creates ProbabilisticTransportActionSampler sampler + * @param telemetrySettings TelemetrySettings + */ + private ProbabilisticTransportActionSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); + this.actionSamplingRatio = OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY.get(settings); + this.actionSampler = Sampler.traceIdRatioBased(actionSamplingRatio); + this.fallbackSampler = fallbackSampler; + } + + /** + * Create probabilistic transport action sampler. 
+ * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic transport action sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticTransportActionSampler(telemetrySettings, settings, fallbackSampler); + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List<LinkData> parentLinks + ) { + final String action = attributes.get(AttributeKey.stringKey(TRANSPORT_ACTION)); + if (action != null) { + final SamplingResult result = actionSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return result; + } + if (fallbackSampler != null) return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + + return SamplingResult.drop(); + } + + double getSamplingRatio() { + return actionSamplingRatio; + } + + @Override + public String getDescription() { + return "Transport Action Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java index 9ea681370a3ec..87c2849173aff 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java @@ -18,21 +18,20 @@ import io.opentelemetry.sdk.trace.samplers.Sampler; import io.opentelemetry.sdk.trace.samplers.SamplingResult; +import static org.opensearch.telemetry.tracing.AttributeNames.TRACE; + /** - * HeadBased sampler + * RequestSampler based on HeadBased sampler */ public class RequestSampler implements Sampler { - private final Sampler defaultSampler; - - // TODO: Pick value of TRACE from PR #9415. - private static final String TRACE = "trace"; + private final Sampler fallbackSampler; /** - * Creates Head based sampler - * @param defaultSampler defaultSampler + * Creates request sampler which applies based on all applicable sampler + * @param fallbackSampler Sampler */ - public RequestSampler(Sampler defaultSampler) { - this.defaultSampler = defaultSampler; + public RequestSampler(Sampler fallbackSampler) { + this.fallbackSampler = fallbackSampler; } @Override @@ -44,15 +43,15 @@ public SamplingResult shouldSample( Attributes attributes, List<LinkData> parentLinks ) { - final String trace = attributes.get(AttributeKey.stringKey(TRACE)); if (trace != null) { return (Boolean.parseBoolean(trace) == true) ? 
SamplingResult.recordAndSample() : SamplingResult.drop(); - } else { - return defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); } - + if (fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return SamplingResult.recordAndSample(); } @Override diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java index 2fcf89947e537..4a1301588dad2 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java @@ -30,9 +30,11 @@ import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY; import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; @@ -69,7 +71,9 @@ public void testGetTelemetry() { TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, - OTEL_METRICS_EXPORTER_CLASS_SETTING + OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTEL_METRICS_EXPORTER_CLASS_SETTING, + TRACER_SAMPLER_ACTION_PROBABILITY ), oTelTelemetryPlugin.getSettings() ); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java index 9de575b69774a..4b39e3d0d607d 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -17,12 +17,15 @@ import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.metrics.DoubleCounter; import io.opentelemetry.api.metrics.DoubleCounterBuilder; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.DoubleHistogramBuilder; import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder; import io.opentelemetry.api.metrics.LongCounterBuilder; import io.opentelemetry.api.metrics.LongUpDownCounterBuilder; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import org.mockito.Mockito; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -118,4 +121,32 @@ public void testUpDownCounter() { counter.add(-2.0, tags); verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags)); } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public 
void testHistogram() { + String histogramName = "test-histogram"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleHistogram mockOTelDoubleHistogram = mock(DoubleHistogram.class); + DoubleHistogramBuilder mockOTelDoubleHistogramBuilder = mock(DoubleHistogramBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.histogramBuilder(Mockito.contains(histogramName))).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setDescription(description)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setUnit(unit)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.build()).thenReturn(mockOTelDoubleHistogram); + + Histogram histogram = metricsTelemetry.createHistogram(histogramName, description, unit); + histogram.record(1.0); + verify(mockOTelDoubleHistogram).record(1.0); + Tags tags = Tags.create().addTag("test", "test"); + histogram.record(2.0, tags); + verify(mockOTelDoubleHistogram).record(2.0, OTelAttributesConverter.convert(tags)); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java new file mode 100644 index 0000000000000..39ccf299dfdc4 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class OTelSamplerFactoryTests extends OpenSearchTestCase { + + public void testDefaultCreate() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + assertEquals(sampler.getClass(), ProbabilisticTransportActionSampler.class); + } + + public void testCreateWithSingleSampler() { + Settings settings = Settings.builder() + .put(OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.getKey(), ProbabilisticSampler.class.getName()) + .build(); + + ClusterSettings clusterSettings = new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, settings); + assertTrue(sampler instanceof ProbabilisticSampler); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java index 639dc341ef0db..a094cd0119f5e 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java @@ -15,18 +15,21 @@ import java.util.Set; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.samplers.Sampler; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.mockito.Mockito.mock; public class ProbabilisticSamplerTests extends OpenSearchTestCase { // When ProbabilisticSampler is created with OTelTelemetrySettings as null public void testProbabilisticSamplerWithNullSettings() { // Verify that the constructor throws IllegalArgumentException when given null settings - assertThrows(NullPointerException.class, () -> { new ProbabilisticSampler(null); }); + assertThrows(NullPointerException.class, () -> { ProbabilisticSampler.create(null, null, null); }); } public void testDefaultGetSampler() { @@ -37,10 +40,9 @@ public void testDefaultGetSampler() { ); // Probabilistic Sampler - ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); - assertNotNull(probabilisticSampler.getSampler()); - assertEquals(0.01, probabilisticSampler.getSamplingRatio(), 0.0d); + 
assertEquals(0.01, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); } public void testGetSamplerWithUpdatedSamplingRatio() { @@ -51,14 +53,16 @@ public void testGetSamplerWithUpdatedSamplingRatio() { ); // Probabilistic Sampler - ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); - assertEquals(0.01d, probabilisticSampler.getSamplingRatio(), 0.0d); + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); + + assertEquals(0.01d, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); telemetrySettings.setSamplingProbability(0.02); + // Need to call shouldSample() to update the value of samplingRatio + probabilisticSampler.shouldSample(mock(Context.class), "00000000000000000000000000000000", "", SpanKind.INTERNAL, null, null); + // Need to call getSampler() to update the value of tracerHeadSamplerSamplingRatio - Sampler updatedProbabilisticSampler = probabilisticSampler.getSampler(); - assertEquals(0.02, probabilisticSampler.getSamplingRatio(), 0.0d); + assertEquals(0.02, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); } - } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java new file mode 100644 index 0000000000000..261b0252fef60 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.Set; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; +import static org.mockito.Mockito.mock; + +public class ProbabilisticTransportActionSamplerTests extends OpenSearchTestCase { + + public void testGetSamplerWithActionSamplingRatio() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // ProbabilisticTransportActionSampler + Sampler probabilisticTransportActionSampler = ProbabilisticTransportActionSampler.create(telemetrySettings, Settings.EMPTY, null); + + SamplingResult result = probabilisticTransportActionSampler.shouldSample( + mock(Context.class), + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(), + Collections.emptyList() + ); + // Verify that ProbabilisticTransportActionSampler returned SamplingResult.recordAndSample() as all actions will be sampled + assertEquals(SamplingResult.recordAndSample(), result); + assertEquals(0.001, ((ProbabilisticTransportActionSampler) probabilisticTransportActionSampler).getSamplingRatio(), 0.000d); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java index facf04623ec46..da234ca13dc9d 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java @@ -8,9 +8,14 @@ package org.opensearch.telemetry.tracing.sampler; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Collections; +import java.util.Set; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -19,29 +24,29 @@ import io.opentelemetry.sdk.trace.samplers.Sampler; import io.opentelemetry.sdk.trace.samplers.SamplingResult; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import 
static org.mockito.Mockito.when; public class RequestSamplerTests extends OpenSearchTestCase { + private ClusterSettings clusterSettings; + private TelemetrySettings telemetrySettings; + private RequestSampler requestSampler; + private Context parentContext; + + @Before + public void init() { + clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler fallbackSampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + requestSampler = new RequestSampler(fallbackSampler); + parentContext = mock(Context.class); + } public void testShouldSampleWithTraceAttributeAsTrue() { - - // Create a mock default sampler - Sampler defaultSampler = mock(Sampler.class); - when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn(SamplingResult.drop()); - - // Create an instance of HeadSampler with the mock default sampler - RequestSampler requestSampler = new RequestSampler(defaultSampler); - - // Create a mock Context and Attributes - Context parentContext = mock(Context.class); Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "true"); - // Call shouldSample on HeadSampler SamplingResult result = requestSampler.shouldSample( parentContext, "traceId", @@ -50,43 +55,85 @@ public void testShouldSampleWithTraceAttributeAsTrue() { attributes, Collections.emptyList() ); - assertEquals(SamplingResult.recordAndSample(), result); + } + + public void testShouldSampleWithTraceAttributeAsFalse() { + Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "false"); - // Verify that the default sampler's shouldSample method was not called - verify(defaultSampler, never()).shouldSample(any(), anyString(), anyString(), any(), any(), any()); + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + assertEquals(SamplingResult.drop(), result); } - public void testShouldSampleWithoutTraceAttribute() { + public void testShouldSampleForProbabilisticSampler() { + clusterSettings.applySettings( + Settings.builder() + .put("telemetry.tracer.sampler.probability", "1.0") + .put("telemetry.otel.tracer.span.sampler.classes", "org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler") + .build() + ); + + Attributes attributes = Attributes.builder().build(); + + SamplingResult result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); - // Create a mock default sampler - Sampler defaultSampler = mock(Sampler.class); - when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn( - SamplingResult.recordAndSample() + // Verify that request is sampled + assertEquals(SamplingResult.recordAndSample(), result); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.0").build()); + result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() ); + assertEquals(SamplingResult.drop(), result); - // Create an instance of HeadSampler with the mock default sampler - RequestSampler requestSampler = new RequestSampler(defaultSampler); + } - // Create a mock Context and Attributes + public void 
testShouldSampleForProbabilisticTransportActionSampler() { + clusterSettings.applySettings( + Settings.builder() + .put( + "telemetry.otel.tracer.span.sampler.classes", + "org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler" + ) + .build() + ); + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.action.sampler.probability", "1.0").build()); + + // Create a mock Context and Attributes with dummy action Context parentContext = mock(Context.class); - Attributes attributes = Attributes.empty(); + Attributes attributes = Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(); - // Call shouldSample on HeadSampler + // Calling shouldSample to update samplingRatio SamplingResult result = requestSampler.shouldSample( parentContext, - "traceId", + "00000000000000000000000000000000", "spanName", SpanKind.INTERNAL, attributes, Collections.emptyList() ); - // Verify that HeadSampler returned SamplingResult.recordAndSample() + // Verify that request is sampled assertEquals(SamplingResult.recordAndSample(), result); - - // Verify that the default sampler's shouldSample method was called - verify(defaultSampler).shouldSample(any(), anyString(), anyString(), any(), any(), any()); } } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 0232fc58f9357..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 987b524aedc98..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 deleted file mode 100644 index 5eaf96739ed72..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 new file mode 100644 index 0000000000000..c30a99a2338b4 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 @@ -0,0 +1 @@ +3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 deleted file mode 100644 index 091125169c696..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 new file mode 100644 index 0000000000000..ab3171cd02b73 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 @@ -0,0 +1 @@ +c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java 
b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java index db532f9a1c503..4ea23e415c994 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java @@ -50,6 +50,7 @@ * Unit tests for OpenSearchPolicy: these cannot run with security manager, * we don't allow messing with the policy */ +@SuppressWarnings("removal") public class OpenSearchPolicyUnitTests extends OpenSearchTestCase { /** * Test policy with null codesource. diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java index 56d6c72705a78..99c9ee7e96d01 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java @@ -41,6 +41,7 @@ public class SystemCallFilterTests extends OpenSearchTestCase { /** command to try to run in tests */ static final String EXECUTABLE = Constants.WINDOWS ? "calc" : "ls"; + @SuppressWarnings("removal") @Override public void setUp() throws Exception { super.setUp(); diff --git a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java index 430df1f899708..04eae95f6fe12 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; /** Tests plugin manager security check */ +@SuppressWarnings("removal") public class PluginSecurityTests extends OpenSearchTestCase { /** Test that we can parse the set of permissions correctly for a simple policy */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java new file mode 100644 index 0000000000000..d4e7017aab8c2 --- /dev/null +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.upgrades; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends AbstractFullClusterRestartTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + // Any issue in PluginInfo serialization logic will result in connection failures + // and hence reduced number of nodes. 
+ assertEquals(2, nodeMap.keySet().size()); + } +} diff --git a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java index 73c546b80d431..c2f799d7d48d2 100644 --- a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -62,6 +62,7 @@ public void testSuccessfulStartupWithCustomConfig() throws Exception { }); } + @SuppressWarnings("removal") private List<String> readAllLines(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<List<String>>) () -> { try { diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java new file mode 100644 index 0000000000000..47e454a7549cb --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.backwards; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends OpenSearchRestTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + assertEquals(4, nodeMap.keySet().size()); + } +} diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 3dff452be855f..777377f04e8b9 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -62,6 +62,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { setting 'repositories.url.allowed_urls', 'http://snapshot.test*' setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'http.content_type.required', 'true' + systemProperty 'opensearch.experimental.optimize_doc_id_lookup.fuzzy_set.enabled', 'true' } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f963f8d221bb5..8e8734b5d62b3 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -40,10 +40,10 @@ import org.opensearch.common.Booleans; import org.opensearch.common.io.Streams; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.EngineConfig; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -262,7 +262,6 @@ public void testIndexing() throws Exception { * @throws Exception if index creation fail * @throws UnsupportedOperationException if 
cluster type is unknown */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679") public void testIndexingWithSegRep() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); @@ -345,6 +344,88 @@ public void testIndexingWithSegRep() throws Exception { } } + public void testIndexingWithFuzzyFilterPostings() throws Exception { + if (UPGRADE_FROM_VERSION.onOrBefore(Version.V_2_11_1)) { + logger.info("--> Skip test for version {} where fuzzy filter postings format feature is not available", UPGRADE_FROM_VERSION); + return; + } + final String indexName = "test-index-fuzzy-set"; + final int shardCount = 3; + final int replicaCount = 1; + logger.info("--> Case {}", CLUSTER_TYPE); + printClusterNodes(); + logger.info("--> _cat/shards before test execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); + switch (CLUSTER_TYPE) { + case OLD: + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), replicaCount) + .put( + EngineConfig.INDEX_CODEC_SETTING.getKey(), + randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + }) + ) + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); + createIndex(indexName, settings.build()); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + bulk(indexName, "_OLD", 5); + break; + case MIXED: + waitForClusterHealthWithNoShardMigration(indexName, "yellow"); + break; + case UPGRADED: + Settings.Builder settingsBuilder = Settings.builder() + .put(IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true); + updateIndexSettings(indexName, settingsBuilder); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount(indexName, expectedCount); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk(indexName, "_" + CLUSTER_TYPE, 5); + logger.info("--> Index one doc (to be deleted next) and verify doc count"); + Request toBeDeleted = new Request("PUT", "/" + indexName + "/_doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount(indexName, expectedCount + 6); + + logger.info("--> Delete previously added doc and verify doc count"); + Request delete = new Request("DELETE", "/" + indexName + "/_doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + assertCount(indexName, expectedCount + 5); + + //forceMergeAndVerify(indexName, shardCount * (1 + replicaCount)); + } + } + public void testAutoIdWithOpTypeCreate() throws IOException { final String indexName = "auto_id_and_op_type_create_index"; StringBuilder b = new StringBuilder(); diff --git 
a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java index d14c834405f32..8a6e5d62112c8 100644 --- a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -61,6 +61,7 @@ protected Matcher<String> nodeNameMatcher() { return equalTo(HOSTNAME); } + @SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> { diff --git a/release-notes/opensearch.release-notes-2.12.0.md b/release-notes/opensearch.release-notes-2.12.0.md new file mode 100644 index 0000000000000..49955c1f969f0 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.12.0.md @@ -0,0 +1,175 @@ +## 2024-02-09 Version 2.12.0 Release Notes + +## [2.12.0] +### Added +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Remove ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967), [#11983](https://github.com/opensearch-project/OpenSearch/pull/11983)) +- [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) +- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) +- [Remote cluster state] Restore cluster state version during remote state auto restore 
([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Enable must_exist parameter for update aliases API ([#11210](https://github.com/opensearch-project/OpenSearch/pull/11210)) +- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) +- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) +- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) +- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) +- Add experimental SIMD implementation of B-tree to round down dates ([#11194](https://github.com/opensearch-project/OpenSearch/issues/11194)) +- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329)) +- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) +- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378)) +- Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) +- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) +- Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870)) +- Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Bump OpenTelemetry 
from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) +- Add remove_by_pattern ingest processor ([#11920](https://github.com/opensearch-project/OpenSearch/pull/11920)) +- Support index level allocation filtering for searchable snapshot index ([#11563](https://github.com/opensearch-project/OpenSearch/pull/11563)) +- Add `org.opensearch.rest.MethodHandlers` and `RestController#getAllHandlers` ([11876](https://github.com/opensearch-project/OpenSearch/pull/11876)) +- New DateTime format for RFC3339 compatible date fields ([#11465](https://github.com/opensearch-project/OpenSearch/pull/11465)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Add support for Google Application Default Credentials in repository-gcs ([#8394](https://github.com/opensearch-project/OpenSearch/pull/8394)) +- Remove concurrent segment search feature flag for GA launch ([#12074](https://github.com/opensearch-project/OpenSearch/pull/12074)) +- Enable Fuzzy codec for doc id fields using a bloom filter ([#11022](https://github.com/opensearch-project/OpenSearch/pull/11022)) +- [Metrics Framework] Adds support for Histogram metric ([#12062](https://github.com/opensearch-project/OpenSearch/pull/12062)) + +### Dependencies +- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560), [#11796](https://github.com/opensearch-project/OpenSearch/pull/11796)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.8.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630), [#12167](https://github.com/opensearch-project/OpenSearch/pull/12167)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 
([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695)) +- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) +- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) +- Bump `actions/github-script` from 6 to 7.0.1 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271), [#12166](https://github.com/opensearch-project/OpenSearch/pull/12166)) +- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) +- Bump `netty` from 4.1.100.Final to 4.1.106.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775)), [#12034](https://github.com/opensearch-project/OpenSearch/pull/12034)) +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) +- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) +- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) +- Bump `reactor-netty-core` from 1.1.12 to 1.1.15 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)), ([#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.2 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629), [#12056](https://github.com/opensearch-project/OpenSearch/pull/12056)) +- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) +- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) +- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448)) +- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521)) +- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539)) +- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555)) +- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) +- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) +- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) +- Bump 
`com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) +- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) +- Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) +- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) +- Bump `Lucene` from 9.8.0 to 9.9.2 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)), ([#12063](https://github.com/opensearch-project/OpenSearch/pull/12063)) +- Bump `com.networknt:json-schema-validator` from 1.0.86 to 1.2.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886), [#11963](https://github.com/opensearch-project/OpenSearch/pull/11963)) +- Bump `com.google.api:gax-httpjson` from 0.103.1 to 2.42.0 ([#11794](https://github.com/opensearch-project/OpenSearch/pull/11794), [#12165](https://github.com/opensearch-project/OpenSearch/pull/12165)) +- Bump `com.google.oauth-client:google-oauth-client` from 1.34.1 to 1.35.0 ([#11960](https://github.com/opensearch-project/OpenSearch/pull/11960)) +- Bump `com.diffplug.spotless` from 6.23.2 to 6.25.0 ([#11962](https://github.com/opensearch-project/OpenSearch/pull/11962), [#12055](https://github.com/opensearch-project/OpenSearch/pull/12055)) +- Bump `com.google.cloud:google-cloud-core` from 2.5.10 to 2.30.0 ([#11961](https://github.com/opensearch-project/OpenSearch/pull/11961)) +- Bump `reactor-core` from 3.5.11 to 3.5.14 ([#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `org.apache.shiro:shiro-core` from 1.11.0 to 1.13.0 ([#12200](https://github.com/opensearch-project/OpenSearch/pull/12200)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.3 to 1.44.1 ([#12059](https://github.com/opensearch-project/OpenSearch/pull/12059)) +- Bump `peter-evans/create-issue-from-file` from 4 to 5 ([#12057](https://github.com/opensearch-project/OpenSearch/pull/12057)) +- Bump `org.gradle.test-retry` from 1.5.4 to 1.5.8 ([#12168](https://github.com/opensearch-project/OpenSearch/pull/12168)) +- Bump `org.apache.kerby:kerb-admin` from 1.0.1 to 2.0.3 ([#12194](https://github.com/opensearch-project/OpenSearch/pull/12194)) + +### Changed +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)), ([#11751](https://github.com/opensearch-project/OpenSearch/pull/11751)) +- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) +- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) +- [Remote Store] Add Remote Store backpressure rejection 
stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) +- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) +- Apply the fast filter optimization to composite aggregation of date histogram source ([#11505](https://github.com/opensearch-project/OpenSearch/pull/11505)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) +- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) +- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) +- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) +- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) +- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) +- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) +- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) +- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) +- Added support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) +- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890)) +- Extract cluster management for integration tests into JUnit test rule out of OpenSearchIntegTestCase ([#11877](https://github.com/opensearch-project/OpenSearch/pull/11877)), 
([#12000](https://github.com/opensearch-project/OpenSearch/pull/12000)) +- Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2 ([#11968](https://github.com/opensearch-project/OpenSearch/pull/11968)) +- Updates IpField to be searchable when only `doc_values` are enabled ([#11508](https://github.com/opensearch-project/OpenSearch/pull/11508)) +- [Query Insights] Query Insights Framework which currently supports retrieving the most time-consuming queries within the last configured time window ([#11903](https://github.com/opensearch-project/OpenSearch/pull/11903)) +- [Query Insights] Implement Top N Queries feature to collect and gather information about high latency queries in a window ([#11904](https://github.com/opensearch-project/OpenSearch/pull/11904)) +- Add override support for sampling based on action ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Added custom sampler support based on transport action in request ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Disable concurrent search for composite aggregation([#12375](https://github.com/opensearch-project/OpenSearch/pull/12375)) + +### Removed +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) + +### Fixed +- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) +- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) +- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) +- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543)) +- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) +- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) +- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) +- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) +- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) +- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) +- Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) +- Fix bug where replication lag grows post primary relocation 
([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) +- Fix noop_update_total metric in indexing stats cannot be updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485),[#11917](https://github.com/opensearch-project/OpenSearch/pull/11917)) +- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) +- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) +- Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) +- Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) +- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) +- Fix parsing of single line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815)) +- Fix memory leak issue in ReorganizingLongHash ([#11953](https://github.com/opensearch-project/OpenSearch/issues/11953)) +- Prevent setting remote_snapshot store type on index creation ([#11867](https://github.com/opensearch-project/OpenSearch/pull/11867)) +- [BUG] Fix remote shards balancer when filtering throttled nodes ([#11724](https://github.com/opensearch-project/OpenSearch/pull/11724)) +- Add advance(int) for numeric values in order to allow point based optimization to kick in ([#12089](https://github.com/opensearch-project/OpenSearch/pull/12089)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e0fbeeb83ffc4..e78d49a67a98a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -229,6 +229,11 @@ "search_pipeline": { "type": "string", "description": "The search pipeline to use to execute this request" + }, + "include_named_queries_score":{ + "type": "boolean", + "description":"Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false)", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index b572ed9e62ea9..29fbf55417961 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,7 +1,7 @@ "Help": - skip: version: " - 2.11.99" - reason: deleted docs added in 2.12.0 + reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: @@ -66,6 +66,10 @@ search.query_current .+ \n search.query_time .+ \n search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n search.scroll_current .+ \n search.scroll_time .+ \n search.scroll_total .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 00ec838489f63..39c8040993f2a 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -11,6 +11,33 @@ / #node_name name active queue rejected ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + - do: + cat.thread_pool: + thread_pool_patterns: search,search_throttled,generic + h: name,total_wait_time,twt + v: true + + - match: + $body: | + /^ name \s+ total_wait_time \s+ twt \n + (generic \s+ -1 \s+ -1 \n + search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ + +--- +"Test cat thread_pool total_wait_time output with concurrent search thread_pool": + - skip: + version: " - 2.11.99" + reason: index_search thread_pool was introduced in V_2.12.0 + + - do: + cat.thread_pool: {} + + - match: + $body: | + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + - do: cat.thread_pool: thread_pool_patterns: search,search_throttled,index_searcher,generic @@ -21,6 +48,7 @@ $body: | /^ name \s+ total_wait_time \s+ twt \n (generic \s+ -1 \s+ -1 \n + index_searcher \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml index a36f807e63e0e..a65908b238013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml @@ -1,9 +1,5 @@ --- "Return empty object if field doesn't exist, but index does": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/opensearch-project/OpenSearch/issues/2440" - - do: indices.create: index: test_index @@ -18,7 +14,5 @@ indices.get_field_mapping: index: test_index fields: not_existent - ignore: 404 # ignore 404 failures for now - # see: https://github.com/opensearch-project/OpenSearch/issues/2440 - match: { 'test_index.mappings': {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml index dd8c2a2deb721..2192873623715 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml @@ -23,8 +23,8 @@ setup: --- "Test noop_update_total metric can be updated by both update API and bulk API": - skip: - version: " - 2.99.99" #TODO: change to 2.11.99 after the PR is backported to 2.x branch - reason: "fixed in 3.0" + version: " - 2.11.99" + reason: "fixed in 2.12.0" - do: update: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml index dbf6a4fad3295..b9457f0290897 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml @@ -1,8 +1,8 @@ --- "Throw aliases missing exception when removing non-existing alias with setting must_exist to true": - skip: - 
version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -47,8 +47,8 @@ --- "Throw aliases missing exception when all of the specified aliases are non-existing": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -81,8 +81,8 @@ --- "Remove successfully when some specified aliases are non-existing": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -116,8 +116,8 @@ --- "Remove silently when all of the specified aliases are non-existing and must_exist is false": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 09278690f5d05..2808be8cd7045 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -509,6 +509,134 @@ setup: - match: { aggregations.1.2.buckets.1.key.nested: 1000 } - match: { aggregations.1.2.buckets.1.doc_count: 1 } +--- +"Composite aggregation with filtered nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 2 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 10 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + - match: { aggregations.1.2.3.buckets.1.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.1.doc_count: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + after: { "nested": 10 } + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 1 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + +--- +"Composite aggregation with filtered reverse nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 20 + aggs: + 3: + reverse_nested: {} + aggs: + 4: + composite: + sources: [ + { + "long": { + "terms": { + "field": "long" + } + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.4.buckets: 4 } + - match: { aggregations.1.2.3.4.buckets.0.key.long: 0 } + - match: { aggregations.1.2.3.4.buckets.0.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.0.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.1.key.long: 10 } + - match: { aggregations.1.2.3.4.buckets.1.key.kw: "foo" 
} + - match: { aggregations.1.2.3.4.buckets.1.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.2.key.long: 20 } + - match: { aggregations.1.2.3.4.buckets.2.key.kw: "foo" } + - match: { aggregations.1.2.3.4.buckets.2.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.3.key.long: 100 } + - match: { aggregations.1.2.3.4.buckets.3.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.3.doc_count: 1 } + --- "Composite aggregation with unmapped field": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index a18ac45e62175..4ee905972d106 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -78,3 +78,15 @@ setup: index: test1 body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}}} - match: { error.root_cause.0.type: "illegal_argument_exception" } + +--- +"Plain highlighter on a field WITHOUT OFFSETS using max_analyzer_offset should SUCCEED": + - skip: + version: " - 2.1.99" + reason: only starting supporting the parameter max_analyzer_offset on version 2.2 + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field1" : "quick"}}, "highlight" : {"type" : "plain", "fields" : {"field1" : {"max_analyzer_offset": 10}}}} + - match: {hits.hits.0.highlight.field1.0: "The <em>quick</em> "} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index c7b00d5fbbef2..d5ece1719dc48 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1,8 +1,3 @@ -setup: - - skip: - features: [ "headers" ] - version: " - 2.11.99" - reason: "searching with only doc_values was added in 2.12.0" --- "search on fields with both index and doc_values enabled": - do: @@ -47,6 +42,10 @@ setup: type: unsigned_long index: true doc_values: true + ip_field: + type: ip + index: true + doc_values: true - do: bulk: @@ -54,11 +53,11 @@ setup: refresh: true body: - '{"index": {"_index": "test-iodvq", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' - '{ "index": { "_index": "test-iodvq", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-iodvq", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", 
"half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -162,7 +161,6 @@ setup: - match: { hits.total: 1 } - - do: search: rest_total_hits_as_int: true @@ -174,6 +172,16 @@ setup: - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} - do: search: @@ -186,7 +194,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -264,6 +271,17 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + ip_field: ["192.168.0.1", "192.168.0.2"] + + - match: { hits.total: 2 } + - do: search: rest_total_hits_as_int: true @@ -384,6 +402,19 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } + --- "search on fields with only index enabled": - do: @@ -428,6 +459,10 @@ setup: type: unsigned_long index: true doc_values: false + ip_field: + type: ip + index: true + doc_values: false - do: bulk: @@ -435,11 +470,11 @@ setup: refresh: true body: - '{"index": {"_index": "test-index", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' - '{ "index": { "_index": "test-index", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-index", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -465,7 +500,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -555,6 +589,16 @@ setup: - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} - do: search: @@ -567,7 +611,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -645,6 +688,17 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + ip_field: ["192.168.0.1", 
"192.168.0.2"] + + - match: { hits.total: 2 } + - do: search: rest_total_hits_as_int: true @@ -765,8 +819,24 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } --- "search on fields with only doc_values enabled": + - skip: + features: [ "headers" ] + version: " - 2.99.99" + reason: "searching with only doc_values was added in 3.0.0" - do: indices.create: index: test-doc-values @@ -809,6 +879,10 @@ setup: type: unsigned_long index: false doc_values: true + ip_field: + type: ip + index: false + doc_values: true - do: bulk: @@ -816,11 +890,11 @@ setup: refresh: true body: - '{"index": {"_index": "test-doc-values", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' - '{ "index": { "_index": "test-doc-values", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -846,7 +920,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -924,7 +997,6 @@ setup: - match: { hits.total: 1 } - - do: search: rest_total_hits_as_int: true @@ -936,6 +1008,16 @@ setup: - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + ip_field: "192.168.0.3" + + - match: { hits.total: 1 } - do: search: @@ -948,7 +1030,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -1145,3 +1226,16 @@ setup: } - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml new file mode 100644 index 0000000000000..08a20df093c01 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml @@ -0,0 +1,103 @@ +setup: + - skip: + version: " - 2.12.99" + reason: "implemented for versions 2.13.0 and above" + +--- +"matched queries": + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { 
"_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: match_field_2, + boost: 10 + } + } + } + ] + } + + - match: {hits.total.value: 2} + - length: {hits.hits.0.matched_queries: 2} + - match: {hits.hits.0.matched_queries: [ "match_field_1", "match_field_2" ]} + - length: {hits.hits.1.matched_queries: 1} + - match: {hits.hits.1.matched_queries: [ "match_field_1" ]} + +--- + +"matched queries with scores": + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + include_named_queries_score: true + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: match_field_2, + boost: 10 + } + } + } + ] + } + + - match: { hits.total.value: 2 } + - length: { hits.hits.0.matched_queries: 2 } + - match: { hits.hits.0.matched_queries.match_field_1: 1 } + - match: { hits.hits.0.matched_queries.match_field_2: 10 } + - length: { hits.hits.1.matched_queries: 1 } + - match: { hits.hits.1.matched_queries.match_field_1: 1 } diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..9cab77f4e7394 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +6f0cfa07a5e4b36423e398cd1fd51c6825773d9c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 deleted file mode 100644 index c9e6120da7497..0000000000000 --- a/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24c8401b530308f9568eb7b408c2029c63f564c6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2d5d1a281a0f0 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +ab201b997c8449db1ecd2fa88bd42d2f457286fa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 deleted file mode 100644 index 69ecf6aa68200..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11c46007366bb037be7d271ab0a5849b1d544662 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/server/licenses/lucene-core-9.9.1.jar.sha1 b/server/licenses/lucene-core-9.9.1.jar.sha1 deleted file mode 100644 index ae596196d9e6a..0000000000000 --- 
a/server/licenses/lucene-core-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..27d4f0f5874e9 --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a988f92842e48195c75a49377432533c9170d93d \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.9.1.jar.sha1 b/server/licenses/lucene-grouping-9.9.1.jar.sha1 deleted file mode 100644 index e7df056400661..0000000000000 --- a/server/licenses/lucene-grouping-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f2785e17c5c823cc8f41a7ddb4647aaca8ee773 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2545822f2ac7b --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7c669e2c01565d3bdf175cd61a1e4d0bdfc44311 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.9.1.jar.sha1 b/server/licenses/lucene-highlighter-9.9.1.jar.sha1 deleted file mode 100644 index 828c7294aa586..0000000000000 --- a/server/licenses/lucene-highlighter-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30928513461bf79a5cb057e84da7d34a1e53227d \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..fe44ad772335f --- /dev/null +++ b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +633a6d44b4cde8e149daa3407e8b8f644eece951 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.9.1.jar.sha1 b/server/licenses/lucene-join-9.9.1.jar.sha1 deleted file mode 100644 index 34b44ca8c6ad5..0000000000000 --- a/server/licenses/lucene-join-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9c8cc99632280148f92b4c0a64111c482d5d0ac \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..85bfbe066ff56 --- /dev/null +++ b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +25390259c9e5592354efbc2f250bb396402016b2 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.9.1.jar.sha1 b/server/licenses/lucene-memory-9.9.1.jar.sha1 deleted file mode 100644 index b75fba4c331e9..0000000000000 --- a/server/licenses/lucene-memory-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49f820b1b321860fa42a4f7583e8ed8f77b9c1c2 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..4dba5837b66de --- /dev/null +++ b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +056f87a3d30c223b08d2f45fe465ddf11210b85f \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.9.1.jar.sha1 b/server/licenses/lucene-misc-9.9.1.jar.sha1 deleted file mode 100644 index f1e1e056004e9..0000000000000 --- a/server/licenses/lucene-misc-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db7c30217602dfcda394a4d0f0a9e68140d385a6 \ No newline at end of file 
diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..b6b8c441eefb1 --- /dev/null +++ b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +e640f850b4fb13190be8422fe74c14c9d6603bb5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.9.1.jar.sha1 b/server/licenses/lucene-queries-9.9.1.jar.sha1 deleted file mode 100644 index 888b9b4a05ec8..0000000000000 --- a/server/licenses/lucene-queries-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d157547bd24edc8e9d9d59c273107dc3ac5fde5e \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..d0e77b04db51a --- /dev/null +++ b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +8eb57762bf408fa51d7511f5e3b917627be61d1d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.9.1.jar.sha1 b/server/licenses/lucene-queryparser-9.9.1.jar.sha1 deleted file mode 100644 index 1ce8a069a0f4e..0000000000000 --- a/server/licenses/lucene-queryparser-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -12d844fe224f6f97c510ac20d68903ed7f626f6c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..179df9f07a594 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +615b4a90c5402084c2d5916a4c1fadc9d9177782 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.9.1.jar.sha1 b/server/licenses/lucene-sandbox-9.9.1.jar.sha1 deleted file mode 100644 index 14fd86dadc404..0000000000000 --- a/server/licenses/lucene-sandbox-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -272e588fd3d8c0a401b28a1ac715f27044bf62ec \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..9b88b24c21b12 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +19b42cdb5f76f63dece3ef5128207ebdd3741d48 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 deleted file mode 100644 index 0efd5a7595bfe..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e066432e7ab02b2a4914f989bcd8c44adbf340ad \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..789ab1d52ea8c --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +882691fe917e716fe6bcf8c0dd984b153495d015 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 deleted file mode 100644 index 7f06466e4c721..0000000000000 --- a/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa54c9b962778e28ebc0efb9f75297781350361a \ No newline at end of file diff --git 
a/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..8cfaf60763724 --- /dev/null +++ b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +ef6d483960f776d5dbdd1009863786ee09ba5707 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.9.1.jar.sha1 b/server/licenses/lucene-suggest-9.9.1.jar.sha1 deleted file mode 100644 index 06732480d1b6c..0000000000000 --- a/server/licenses/lucene-suggest-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9554de5b22ae7483b344b94a9a956960b7a5d49c \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.11.jar.sha1 b/server/licenses/reactor-core-3.5.11.jar.sha1 deleted file mode 100644 index e5ffdbc8a7840..0000000000000 --- a/server/licenses/reactor-core-3.5.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db2299757f562261eb775d13658e86ff06f91e8a \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.14.jar.sha1 b/server/licenses/reactor-core-3.5.14.jar.sha1 new file mode 100644 index 0000000000000..3b58e7a68bade --- /dev/null +++ b/server/licenses/reactor-core-3.5.14.jar.sha1 @@ -0,0 +1 @@ +6e0c97c2e78273a00fd4ed38016b19ff3c6de59e \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java index 5605e4872887a..c81d491719e4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java @@ -449,6 +449,7 @@ public void onFailure(Exception e) { } } + @SuppressWarnings("removal") private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); try { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index c4dcedcc722cf..bdb36b62ada21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -52,7 +52,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -69,7 +68,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +98,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -public class CancellableTasksIT extends ParameterizedOpenSearchIntegTestCase { +public class CancellableTasksIT 
extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int idGenerator = 0; static final Map<TestRequest, CountDownLatch> beforeSendLatches = ConcurrentCollections.newConcurrentMap(); @@ -107,8 +106,8 @@ public class CancellableTasksIT extends ParameterizedOpenSearchIntegTestCase { static final Map<TestRequest, CountDownLatch> beforeExecuteLatches = ConcurrentCollections.newConcurrentMap(); static final Map<TestRequest, CountDownLatch> completedLatches = ConcurrentCollections.newConcurrentMap(); - public CancellableTasksIT(Settings dynamicSettings) { - super(dynamicSettings); + public CancellableTasksIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -119,11 +118,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void resetTestStates() { idGenerator = 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index e6fd9139d45f2..8b3c40c43e2d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -15,9 +15,9 @@ import org.opensearch.common.settings.FeatureFlagSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.SearchService; import org.opensearch.tasks.TaskInfo; import org.hamcrest.MatcherAssert; @@ -44,6 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put("thread_pool.index_searcher.size", INDEX_SEARCHER_THREADS) .put("thread_pool.index_searcher.queue_size", INDEX_SEARCHER_THREADS) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) .build(); } @@ -66,7 +67,6 @@ protected Settings featureFlagSettings() { for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); } - featureSettings.put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true); return featureSettings.build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index a081110e6c5a1..f50e8fd0a38cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -50,6 +50,8 @@ import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.VersionUtils; +import java.util.concurrent.ExecutionException; + import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -130,4 +132,61 @@ public void 
testCreateCloneIndex() { } + public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = 1; + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = 2; + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + setFailRate(REPOSITORY_NAME, 100); + + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setWaitForActiveShards(0) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get(); + + Thread.sleep(2000); + ensureYellow("target"); + + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } finally { + setFailRate(REPOSITORY_NAME, 0); + ensureGreen(); + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java new file mode 100644 index 0000000000000..85c70e098652c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.cluster.metadata.View; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 2) +public class ViewIT extends ViewTestBase { + + public void testCreateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = randomAlphaOfLength(8); + + logger.info("Testing createView with valid parameters"); + final View view = createView(viewName, indexPattern).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(1)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + + logger.info("Testing createView with existing view name"); + final Exception ex = assertThrows(ViewAlreadyExistsException.class, () -> createView(viewName, randomAlphaOfLength(8))); + MatcherAssert.assertThat(ex.getMessage(), is("View [" + viewName + "] already exists")); + } + + public void testCreateViewTargetsSet() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = "a" + randomAlphaOfLength(8); + final String indexPattern2 = "b" + randomAlphaOfLength(8); + final List<String> targetPatterns = List.of(indexPattern2, indexPattern, indexPattern); + + logger.info("Testing createView with targets that will be reordered and deduplicated"); + final View view = createView(viewName, targetPatterns).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(2)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + MatcherAssert.assertThat(view.getTargets().last().getIndexPattern(), is(indexPattern2)); + } + + public void testGetView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + final View view = getView(viewName).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + + logger.info("Testing getView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> getView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testDeleteView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + logger.info("Testing deleteView with existing view"); + deleteView(viewName); + final Exception whenDeletedEx = assertThrows(ViewNotFoundException.class, () -> getView(viewName)); + MatcherAssert.assertThat(whenDeletedEx.getMessage(), is("View [" + viewName + "] does not exist")); + + logger.info("Testing deleteView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx 
= assertThrows(ViewNotFoundException.class, () -> deleteView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testUpdateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String originalIndexPattern = randomAlphaOfLength(8); + final View originalView = createView(viewName, originalIndexPattern).getView(); + + logger.info("Testing updateView with existing view"); + final String newDescription = randomAlphaOfLength(20); + final String newIndexPattern = "newPattern-" + originalIndexPattern; + final View updatedView = updateView(viewName, newDescription, newIndexPattern).getView(); + + MatcherAssert.assertThat(updatedView, not(is(originalView))); + MatcherAssert.assertThat(updatedView.getDescription(), is(newDescription)); + MatcherAssert.assertThat(updatedView.getTargets(), hasSize(1)); + MatcherAssert.assertThat(updatedView.getTargets().first().getIndexPattern(), is(newIndexPattern)); + + logger.info("Testing updateView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> updateView(nonExistentView, null, "index-*")); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testListViewNames() throws Exception { + logger.info("Testing listViewNames when no views have been created"); + MatcherAssert.assertThat(listViewNames(), is(List.of())); + + final String view1 = "view1"; + final String view2 = "view2"; + createView(view1, "index-1-*"); + createView(view2, "index-2-*"); + + logger.info("Testing listViewNames"); + final List<String> views = listViewNames(); + MatcherAssert.assertThat(views, containsInAnyOrder(view1, view2)); + + logger.info("Testing listViewNames after deleting a view"); + deleteView(view1); + final List<String> viewsAfterDeletion = listViewNames(); + MatcherAssert.assertThat(viewsAfterDeletion, not(contains(view1))); + MatcherAssert.assertThat(viewsAfterDeletion, contains(view2)); + } + + public void testSearchOperations() throws Exception { + final String indexInView1 = "index-1"; + final String indexInView2 = "index-2"; + final String indexNotInView = "another-index-1"; + + final int indexInView1DocCount = createIndexWithDocs(indexInView1); + final int indexInView2DocCount = createIndexWithDocs(indexInView2); + createIndexWithDocs(indexNotInView); + + logger.info("Testing view with no matches"); + createView("no-matches", "this-pattern-will-match-nothing"); + final Exception ex = assertThrows(IndexNotFoundException.class, () -> searchView("no-matches")); + MatcherAssert.assertThat(ex.getMessage(), is("no such index [this-pattern-will-match-nothing]")); + + logger.info("Testing view with exact index match"); + createView("only-index-1", "index-1"); + assertHitCount(searchView("only-index-1"), indexInView1DocCount); + + logger.info("Testing view with wildcard matches"); + createView("both-indices", "index-*"); + assertHitCount(searchView("both-indices"), indexInView1DocCount + indexInView2DocCount); + + logger.info("Testing searchView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> searchView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + 
nonExistentView + "] does not exist")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java new file mode 100644 index 0000000000000..a44ba0cf7c717 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public abstract class ViewTestBase extends OpenSearchIntegTestCase { + + protected int createIndexWithDocs(final String indexName) throws Exception { + createIndex(indexName); + ensureGreen(indexName); + + final int numOfDocs = scaledRandomIntBetween(0, 200); + try (final BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { + waitForDocs(numOfDocs, indexer); + } + + refresh(indexName); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); + return numOfDocs; + } + + protected GetViewAction.Response createView(final String name, final String indexPattern) throws Exception { + return createView(name, List.of(indexPattern)); + } + + protected GetViewAction.Response createView(final String name, final List<String> targets) throws Exception { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + null, + targets.stream().map(CreateViewAction.Request.Target::new).collect(Collectors.toList()) + ); + return client().admin().indices().createView(request).actionGet(); + } + + protected GetViewAction.Response getView(final String name) { + return client().admin().indices().getView(new GetViewAction.Request(name)).actionGet(); + + } + + protected void deleteView(final String name) { + client().admin().indices().deleteView(new DeleteViewAction.Request(name)).actionGet(); + performRemoteStoreTestAction(); + } + + protected List<String> listViewNames() { + return client().listViewNames(new ListViewNamesAction.Request()).actionGet().getViewNames(); + } + + protected SearchResponse searchView(final String viewName) throws Exception { + final SearchViewAction.Request request = new SearchViewAction.Request(viewName, new SearchRequest()); + final SearchResponse response = client().searchView(request).actionGet(); + return response; + } + + protected GetViewAction.Response updateView(final String name, final String description, final String indexPattern) { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + description, + List.of(new CreateViewAction.Request.Target(indexPattern)) + ); + final GetViewAction.Response response = client().admin().indices().updateView(request).actionGet(); + return response; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java index 7bd1467933e00..280f574b1baf9 100644 
--- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java @@ -74,8 +74,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { - public GetTermVectorsIT(Settings dynamicSettings) { - super(dynamicSettings); + public GetTermVectorsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java index 7c6c47c682281..3fc3235701f17 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java @@ -52,8 +52,8 @@ import static org.hamcrest.Matchers.nullValue; public class MultiTermVectorsIT extends AbstractTermVectorsTestCase { - public MultiTermVectorsIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiTermVectorsIT(Settings staticSettings) { + super(staticSettings); } public void testDuelESLucene() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index f0337e9c0c84c..af5900b1cba6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -97,12 +97,17 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(PrivateCustomPlugin.class); } + @Override + protected boolean useRandomReplicationStrategy() { + return true; + } + @Before public void indexData() throws Exception { index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject()); index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject()); index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject()); - refresh(); + refreshAndWaitForReplication(); } public void testRoutingTable() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 2e0dd579d6910..d6d22c95ee5a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -13,12 +13,14 @@ import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalTestCluster; 
import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; @@ -715,4 +717,144 @@ public void testClusterHealthResponseWithEnsureNodeWeighedInParam() throws Excep assertFalse(nodeLocalHealth.isTimedOut()); assertTrue(nodeLocalHealth.hasDiscoveredClusterManager()); } + + public void testReadWriteWeightedRoutingMetadataOnNodeRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startClusterManagerOnlyNode(Settings.builder().put(commonSettings).build()); + + logger.info("--> starting 1 nodes on zones 'a' & 'b' & 'c'"); + List<String> nodes_in_zone_a = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List<String> nodes_in_zone_b = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List<String> nodes_in_zone_c = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map<String, Double> weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .setVersion(-1) + .get(); + assertEquals(response.isAcknowledged(), true); + + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().setVersion(0).get(); + assertTrue(deleteResponse.isAcknowledged()); + + // check weighted routing metadata after node restart, ensure node comes healthy after restart + internalCluster().restartNode(nodes_in_zone_a.get(0), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + + 
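// now restart the cluster-manager node itself and repeat the verification: the weighted routing
+ // metadata must survive a cluster-manager failover and remain readable from every node's local state
+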
internalCluster().restartNode(internalCluster().getClusterManagerName(), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 089a91a30dd17..cc8747e5f5666 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -245,8 +245,10 @@ public void testIndexCreateBlockIsRemovedWhenAnyNodesNotExceedHighWatermarkWithA (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, TOTAL_SPACE_BYTES, TOTAL_SPACE_BYTES) ); - // Validate if index create block is removed on the cluster + // Validate if index create block is removed on the cluster. Need to refresh this periodically as well to remove + // the node from high watermark breached list. 
assertBusy(() -> { + clusterInfoService.refresh(); ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).get().getState(); assertFalse(state1.blocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id())); }, 30L, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index fb97ae59aae91..5eef7074e1dd6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -245,23 +245,22 @@ public void testIndexCreationOverLimitForDotIndexesFail() { assertFalse(clusterState.getMetadata().hasIndex(".test-index")); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6287") public void testCreateIndexWithMaxClusterShardSetting() { - int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - setMaxShardLimit(dataNodes, shardsPerNodeKey); + int maxAllowedShardsPerNode = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + setMaxShardLimit(maxAllowedShardsPerNode, shardsPerNodeKey); - int maxAllowedShards = dataNodes + 1; - int extraShardCount = maxAllowedShards + 1; + // Always keep + int maxAllowedShardsPerCluster = maxAllowedShardsPerNode * 1000; + int extraShardCount = 1; // Getting total active shards in the cluster. int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); try { - setMaxShardLimit(maxAllowedShards, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); + setMaxShardLimit(maxAllowedShardsPerCluster, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); prepareCreate("test_index_with_cluster_shard_limit").setSettings( Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, extraShardCount).put(SETTING_NUMBER_OF_REPLICAS, 0).build() ).get(); } catch (final IllegalArgumentException ex) { - verifyException(Math.min(maxAllowedShards, dataNodes * dataNodes), currentActiveShards, extraShardCount, ex); + verifyException(maxAllowedShardsPerCluster, currentActiveShards, extraShardCount, ex); } finally { setMaxShardLimit(-1, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java new file mode 100644 index 0000000000000..2b6a5b4ee6867 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway; + +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.store.ShardAttributes; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.OpenSearchIntegTestCase.client; +import static org.opensearch.test.OpenSearchIntegTestCase.internalCluster; +import static org.opensearch.test.OpenSearchIntegTestCase.resolveIndex; + +public class GatewayRecoveryTestUtils { + + public static DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List<DiscoveryNode> nodes = new LinkedList<>(clusterStateResponse.getState().nodes().getDataNodes().values()); + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public static Map<ShardId, ShardAttributes> prepareRequestMap(String[] indices, int primaryShardCount) { + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = new HashMap<>(); + for (String indexName : indices) { + final Index index = resolveIndex(indexName); + final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get( + client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName) + ); + for (int shardIdNum = 0; shardIdNum < primaryShardCount; shardIdNum++) { + final ShardId shardId = new ShardId(index, shardIdNum); + shardIdShardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); + } + } + return shardIdShardAttributesMap; + } + + public static void corruptShard(String nodeName, ShardId shardId) throws IOException, InterruptedException { + for (Path path : internalCluster().getInstance(NodeEnvironment.class, nodeName).availableShardPaths(shardId)) { + final Path indexPath = path.resolve(ShardPath.INDEX_FOLDER_NAME); + if (Files.exists(indexPath)) { // multi data path might only have one path in use + try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) { + for (Path item : stream) { + if (item.getFileName().toString().startsWith("segments_")) { + Files.delete(item); + } + } + } + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 229cd7bffad2f..9da1336642a64 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -36,6 +36,8 @@ import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import 
org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.stats.IndexStats; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -60,6 +62,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster.RestartCallback; @@ -85,6 +88,9 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.prepareRequestMap; import static org.opensearch.gateway.GatewayService.RECOVER_AFTER_NODES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; @@ -734,4 +740,97 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { internalCluster().fullRestart(); ensureGreen("test"); } + + public void testSingleShardFetchUsingBatchAction() { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(searchShardsResponse.getNodes()[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNodeGatewayStartedShardsHappyCase(nodeGatewayStartedShards); + } + + public void testShardFetchMultiNodeMultiIndexesUsingBatchAction() { + // start node + internalCluster().startNode(); + String indexName1 = "test1"; + String indexName2 = "test2"; + int numShards = internalCluster().numDataNodes(); + // assign one primary shard each to the data nodes + prepareIndex(indexName1, numShards); + prepareIndex(indexName2, numShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName1, indexName2 }, numShards); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); 
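+ // the search-shards response maps every primary shard to the data node holding it; that node id is
+ // used below to look up the matching entry in the batched started-shards reply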
+ assertEquals(internalCluster().numDataNodes(), searchShardsResponse.getNodes().length); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + assertEquals(1, clusterSearchShardsGroup.getShards().length); + String nodeId = clusterSearchShardsGroup.getShards()[0].currentNodeId(); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(nodeId) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNodeGatewayStartedShardsHappyCase(nodeGatewayStartedShards); + } + } + + public void testShardFetchCorruptedShardsUsingBatchAction() throws Exception { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + corruptShard(searchShardsResponse.getNodes()[0].getName(), shardId); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + internalCluster().restartNode(searchShardsResponse.getNodes()[0].getName()); + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(getDiscoveryNodes(), shardIdShardAttributesMap) + ); + DiscoveryNode[] discoveryNodes = getDiscoveryNodes(); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(discoveryNodes[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNotNull(nodeGatewayStartedShards.storeException()); + assertNotNull(nodeGatewayStartedShards.allocationId()); + assertTrue(nodeGatewayStartedShards.primary()); + } + + private void assertNodeGatewayStartedShardsHappyCase( + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards + ) { + assertNull(nodeGatewayStartedShards.storeException()); + assertNotNull(nodeGatewayStartedShards.allocationId()); + assertTrue(nodeGatewayStartedShards.primary()); + } + + private void prepareIndex(String indexName, int numberOfPrimaryShards) { + createIndex( + indexName, + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + index(indexName, "type", "1", Collections.emptyMap()); + flush(indexName); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java index bb6e356db188f..369c9f9b1a653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java @@ -39,9 +39,8 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -51,11 +50,11 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; -public class IndexSortIT extends ParameterizedOpenSearchIntegTestCase { +public class IndexSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final XContentBuilder TEST_MAPPING = createTestMapping(); - public IndexSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public IndexSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static XContentBuilder createTestMapping() { try { return jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java index 2d28578dbebcc..df423edeca9c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java @@ -38,10 +38,9 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -55,12 +54,12 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class MatchPhraseQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class MatchPhraseQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX = "test"; - public MatchPhraseQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public MatchPhraseQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setUp() throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java index 6332b1b97426f..a1ff2da249d69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java @@ -44,13 +44,12 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -68,10 +67,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SuggestStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class SuggestStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SuggestStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SuggestStatsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -82,11 +81,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java index 73d6d9aff7b72..8cb54631b593f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java @@ -32,19 +32,23 @@ package org.opensearch.indexing; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.common.settings.Settings; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.indices.InvalidIndexNameException; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Random; @@ -57,7 +61,17 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class IndexActionIT extends OpenSearchIntegTestCase { +public class IndexActionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IndexActionIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + /** * This test tries to simulate load while creating an index and indexing documents * while the index is being created. 
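The recurring edit across these suites (GetTermVectorsIT, IndexSortIT, MatchPhraseQueryIT, SuggestStatsIT, IndexActionIT, and others below) is the same: extend ParameterizedStaticSettingsOpenSearchIntegTestCase, take staticSettings in the constructor, and drop the per-class CONCURRENT_SEGMENT_SEARCH feature-flag override. A rough sketch of the parameters factory shape these classes rely on; the concrete settings lists (such as replicationSettings in IndexActionIT) are defined per suite, and the imports assumed here are the ones already shown in IndexSortIT:

// Illustrative only: run every test once with concurrent segment search disabled and once enabled.
@ParametersFactory
public static Collection<Object[]> parameters() {
    return Arrays.asList(
        new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
        new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
    );
}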
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 51dba07a8f9f8..82577eb1501f3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -42,14 +42,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.time.ZoneId; @@ -69,7 +68,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class IndicesRequestCacheIT extends ParameterizedOpenSearchIntegTestCase { +public class IndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public IndicesRequestCacheIT(Settings settings) { super(settings); } @@ -83,8 +82,8 @@ public static Collection<Object[]> parameters() { } @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + protected boolean useRandomReplicationStrategy() { + return true; } // One of the primary purposes of the query cache is to cache aggs results @@ -186,7 +185,7 @@ public void testQueryRewrite() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -256,7 +255,7 @@ public void testQueryRewriteMissingValues() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -322,7 +321,7 @@ public void testQueryRewriteDates() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -395,7 +394,7 @@ 
public void testQueryRewriteDatesWithNow() throws Exception { .setFlush(true) .get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index-1", "index-2", "index-3"); assertCacheState(client, "index-1", 0, 0); @@ -466,7 +465,7 @@ public void testCanCache() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -560,7 +559,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); indexRandomForConcurrentSearch("index"); @@ -667,7 +666,7 @@ public void testCacheWithInvalidation() throws Exception { assertCacheState(client, "index", 1, 1); // Explicit refresh would invalidate cache - refresh(); + refreshAndWaitForReplication(); // Hit same query again resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); assertSearchResponse(resp); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 0c5780210901d..73e888eea362c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -50,7 +50,6 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.breaker.NoopCircuitBreaker; @@ -63,7 +62,7 @@ import org.opensearch.search.SearchService; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -94,9 +93,9 @@ * Integration tests for InternalCircuitBreakerService */ @ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1) -public class CircuitBreakerServiceIT extends ParameterizedOpenSearchIntegTestCase { - public CircuitBreakerServiceIT(Settings dynamicSettings) { - super(dynamicSettings); +public class CircuitBreakerServiceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public CircuitBreakerServiceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -107,11 +106,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings 
featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index c049c8ed2d4a6..9decd17d95eab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -66,19 +66,16 @@ public void testPrimaryRelocationWhileIndexing() throws Exception { ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); - numAutoGenDocs.incrementAndGet(); - } + Thread indexingThread = new Thread(() -> { + while (finished.get() == false && numAutoGenDocs.get() < 10_000) { + IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex("test").setSource("auto", true).get(); + numAutoGenDocs.incrementAndGet(); } - }; + }); indexingThread.start(); ClusterState initialState = client().admin().cluster().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index e4f1f8717f899..72e680e22ed75 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -523,12 +523,12 @@ public void testRerouteRecovery() throws Exception { logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); - assertBusy(() -> { + assertBusyWithFixedSleepTime(() -> { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); - }); + }, TimeValue.timeValueSeconds(10), TimeValue.timeValueMillis(500)); logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index f485d4e402b41..30edea6551067 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -22,6 +22,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.List; @@ -128,6 +129,7 @@ public void testPerIndexPrimaryAllocation() throws Exception { * ensures the primary shard distribution is balanced. * */ + @TestLogging(reason = "Enable debug logs from cluster and index replication package", value = "org.opensearch.cluster:DEBUG,org.opensearch.indices.replication:DEBUG") public void testSingleIndexShardAllocation() throws Exception { internalCluster().startClusterManagerOnlyNode(); final int maxReplicaCount = 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 641f714d33414..be849452c0f5e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.index.SegmentInfos; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -16,10 +17,11 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationShardStats; @@ -28,12 +30,14 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -175,17 +179,6 @@ private IndexShard getIndexShard(ClusterState state, ShardRouting routing, Strin return getIndexShard(state.nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); } - /** - * Fetch IndexShard by shardId, multiple shards per node allowed. 
- */ - protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { - final Index index = resolveIndex(indexName); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); - final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid == shardId.id()).findFirst(); - return indexService.getShard(id.get()); - } - /** * Fetch IndexShard, assumes only a single shard per node. */ @@ -242,4 +235,14 @@ protected void assertReplicaCheckpointUpdated(IndexShard primaryShard) throws Ex } }, 30, TimeUnit.SECONDS); } + + /** + * Returns the latest SIS for a shard but does not incref the segments. + */ + protected SegmentInfos getLatestSegmentInfos(IndexShard shard) throws IOException { + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = shard.getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> closeable = tuple.v1()) { + return closeable.get(); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 5511bc7945d65..87dd48de38d3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -53,12 +53,11 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.common.action.ActionFuture; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; @@ -73,7 +72,6 @@ import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.FileChunkRequest; -import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; import org.opensearch.search.SearchService; @@ -92,6 +90,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -594,6 +594,67 @@ public void testCancellation() throws Exception { assertDocCounts(docCount, primaryNode); } + public void testCancellationDuringGetCheckpointInfo() throws Exception { + cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO); + } + + public void testCancellationDuringGetSegments() throws Exception { + cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES); + } + + private void cancelDuringReplicaAction(String actionToblock) throws Exception { + // this test stubs transport calls specific to node-node replication. 
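+ // Flow: stub the primary's transport handler for the chosen replication action so the next
+ // replication round parks on a latch, index one doc to start that round, then drop the replica
+ // count to zero while it is blocked. The assertions below verify the cancelled target and the
+ // replica's store release all references before the replica node is stopped.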
+ assumeFalse( + "Skipping the test as its not compatible with segment replication with remote store.", + segmentReplicationWithRemoteEnabled() + ); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureYellow(INDEX_NAME); + + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final SegmentReplicationTargetService targetService = internalCluster().getInstance( + SegmentReplicationTargetService.class, + replicaNode + ); + final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); + CountDownLatch startCancellationLatch = new CountDownLatch(1); + CountDownLatch latch = new CountDownLatch(1); + + MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + ); + primaryTransportService.addRequestHandlingBehavior(actionToblock, (handler, request, channel, task) -> { + logger.info("action {}", actionToblock); + try { + startCancellationLatch.countDown(); + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + // index a doc and trigger replication + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // remove the replica and ensure it is cleaned up. + startCancellationLatch.await(); + SegmentReplicationTarget target = targetService.get(replicaShard.shardId()); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + assertEquals("Replication not closed: " + target.getId(), 0, target.refCount()); + assertEquals("Store has a positive refCount", 0, replicaShard.store().refCount()); + // stop the replica, this will do additional checks on shutDown to ensure the replica and its store are closed properly + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + latch.countDown(); + } + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); @@ -992,32 +1053,31 @@ private void assertAllocationIdsInReplicaShardStats(Set<String> expected, Set<Se public void testScrollCreatedOnReplica() throws Exception { // create the cluster with one primary node containing primary shard and replica node containing replica shard final String primary = internalCluster().startDataOnlyNode(); - createIndex(INDEX_NAME); + prepareCreate( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + // we want to control refreshes + .put("index.refresh_interval", -1) + ).get(); ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - // index 10 docs - for (int i = 0; i < 10; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - } + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(0)) + .setSource(jsonBuilder().startObject().field("field", 0).endObject()) + 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + assertBusy( () -> assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ) ); - final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); - final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = replicaShard.getLatestSegmentInfosAndCheckpoint(); - final Collection<String> snapshottedSegments; - try (final GatedCloseable<SegmentInfos> closeable = tuple.v1()) { - snapshottedSegments = closeable.get().files(false); - } + // opens a scrolled query before a flush is called. // this is for testing scroll segment consistency between refresh and flush SearchResponse searchResponse = client(replica).prepareSearch() @@ -1031,17 +1091,20 @@ public void testScrollCreatedOnReplica() throws Exception { .setScroll(TimeValue.timeValueDays(1)) .get(); - // force call flush - flush(INDEX_NAME); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + SegmentInfos latestSegmentInfos = getLatestSegmentInfos(replicaShard); + final Set<String> snapshottedSegments = new HashSet<>(latestSegmentInfos.files(false)); + logger.info("Segments {}", snapshottedSegments); - for (int i = 3; i < 5; i++) { - client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + // index more docs and force merge down to 1 segment + for (int i = 1; i < 5; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); refresh(INDEX_NAME); - if (randomBoolean()) { - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); - flush(INDEX_NAME); - } } + // create new on-disk segments and copy them out. assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), @@ -1049,13 +1112,19 @@ public void testScrollCreatedOnReplica() throws Exception { ); }); + // force merge and flush. 
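+ // merging down to one segment replaces the files the open scroll still references; they must stay
+ // on disk until the scroll is cleared, which the directory listings below verify.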
client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + // wait for replication to complete assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ); }); + logger.info("Local segments after force merge and commit {}", getLatestSegmentInfos(replicaShard).files(false)); + List<String> filesBeforeClearScroll = List.of(replicaShard.store().directory().listAll()); + assertTrue("Files should be preserved", filesBeforeClearScroll.containsAll(snapshottedSegments)); + // Test stats logger.info("--> Collect all scroll query hits"); long scrollHits = 0; @@ -1064,20 +1133,23 @@ public void testScrollCreatedOnReplica() throws Exception { searchResponse = client(replica).prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueDays(1)).get(); assertAllSuccessful(searchResponse); } while (searchResponse.getHits().getHits().length > 0); - - List<String> currentFiles = List.of(replicaShard.store().directory().listAll()); - assertTrue("Files should be preserved", currentFiles.containsAll(snapshottedSegments)); + assertEquals(1, scrollHits); client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - - assertBusy( - () -> assertFalse( - "Files should be cleaned up post scroll clear request", - List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) - ) + final Set<String> filesAfterClearScroll = Arrays.stream(replicaShard.store().directory().listAll()).collect(Collectors.toSet()); + // there should be no active readers, snapshots, or on-disk commits containing the snapshotted files, check that they have been + // deleted. 
+ Set<String> latestCommitSegments = new HashSet<>(replicaShard.store().readLastCommittedSegmentsInfo().files(false)); + assertEquals( + "Snapshotted files are no longer part of the latest commit", + Collections.emptySet(), + Sets.intersection(latestCommitSegments, snapshottedSegments) + ); + assertEquals( + "All snapshotted files should be deleted", + Collections.emptySet(), + Sets.intersection(filesAfterClearScroll, snapshottedSegments) ); - assertEquals(10, scrollHits); - } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index dbe0b43441f54..a7be63bc61bc2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -219,6 +219,7 @@ public void testPrimaryRelocationWithSegRepFailure() throws Exception { * This test verifies primary recovery behavior with continuous ingestion * */ + @TestLogging(reason = "Enable trace logs from replication and recovery package", value = "org.opensearch.indices.recovery:TRACE,org.opensearch.indices.replication:TRACE") public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { final String primary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java index fb06a97bd51c2..69411b2ff640a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java @@ -87,7 +87,8 @@ public void testCreateShrinkIndexThrowsExceptionWhenReplicasBehind() throws Exce .get() ); assertEquals( - " For index [test] replica shards haven't caught up with primary, please retry after sometime.", + "Replication still in progress for index [test]. Please wait for replication to complete and retry. " + + "Use the _cat/segment_replication/test api to check if the index is up to date (e.g. 
bytes_behind == 0).", exception.getMessage() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 766471fdc0756..89aef6f0be1a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -268,12 +268,12 @@ public void testMultipleIndices() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String index_2 = "tst-index-2"; List<String> nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); nodes.add(primaryNode); createIndex(INDEX_NAME, index_2); ensureYellowAndNoInitializingShards(INDEX_NAME, index_2); - nodes.add(internalCluster().startNode()); + nodes.add(internalCluster().startDataOnlyNode()); ensureGreen(INDEX_NAME, index_2); final long numDocs = scaledRandomIntBetween(50, 100); @@ -284,6 +284,7 @@ public void testMultipleIndices() throws Exception { refresh(INDEX_NAME, index_2); waitForSearchableDocs(INDEX_NAME, numDocs, nodes); waitForSearchableDocs(index_2, numDocs, nodes); + ensureSearchable(INDEX_NAME, index_2); final IndexShard index_1_primary = getIndexShard(primaryNode, INDEX_NAME); final IndexShard index_2_primary = getIndexShard(primaryNode, index_2); @@ -291,37 +292,39 @@ public void testMultipleIndices() throws Exception { assertTrue(index_1_primary.routingEntry().primary()); assertTrue(index_2_primary.routingEntry().primary()); - // test both indices are returned in the response. - SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin() - .indices() - .prepareSegmentReplicationStats() - .execute() - .actionGet(); + assertBusy(() -> { + // test both indices are returned in the response. 
+ SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats() + .execute() + .actionGet(); - Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); - assertEquals(2, replicationStats.size()); - List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); - assertEquals(1, replicationPerGroupStats.size()); - SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); - Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); + assertEquals(2, replicationStats.size()); + List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); + assertEquals(1, replicationPerGroupStats.size()); + SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); + Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } - replicationPerGroupStats = replicationStats.get(index_2); - assertEquals(1, replicationPerGroupStats.size()); - perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); - replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + replicationPerGroupStats = replicationStats.get(index_2); + assertEquals(1, replicationPerGroupStats.size()); + perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); + replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } + }, 30, TimeUnit.SECONDS); // test only single index queried. 
- segmentReplicationStatsResponse = client().admin() + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() .indices() .prepareSegmentReplicationStats() .setIndices(index_2) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index 547f9e7a8d380..87e5df8c48981 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -287,7 +287,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - assertAcked(client().admin().indices().prepareDelete(indexToDelete)); + assertAcked(client().admin().indices().prepareDelete(indexToDelete).setTimeout("60s")); } catch (final Exception e) { assertException(e, indexToDelete); } @@ -301,7 +301,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - client().admin().indices().prepareClose(indexToClose).get(); + client().admin().indices().prepareClose(indexToClose).setTimeout("60s").get(); } catch (final Exception e) { assertException(e, indexToClose); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 9c96d4861d426..1d5da9370cce3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -57,7 +57,6 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; @@ -85,7 +84,7 @@ import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -108,6 +107,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; @@ -122,7 +122,7 @@ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0) @SuppressCodecs("*") // requires custom completion format -public class IndexStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class IndexStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public IndexStatsIT(Settings settings) { super(settings); } @@ -131,15 +131,11 @@ public IndexStatsIT(Settings settings) { public static Collection<Object[]> parameters() { return Arrays.asList( new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() } ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); @@ -181,7 +177,7 @@ public void testFieldDataStats() throws InterruptedException { ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); @@ -305,7 +301,7 @@ public void testClearAllCaches() throws Exception { client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); @@ -673,7 +669,7 @@ public void testSimpleStats() throws Exception { client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - refresh(); + refreshAndWaitForReplication(); NumShards test1 = getNumShards("test1"); long test1ExpectedWrites = 2 * test1.dataCopies; @@ -688,7 +684,13 @@ public void testSimpleStats() throws Exception { assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(0L)); assertThat(stats.getPrimaries().getIndexing().getTotal().isThrottled(), equalTo(false)); assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L)); - assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites)); + + // This assert should be skipped on segrep-enabled indices because it checks the Indexing/Write operation count on + // all primary and replica shards. With segrep, Indexing/Write operations don't happen on replica shards, so this + // check does not apply to segrep-enabled indices.
+ if (isSegmentReplicationEnabledForIndex("test1") == false && isSegmentReplicationEnabledForIndex("test2") == false) { + assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites)); + } assertThat(stats.getTotal().getStore(), notNullValue()); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getFlush(), notNullValue()); @@ -831,6 +833,7 @@ public void testMergeStats() { client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); stats = client().admin().indices().prepareStats().setMerge(true).execute().actionGet(); + refreshAndWaitForReplication(); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0L)); } @@ -857,7 +860,7 @@ public void testSegmentsStats() { client().admin().indices().prepareFlush().get(); client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); - client().admin().indices().prepareRefresh().get(); + refreshAndWaitForReplication(); stats = client().admin().indices().prepareStats().setSegments(true).get(); assertThat(stats.getTotal().getSegments(), notNullValue()); @@ -875,7 +878,7 @@ public void testAllFlags() throws Exception { client().prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); Flag[] values = CommonStatsFlags.Flag.values(); for (Flag flag : values) { @@ -1459,6 +1462,7 @@ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() { .get() .status() ); + refreshAndWaitForReplication(); ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).setTranslog(true).get().getShards()[0]; RemoteSegmentStats remoteSegmentStatsFromIndexStats = shard.getStats().getSegments().getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromIndexStats); diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index e2cedea331412..9481a6116cdbc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -32,6 +32,8 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; @@ -57,6 +59,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -73,7 +76,16 @@ import static org.hamcrest.core.Is.is; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class IngestClientIT extends OpenSearchIntegTestCase { +public class IngestClientIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IngestClientIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + 
return replicationSettings; + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 38f1375bc7504..4c949e11459ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -32,13 +32,17 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.NodeService; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -52,12 +56,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class IngestProcessorNotInstalledOnAllNodesIT extends OpenSearchIntegTestCase { +public class IngestProcessorNotInstalledOnAllNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - private final BytesReference pipelineSource; - private volatile boolean installPlugin; - - public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { + public IngestProcessorNotInstalledOnAllNodesIT(Settings settings) throws IOException { + super(settings); pipelineSource = BytesReference.bytes( jsonBuilder().startObject() .startArray("processors") @@ -70,6 +72,14 @@ public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { ); } + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + + private final BytesReference pipelineSource; + private volatile boolean installPlugin; + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return installPlugin ? 
Arrays.asList(IngestTestPlugin.class) : Collections.emptyList(); diff --git a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java index f77ae80a55276..2f0d4959d217b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java @@ -42,11 +42,10 @@ import org.opensearch.action.get.MultiGetResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -63,10 +62,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SimpleMgetIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleMgetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SimpleMgetIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleMgetIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java new file mode 100644 index 0000000000000..3cc10b0c0b858 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugins; + +import org.opensearch.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.containsString; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class PluginsServiceIT extends OpenSearchIntegTestCase { + + public void testNodeBootstrapWithCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithRangeCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithInCompatiblePlugin() throws IOException { + // Prepare the plugins directory with an incompatible plugin and attempt to start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + String incompatibleRange = "~" + + VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + incompatibleRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = 
PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + IllegalArgumentException e = assertThrows( + IllegalArgumentException.class, + () -> internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)) + ); + assertThat(e.getMessage(), containsString("Plugin [dummyPlugin] was built for OpenSearch version ")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index d28df90216beb..0752ab7c9d0f1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -44,15 +46,27 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class FullRollingRestartIT extends OpenSearchIntegTestCase { +public class FullRollingRestartIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FullRollingRestartIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { ClusterHealthResponse clusterHealth = requestBuilder.get(); if (clusterHealth.isTimedOut()) { @@ -121,7 +135,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> refreshing and checking data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -154,7 +168,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> stopped two nodes, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -188,7 +202,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> one node left, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java index 30d5af58df545..988aeac7da541 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -55,7 +57,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -74,7 +76,17 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; -public class RecoveryWhileUnderLoadIT extends OpenSearchIntegTestCase { +public class RecoveryWhileUnderLoadIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RecoveryWhileUnderLoadIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { @@ -150,7 +162,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -211,7 +223,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -325,7 +337,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception ); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -375,7 +387,7 @@ public void testRecoverWhileRelocating() throws Exception { ensureGreen(TimeValue.timeValueMinutes(5)); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numShards, 10, indexer.getIds()); } @@ -474,10 +486,11 @@ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iterat ); } - private void refreshAndAssert() throws Exception { + private void assertAfterRefreshAndWaitForReplication() throws Exception { assertBusy(() -> { RefreshResponse actionGet = client().admin().indices().prepareRefresh().get(); assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); + waitForReplication(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index 8c69424939b57..8d8aea7fc1f3b 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.tests.util.English; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -73,9 +75,9 @@ import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockIndexEventListener; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.Transport; @@ -114,7 +116,17 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class RelocationIT extends OpenSearchIntegTestCase { +public class RelocationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RelocationIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @Override @@ -158,7 +170,7 @@ public void testSimpleRelocationNoIndexing() { } logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); @@ -186,7 +198,7 @@ public void testSimpleRelocationNoIndexing() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count again..."); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -265,7 +277,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refreshAndWaitForReplication("test"); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { @@ -650,7 +662,7 @@ public void testRelocateWhileWaitingForRefresh() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -726,7 +738,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> verifying count"); assertBusy(() -> { - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java index 85f90738b19ce..1f5fbae6e58e9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java @@ -32,21 +32,34 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.get.GetResponse; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.client.Requests.flushRequest; import static org.opensearch.client.Requests.getRequest; import static org.opensearch.client.Requests.indexRequest; -import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -public class SimpleRecoveryIT extends OpenSearchIntegTestCase { +public class SimpleRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + @Override public Settings indexSettings() { return Settings.builder().put(super.indexSettings()).put(recoverySettings()).build(); @@ -72,7 +85,7 @@ public void testSimpleRecovery() throws Exception { assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); client().index(indexRequest("test").id("2").source(source("2", "test"), MediaTypeRegistry.JSON)).actionGet(); - RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet(); + RefreshResponse refreshResponse = refreshAndWaitForReplication("test"); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index 5f0922615a557..bf0533143cf91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.admin.cluster.node.stats.NodeStats; @@ -47,6 +49,7 @@ import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import 
org.opensearch.transport.TransportService; @@ -65,7 +68,16 @@ @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) @SuppressCodecs("*") // test relies on exact file extensions -public class TruncatedRecoveryIT extends OpenSearchIntegTestCase { +public class TruncatedRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public TruncatedRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java new file mode 100644 index 0000000000000..5240949ff87b9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class DocRepMigrationTestCase extends MigrationBaseTestCase { + + public void testMixedModeAddDocRep() throws Exception { + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + addRemote = false; + internalCluster().startNode(); + String[] allNodes = internalCluster().getNodeNames(); + assertBusy(() -> { assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), allNodes.length); }); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java new file mode 100644 index 0000000000000..88d6f6897ee68 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; + +public class MigrationBaseTestCase extends OpenSearchIntegTestCase { + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + + protected Path segmentRepoPath; + protected Path translogRepoPath; + + boolean addRemote = false; + + protected Settings nodeSettings(int nodeOrdinal) { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (addRemote) { + logger.info("Adding remote store node"); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put("discovery.initial_state_timeout", "500ms") + .build(); + } else { + logger.info("Adding docrep node"); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("discovery.initial_state_timeout", "500ms").build(); + } + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java new file mode 100644 index 0000000000000..a31d203058565 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class RemoteStoreMigrationTestCase extends MigrationBaseTestCase { + public void testMixedModeAddRemoteNodes() throws Exception { + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // add remote node in mixed mode cluster + addRemote = true; + internalCluster().startNode(); + internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + // add docrep mode in mixed mode cluster + addRemote = true; + internalCluster().startNode(); + assertBusy(() -> { + assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), internalCluster().getNodeNames().length); + }); + + // add incompatible remote node in remote mixed cluster + Settings.Builder badSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, "REPOSITORY_2_NAME", translogRepoPath)) + .put("discovery.initial_state_timeout", "500ms"); + String badNode = internalCluster().startNode(badSettings); + assertTrue(client.admin().cluster().prepareClusterStats().get().getNodes().size() < internalCluster().getNodeNames().length); + internalCluster().stopRandomNode(settings -> settings.get("node.name").equals(badNode)); + } + + public void testMigrationDirections() { + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + // add remote node in docrep cluster + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "docrep")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + 
updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "random")); + assertThrows(IllegalArgumentException.class, () -> client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index 67316ed0e6e6b..869032a84c2c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -44,6 +44,7 @@ public Settings indexSettings() { .build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index c957f1b338bfe..6de61cf203c60 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -157,10 +157,4 @@ public void testDisconnectsDuringRecovery() { public void testReplicaRecovery() { } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9580") - public void testRerouteRecovery() { - - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index d23e634bb3368..e43ff9a412784 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -9,6 +9,8 @@ package org.opensearch.remotestore; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.bulk.BulkItemResponse; @@ -37,7 +39,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -60,6 +62,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { @@ -84,6 +87,10 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { ); protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, String index) { + return indexData(numberOfIterations, invokeFlush, false, index); + } + + protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, String index) { long totalOperations = 0; long refreshedOrFlushedOperations = 0; long maxSeqNo = -1; @@ -96,6 +103,11 @@ protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlus } else { refresh(index); } + + // skip indexing on the last iteration as we don't want to leave any data in the remote translog + if (emptyTranslog && i == numberOfIterations - 1) { + continue; + } maxSeqNoRefreshedOrFlushed = maxSeqNo; indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED + "-shard-" + shardId, maxSeqNoRefreshedOrFlushed); refreshedOrFlushedOperations = totalOperations; @@ -137,6 +149,18 @@ protected Settings nodeSettings(int nodeOrdinal) { } } + protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() + ); + } + public Settings indexSettings() { return defaultIndexSettings(); } @@ -215,10 +239,10 @@ public static Settings buildRemoteStoreNodeAttributes( return buildRemoteStoreNodeAttributes( segmentRepoName, segmentRepoPath, - FsRepository.TYPE, + ReloadableFsRepository.TYPE, translogRepoName, translogRepoPath, - FsRepository.TYPE, + ReloadableFsRepository.TYPE, withRateLimiterAttributes ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index c61e2ec6e4f6c..3f90732f1f13d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -310,10 +310,16 @@ private void validateCurrentMetadata() throws Exception { internalCluster().getClusterManagerName() ); assertBusy(() -> { - ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( - getClusterState().getClusterName().value(), - getClusterState().metadata().clusterUUID() - ).get(); + ClusterMetadataManifest manifest; + try { + manifest = remoteClusterStateService.getLatestClusterMetadataManifest( - getClusterState().getClusterName().value(), - getClusterState().metadata().clusterUUID() - ).get(); + } catch (IllegalStateException e) { + // Rethrowing as an AssertionError lets assertBusy retry the validation if it failed due to a race condition.
+ throw new AssertionError("Error while validating latest cluster metadata", e); + } ClusterState clusterState = getClusterState(); Metadata currentMetadata = clusterState.metadata(); assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 7626e3dba6424..94acf2b1dbb27 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -17,8 +17,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.indices.IndicesService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -44,7 +50,7 @@ public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { * @throws IOException IO Exception. */ public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { - testRestoreFlow(1, true, randomIntBetween(1, 5)); + testRestoreFlow(1, true, true, randomIntBetween(1, 5)); } /** @@ -52,7 +58,7 @@ public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { * @throws IOException IO Exception. */ public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception { - testRestoreFlow(1, false, randomIntBetween(1, 5)); + testRestoreFlow(1, false, true, randomIntBetween(1, 5)); } /** @@ -61,7 +67,7 @@ public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception { * @throws IOException IO Exception. */ public void testRemoteTranslogRestoreWithRefreshedData() throws Exception { - testRestoreFlow(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); + testRestoreFlow(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5)); } /** @@ -70,7 +76,7 @@ public void testRemoteTranslogRestoreWithRefreshedData() throws Exception { * @throws IOException IO Exception. */ public void testRemoteTranslogRestoreWithCommittedData() throws Exception { - testRestoreFlow(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); + testRestoreFlow(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5)); } /** @@ -78,7 +84,7 @@ public void testRemoteTranslogRestoreWithCommittedData() throws Exception { * @throws IOException IO Exception. */ public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Exception { - testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5)); + testRestoreFlowBothPrimaryReplicasDown(1, true, true, randomIntBetween(1, 5)); } /** @@ -86,7 +92,7 @@ public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Except * @throws IOException IO Exception. 
*/ public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Exception { - testRestoreFlowBothPrimaryReplicasDown(1, false, randomIntBetween(1, 5)); + testRestoreFlowBothPrimaryReplicasDown(1, false, true, randomIntBetween(1, 5)); } /** @@ -95,7 +101,7 @@ public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Excep * @throws IOException IO Exception. */ public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception { - testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); + testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5)); } /** @@ -104,7 +110,7 @@ public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception * @throws IOException IO Exception. */ public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws Exception { - testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); + testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5)); } private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long> indexStats) throws Exception { @@ -122,9 +128,9 @@ private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. * @throws IOException IO Exception. */ - private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception { + private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount) throws Exception { prepareCluster(1, 3, INDEX_NAME, 0, shardCount); - Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); + Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(REFRESHED_OR_FLUSHED_OPERATIONS)); @@ -135,15 +141,64 @@ private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int sh restoreAndVerify(shardCount, 0, indexStats); } + public void testMultipleWriters() throws Exception { + prepareCluster(1, 2, INDEX_NAME, 1, 1); + Map<String, Long> indexStats = indexData(randomIntBetween(2, 5), true, true, INDEX_NAME); + assertEquals(2, getNumShards(INDEX_NAME).totalNumShards); + + // ensure replica has latest checkpoint + flushAndRefresh(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + + Index indexObj = clusterService().state().metadata().indices().get(INDEX_NAME).getIndex(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName(INDEX_NAME)); + IndexService indexService = indicesService.indexService(indexObj); + IndexShard indexShard = indexService.getShard(0); + RemoteSegmentMetadata remoteSegmentMetadataBeforeFailover = indexShard.getRemoteDirectory().readLatestMetadataFile(); + + // ensure all segments synced to replica + assertBusy( + () -> assertHitCount( + client(primaryNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(), + indexStats.get(TOTAL_OPERATIONS) + ), + 30, + TimeUnit.SECONDS + ); + assertBusy( + () -> assertHitCount( + client(replicaNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(), + indexStats.get(TOTAL_OPERATIONS) + ), + 30, + TimeUnit.SECONDS + ); + + String newPrimaryNodeName = 
replicaNodeName(INDEX_NAME); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureYellow(INDEX_NAME); + + indicesService = internalCluster().getInstance(IndicesService.class, newPrimaryNodeName); + indexService = indicesService.indexService(indexObj); + indexShard = indexService.getShard(0); + IndexShard finalIndexShard = indexShard; + assertBusy(() -> assertTrue(finalIndexShard.isStartedPrimary() && finalIndexShard.isPrimaryMode())); + assertEquals( + finalIndexShard.getLatestSegmentInfosAndCheckpoint().v2().getPrimaryTerm(), + remoteSegmentMetadataBeforeFailover.getPrimaryTerm() + 1 + ); + } + /** * Helper function to test restoring an index having replicas from remote store when all the nodes housing the primary/replica drop. * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. * @throws IOException IO Exception. */ - private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception { + private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount) + throws Exception { prepareCluster(1, 2, INDEX_NAME, 1, shardCount); - Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); + Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME); assertEquals(shardCount * 2, getNumShards(INDEX_NAME).totalNumShards); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(INDEX_NAME))); @@ -391,7 +446,7 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws Excep * @throws IOException IO Exception. */ public void testRTSRestoreDataOnlyInTranslog() throws Exception { - testRestoreFlow(0, true, randomIntBetween(1, 5)); + testRestoreFlow(0, true, false, randomIntBetween(1, 5)); } public void testRateLimitedRemoteDownloads() throws Exception { @@ -425,7 +480,14 @@ public void testRateLimitedRemoteDownloads() throws Exception { settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); - assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType(ReloadableFsRepository.TYPE) + .setSettings(settings) + .get() + ); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); @@ -454,7 +516,14 @@ public void testRateLimitedRemoteDownloads() throws Exception { // revert repo metadata to pass asserts on repo metadata vs. 
node attrs during teardown // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 settings.remove("max_remote_download_bytes_per_sec"); - assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType(ReloadableFsRepository.TYPE) + .setSettings(settings) + .get() + ); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java index 8372135fc55c4..3d8d001b17ddf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -23,8 +23,6 @@ import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; import org.opensearch.indices.replication.SegmentReplicationTargetService; -import org.opensearch.indices.replication.common.ReplicationCollection; -import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.SlowClusterStateProcessing; @@ -33,6 +31,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + /** * This class runs tests with remote store + segRep while blocking file downloads */ @@ -59,22 +59,18 @@ public void testCancelReplicationWhileSyncingSegments() throws Exception { indexSingleDoc(); refresh(INDEX_NAME); waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); - final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); - assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage()); - ReplicationCollection.ReplicationRef<SegmentReplicationTarget> segmentReplicationTargetReplicationRef = targetService.get( - state.getReplicationId() - ); - final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); - // close the target ref here otherwise it will hold a refcount - segmentReplicationTargetReplicationRef.close(); + SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId()); assertNotNull(segmentReplicationTarget); + assertEquals(SegmentReplicationState.Stage.GET_FILES, segmentReplicationTarget.state().getStage()); assertTrue(segmentReplicationTarget.refCount() > 0); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); - assertBusy(() -> { - assertTrue(indexShard.routingEntry().primary()); - assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); - assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); - }); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); 
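+        // Note (added comment, not in the original change): scaling the index down to zero replicas removes the replica shard,
+        // which is expected to cancel the in-flight segment replication event and release the target (refcount drops to zero),
+        // as the assertions below verify.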
+ assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); unblockNode(REPOSITORY_NAME, replicaNode); cleanupRepo(); } @@ -85,7 +81,6 @@ public void testCancelReplicationWhileFetchingMetadata() throws Exception { final Set<String> dataNodeNames = internalCluster().getDataNodeNames(); final String replicaNode = getNode(dataNodeNames, false); - final String primaryNode = getNode(dataNodeNames, true); SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); ensureGreen(INDEX_NAME); @@ -94,22 +89,18 @@ public void testCancelReplicationWhileFetchingMetadata() throws Exception { indexSingleDoc(); refresh(INDEX_NAME); waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); - final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); - assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); - ReplicationCollection.ReplicationRef<SegmentReplicationTarget> segmentReplicationTargetReplicationRef = targetService.get( - state.getReplicationId() - ); - final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); - // close the target ref here otherwise it will hold a refcount - segmentReplicationTargetReplicationRef.close(); + SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId()); assertNotNull(segmentReplicationTarget); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, segmentReplicationTarget.state().getStage()); assertTrue(segmentReplicationTarget.refCount() > 0); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); - assertBusy(() -> { - assertTrue(indexShard.routingEntry().primary()); - assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); - assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); - }); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + assertNull(targetService.get(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); unblockNode(REPOSITORY_NAME, replicaNode); cleanupRepo(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java index 2fbaf4ea5a4d3..42c257eb79eff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java @@ -12,7 +12,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.MockEngineFactoryPlugin; @@ -21,7 +20,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.MockSearchService; import org.opensearch.test.MockHttpTransport; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import 
org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; @@ -38,7 +37,7 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.apache.logging.log4j.core.util.Throwables.getRootCause; -public class ScriptCacheIT extends ParameterizedOpenSearchIntegTestCase { +public class ScriptCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ScriptCacheIT(Settings settings) { super(settings); } @@ -51,11 +50,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java index 18b4625761c51..5a19e2b841c08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java @@ -50,7 +50,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.tasks.TaskCancelledException; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -62,7 +61,7 @@ import org.opensearch.search.lookup.LeafFieldsLookup; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.transport.TransportException; import org.junit.After; @@ -91,7 +90,7 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchCancellationIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchCancellationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private TimeValue requestCancellationTimeout = TimeValue.timeValueSeconds(1); private TimeValue clusterCancellationTimeout = TimeValue.timeValueMillis(1500); @@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(ScriptedBlockPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index 52cc797ddd8da..ef7da395d2151 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -38,13 +38,12 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -59,7 +58,7 @@ import static org.opensearch.search.SearchTimeoutIT.ScriptedTimeoutPlugin.SCRIPT_NAME; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchTimeoutIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchTimeoutIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SearchTimeoutIT(Settings settings) { super(settings); } @@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(ScriptedTimeoutPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java index 24c72a66da6d0..b7f71b00d802f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java @@ -38,9 +38,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -52,10 +51,10 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchWithRejectionsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWithRejectionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWithRejectionsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWithRejectionsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java index a61102b9db144..7ed3526cabe3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java @@ -38,9 +38,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -53,7 +52,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @ClusterScope(scope = SUITE) -public class StressSearchServiceReaperIT extends ParameterizedOpenSearchIntegTestCase { +public class StressSearchServiceReaperIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public StressSearchServiceReaperIT(Settings settings) { super(settings); } @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { // very frequent checks diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 257786c1e9ce5..6059abce53c8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -62,7 +61,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationsIntegrationIT extends ParameterizedOpenSearchIntegTestCase { +public class AggregationsIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs; @@ -71,8 +70,8 @@ public class AggregationsIntegrationIT extends ParameterizedOpenSearchIntegTestC + LARGE_STRING.length() + "] used in the request has exceeded the allowed maximum"; - public AggregationsIntegrationIT(Settings dynamicSettings) { - super(dynamicSettings); + public AggregationsIntegrationIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -83,11 +82,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("index").setMapping("f", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java index 3d3cf1943dfe3..1826dd69cd804 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java @@ -37,12 +37,11 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.missing.Missing; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import 
org.hamcrest.Matchers; import java.util.Arrays; @@ -61,10 +60,10 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.core.IsNull.notNullValue; -public class CombiIT extends ParameterizedOpenSearchIntegTestCase { +public class CombiIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CombiIT(Settings dynamicSettings) { - super(dynamicSettings); + public CombiIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Making sure that if there are multiple aggregations, working on the same field, yet require different * value source type, they can all still work. It used to fail as we used to cache the ValueSource by the diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java index 2ffdf5fb32778..302ec3116d187 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; @@ -56,7 +55,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.search.aggregations.metrics.Sum; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -94,10 +93,10 @@ * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like * the growth of dynamic arrays is tested. */ -public class EquivalenceIT extends ParameterizedOpenSearchIntegTestCase { +public class EquivalenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public EquivalenceIT(Settings dynamicSettings) { - super(dynamicSettings); + public EquivalenceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -108,11 +107,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java index 1bc0cb36f5fe3..b650855083eed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java @@ -37,11 +37,10 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -57,10 +56,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class MetadataIT extends ParameterizedOpenSearchIntegTestCase { +public class MetadataIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MetadataIT(Settings dynamicSettings) { - super(dynamicSettings); + public MetadataIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testMetadataSetOnAggregationResult() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("name", "type=keyword").get()); IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)]; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index e6325987d330f..bdd16c7e74dc0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -47,7 +46,7 @@ import org.opensearch.search.aggregations.metrics.Percentiles; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -65,10 +64,10 @@ import static org.hamcrest.Matchers.closeTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MissingValueIT extends 
ParameterizedOpenSearchIntegTestCase { +public class MissingValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MissingValueIT(Settings dynamicSettings) { - super(dynamicSettings); + public MissingValueIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -79,11 +78,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int maximumNumberOfShards() { return 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java index cd0922606ec99..557ec9a37978d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; @@ -50,7 +49,7 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -75,7 +74,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AdjacencyMatrixIT extends ParameterizedOpenSearchIntegTestCase { +public class AdjacencyMatrixIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { static int numDocs, numSingleTag1Docs, numSingleTag2Docs, numTag1Docs, numTag2Docs, numMultiTagDocs; static final int MAX_NUM_FILTERS = 3; @@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java index 7ab1a44ce220c..9a1efb3336212 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java @@ -36,11 +36,10 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import 
org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -53,7 +52,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BooleanTermsIT extends ParameterizedOpenSearchIntegTestCase { +public class BooleanTermsIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "b_value"; private static final String MULTI_VALUED_FIELD_NAME = "b_values"; @@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java new file mode 100644 index 0000000000000..5a38ba670f1dc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class CompositeAggIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CompositeAggIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked( + prepareCreate( + "idx", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("type", 
"type=keyword", "num", "type=integer", "score", "type=integer") + ); + waitForRelocation(ClusterHealthStatus.GREEN); + + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get(); + refresh("idx"); + + waitForRelocation(ClusterHealthStatus.GREEN); + refresh(); + } + + public void testCompositeAggWithNoSubAgg() { + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation(new CompositeAggregationBuilder("my_composite", getTestValueSources())) + .get(); + assertSearchResponse(rsp); + } + + public void testCompositeAggWithSubAgg() { + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation( + new CompositeAggregationBuilder("my_composite", getTestValueSources()).subAggregation( + new MaxAggregationBuilder("max").field("score") + ) + ) + .get(); + assertSearchResponse(rsp); + } + + private List<CompositeValuesSourceBuilder<?>> getTestValueSources() { + final List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>(); + sources.add(new TermsValuesSourceBuilder("keyword_vs").field("type")); + sources.add(new TermsValuesSourceBuilder("num_vs").field("num")); + return sources; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index ee94e574228df..6a15490cbfe63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -59,7 +58,7 @@ import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import org.junit.After; @@ -98,7 +97,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateHistogramIT extends ParameterizedOpenSearchIntegTestCase { +public class DateHistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static Map<ZonedDateTime, Map<String, Object>> expectedMultiSortBuckets; @@ -106,8 +105,8 @@ private 
ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } - public DateHistogramIT(Settings dynamicSettings) { - super(dynamicSettings); + public DateHistogramIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ZonedDateTime date(String date) { return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } @@ -183,9 +177,9 @@ public void setupSuiteScopeCluster() throws Exception { indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3 indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 - indexDoc(3, 23, 6) + indexDoc(3, 23, 6) // date: Mar 23, dates: Mar 23, Apr 24 ) - ); // date: Mar 23, dates: Mar 23, Apr 24 + ); indexRandom(true, builders); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java index d44071e1ef9c5..eea896e01afe1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -38,12 +38,11 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -69,13 +68,13 @@ */ @OpenSearchIntegTestCase.SuiteScopeTestCase @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class DateHistogramOffsetIT extends ParameterizedOpenSearchIntegTestCase { +public class DateHistogramOffsetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss"; private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT); - public DateHistogramOffsetIT(Settings dynamicSettings) { - super(dynamicSettings); + public DateHistogramOffsetIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ZonedDateTime date(String date) { return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java index 
ae4243019ffb1..f00b601a54b80 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.bucket.range.Range.Bucket; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.time.ZoneId; @@ -81,10 +80,10 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateRangeIT extends ParameterizedOpenSearchIntegTestCase { +public class DateRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public DateRangeIT(Settings dynamicSettings) { - super(dynamicSettings); + public DateRangeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { return client().prepareIndex("idx") .setSource( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 1d5f7f93e7410..b62e5f0f7f3b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -71,12 +70,12 @@ * Tests the Sampler aggregation */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DiversifiedSamplerIT extends ParameterizedOpenSearchIntegTestCase { +public class DiversifiedSamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; - public DiversifiedSamplerIT(Settings dynamicSettings) { - super(dynamicSettings); + public 
DiversifiedSamplerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -87,11 +86,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java index 88bb41923e53f..ccb4af8386472 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java @@ -88,8 +88,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class DoubleTermsIT extends AbstractTermsTestCase { - public DoubleTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public DoubleTermsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java index 7aa98803403e0..2863711d49580 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -68,12 +67,12 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class FilterIT extends ParameterizedOpenSearchIntegTestCase { +public class FilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs, numTag1Docs; - public FilterIT(Settings dynamicSettings) { - super(dynamicSettings); + public FilterIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java new file mode 100644 index 0000000000000..e051265d4b3bc --- /dev/null +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; + +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class FilterRewriteIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + + // simulate segment level match all + private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true); + private static final Map<String, Long> expected = new HashMap<>(); + + public FilterRewriteIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected void setupSuiteScopeCluster() throws Exception { + assertAcked(client().admin().indices().prepareCreate("idx").get()); + expected.clear(); + + final int repeat = randomIntBetween(2, 10); + final Set<Long> longTerms = new HashSet<>(); + + for (int i = 0; i < repeat; i++) { + final List<IndexRequestBuilder> indexRequests = new ArrayList<>(); + + long longTerm; + do { + longTerm = randomInt(repeat * 2); + } while (!longTerms.add(longTerm)); + ZonedDateTime time = ZonedDateTime.of(2024, 1, ((int) longTerm) + 1, 0, 0, 0, 0, ZoneOffset.UTC); + String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time); + + final int frequency = randomBoolean() ? 
1 : randomIntBetween(2, 20); + for (int j = 0; j < frequency; j++) { + indexRequests.add( + client().prepareIndex("idx") + .setSource(jsonBuilder().startObject().field("date", dateTerm).field("match", true).endObject()) + ); + } + expected.put(dateTerm + "T00:00:00.000Z", (long) frequency); + + indexRandom(true, false, indexRequests); + } + + ensureSearchable(); + } + + public void testMinDocCountOnDateHistogram() throws Exception { + final SearchResponse allResponse = client().prepareSearch("idx") + .setSize(0) + .setQuery(QUERY) + .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto = allResponse.getAggregations().get("histo"); + Map<String, Long> results = new HashMap<>(); + allHisto.getBuckets().forEach(bucket -> results.put(bucket.getKeyAsString(), bucket.getDocCount())); + + for (Map.Entry<String, Long> entry : expected.entrySet()) { + assertEquals(entry.getValue(), results.get(entry.getKey())); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java index b6cf515df78ba..e64877a1d4030 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -72,7 +71,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class FiltersIT extends ParameterizedOpenSearchIntegTestCase { +public class FiltersIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { static int numDocs, numTag1Docs, numTag2Docs, numOtherDocs; @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java index 025bebf8b254d..ed0bd3aad5bab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import 
org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.InternalAggregation; @@ -50,7 +49,7 @@ import org.opensearch.search.aggregations.bucket.range.Range.Bucket; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import org.hamcrest.Matchers; @@ -76,10 +75,10 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoDistanceIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoDistanceIT(Settings staticSettings) { + super(staticSettings); } @Override @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); source.startArray("location"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java index be31a3afadad0..a4aea6096a6e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java @@ -37,13 +37,12 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -61,12 +60,12 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GlobalIT extends ParameterizedOpenSearchIntegTestCase { +public class GlobalIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs; - public GlobalIT(Settings dynamicSettings) { - super(dynamicSettings); + public GlobalIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java index 75f57d1cc4c0e..4abd068d6fe37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -56,7 +55,7 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -91,7 +90,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class HistogramIT extends ParameterizedOpenSearchIntegTestCase { +public class HistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; @@ -102,8 +101,8 @@ public class HistogramIT extends ParameterizedOpenSearchIntegTestCase { static long[] valueCounts, valuesCounts; static Map<Long, Map<String, Object>> expectedMultiSortBuckets; - public HistogramIT(Settings dynamicSettings) { - super(dynamicSettings); + public HistogramIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -114,11 +113,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java index 14a3685bd183e..44789ea63f536 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -45,7 +44,7 @@ import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.bucket.range.Range; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class IpRangeIT extends ParameterizedOpenSearchIntegTestCase { +public class IpRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public IpRangeIT(Settings dynamicSettings) { - super(dynamicSettings); + public IpRangeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class DummyScriptPlugin extends MockScriptPlugin { @Override public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java index c712c97af5c71..4d2da4fa1d14b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java @@ -51,8 +51,8 @@ public class IpTermsIT extends AbstractTermsTestCase { - public IpTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public IpTermsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java index 345cbdae8ef07..49031bfd3fc1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java @@ -86,8 +86,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class LongTermsIT extends AbstractTermsTestCase { - public LongTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public LongTermsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java index 90dafc0d57887..781d2acc5e2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java @@ -82,8 +82,8 @@ public class MinDocCountIT extends AbstractTermsTestCase { private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true); private static int cardinality; - public MinDocCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public MinDocCountIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java index ea5a59d89309f..09133f720f9f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -34,8 +34,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class MultiTermsIT extends BaseStringTermsTestCase { - public MultiTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiTermsIT(Settings staticSettings) { + super(staticSettings); } // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 6289cd5e36151..3eb813dcb91ef 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.Comparators; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -67,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NaNSortingIT extends ParameterizedOpenSearchIntegTestCase { +public class NaNSortingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private enum SubAggregation { AVG("avg") { @@ -139,8 +138,8 @@ public String sortKey() { public abstract double getValue(Aggregation aggregation); } - public NaNSortingIT(Settings dynamicSettings) { - super(dynamicSettings); + public NaNSortingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -151,11 +150,6 @@ public static Collection<Object[]> parameters() 
{ ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("string_value", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index 7af2ac218800d..288d4d2c4e525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -57,7 +56,7 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -92,14 +91,14 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NestedIT extends ParameterizedOpenSearchIntegTestCase { +public class NestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int numParents; private static int[] numChildren; private static SubAggCollectionMode aggCollectionMode; - public NestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public NestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -110,11 +109,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index 5812b7796c33e..50cee4e9ecd92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -79,15 +78,15 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class RangeIT extends ParameterizedOpenSearchIntegTestCase { +public class RangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; static int numDocs; - public RangeIT(Settings dynamicSettings) { - super(dynamicSettings); + public RangeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -98,11 +97,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java index 2716db6b7e745..3bf9233d3441d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; @@ -47,7 +46,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.ValueCount; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -76,10 +75,10 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ReverseNestedIT extends ParameterizedOpenSearchIntegTestCase { +public class ReverseNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ReverseNestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public ReverseNestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -90,11 +89,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index c7b03d21cb6bb..3decab92acbff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -71,7 +70,7 @@ * Tests the Sampler aggregation */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SamplerIT extends ParameterizedOpenSearchIntegTestCase { +public class SamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; @@ -79,8 +78,8 @@ public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } - public SamplerIT(Settings dynamicSettings) { - super(dynamicSettings); + public SamplerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index 66d761c56634e..4cab6deb08bb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.bucket.range.Range; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -77,10 +76,10 @@ * we can make sure that the reduce is properly propagated by checking that empty buckets were created. 
*/ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ShardReduceIT extends ParameterizedOpenSearchIntegTestCase { +public class ShardReduceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ShardReduceIT(Settings dynamicSettings) { - super(dynamicSettings); + public ShardReduceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") .setSource( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index 7c7cc12888307..66cce21bcf86f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -47,8 +47,8 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { - public ShardSizeTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ShardSizeTermsIT(Settings staticSettings) { + super(staticSettings); } public void testNoShardSizeString() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index e914b87754865..f2e9265fa5cf9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -65,7 +64,7 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ScriptHeuristic; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods; import java.io.IOException; @@ -95,14 +94,14 @@ import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SignificantTermsSignificanceScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class SignificantTermsSignificanceScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String INDEX_NAME = "testidx"; static final String TEXT_FIELD = "text"; static final String CLASS_FIELD = "class"; - public SignificantTermsSignificanceScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public 
SignificantTermsSignificanceScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(TestScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 343cea4b94c87..add6b71cb1753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -38,14 +38,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -67,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class TermsDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; private static final String LONG_FIELD_NAME = "l_value"; @@ -79,8 +78,8 @@ public static String randomExecutionHint() { private static int numRoutingValues; - public TermsDocCountErrorIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsDocCountErrorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java index 5ad913e8c7086..422af15d2881d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java @@ -14,27 +14,27 @@ import org.opensearch.action.search.SearchResponse; import 
org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.IndexSettings.MINIMUM_REFRESH_INTERVAL; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) -public class TermsFixedDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsFixedDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; - public TermsFixedDocCountErrorIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsFixedDocCountErrorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -45,11 +45,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleAggErrorMultiShard() throws Exception { // size = 1, shard_size = 2 // Shard_1 [A, A, A, A, B, B, C, C, D, D] -> Buckets {"A" : 4, "B" : 2} @@ -71,7 +66,10 @@ public void testSimpleAggErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -89,7 +87,10 @@ public void testSimpleAggErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -127,7 +128,10 @@ public void testSimpleAggErrorSingleShard() throws Exception { assertAcked( prepareCreate("idx_shard_error").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -170,7 +174,10 @@ public void testSliceLevelDocCountErrorSingleShard() throws Exception { assertAcked( prepareCreate("idx_slice_error").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -248,7 +255,10 @@ public void testSliceLevelDocCountErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -288,7 +298,10 @@ public void testSliceLevelDocCountErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 3851b16551795..1cc250c00dba9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.BucketOrder; @@ -44,7 +43,7 @@ import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -61,12 +60,12 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; 
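A minimal sketch (not part of the patch) of the index-settings change the TermsFixedDocCountErrorIT hunks above repeat for every staged test index: the refresh interval is pinned to IndexSettings.MINIMUM_REFRESH_INTERVAL, presumably so automatic refreshes cannot create extra segments between the manual refresh calls these doc-count-error tests rely on. The helper name createSingleShardIndex and its argument are hypothetical; STRING_FIELD_NAME, prepareCreate and assertAcked come from the test class itself, and MINIMUM_REFRESH_INTERVAL is statically imported from org.opensearch.index.IndexSettings as in the hunk above.

    // hypothetical helper, mirroring the settings builder chain added in the hunks above
    private void createSingleShardIndex(String name) {
        assertAcked(
            prepareCreate(name).setMapping(STRING_FIELD_NAME, "type=keyword")
                .setSettings(
                    Settings.builder()
                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                        // pin the refresh interval so the test controls when segments appear
                        .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL)
                )
        );
    }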
import static org.hamcrest.Matchers.equalTo; -public class TermsShardMinDocCountIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsShardMinDocCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String index = "someindex"; - public TermsShardMinDocCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsShardMinDocCountIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java index 20caa4fd076fe..79aa4a648310a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java @@ -38,8 +38,8 @@ public class BaseStringTermsTestCase extends AbstractTermsTestCase { protected static final String MULTI_VALUED_FIELD_NAME = "s_values"; protected static Map<String, Map<String, Object>> expectedMultiSortBuckets; - public BaseStringTermsTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public BaseStringTermsTestCase(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index 8c727d280ec52..edf9cd432dda2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -79,8 +79,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class StringTermsIT extends BaseStringTermsTestCase { - public StringTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public StringTermsIT(Settings staticSettings) { + super(staticSettings); } // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index 9ebec21367164..db4ee3571d141 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -48,7 +47,7 @@ import 
org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -71,10 +70,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class CardinalityIT extends ParameterizedOpenSearchIntegTestCase { +public class CardinalityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CardinalityIT(Settings dynamicSettings) { - super(dynamicSettings); + public CardinalityIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 94756f3fe9f99..8122304ba992c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -38,12 +38,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.BucketOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -54,10 +53,10 @@ import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; -public class CardinalityWithRequestBreakerIT extends ParameterizedOpenSearchIntegTestCase { +public class CardinalityWithRequestBreakerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CardinalityWithRequestBreakerIT(Settings dynamicSettings) { - super(dynamicSettings); + public CardinalityWithRequestBreakerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Test that searches using cardinality aggregations returns all request breaker memory. 
*/ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java index 3d804b9aa626e..4a2c100690de4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -70,8 +70,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { - public ExtendedStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExtendedStatsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index 78100d1778ecf..ed87fa6d8f5f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -55,8 +55,8 @@ public class GeoCentroidIT extends AbstractGeoTestCase { private static final String aggName = "geoCentroid"; - public GeoCentroidIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoCentroidIT(Settings staticSettings) { + super(staticSettings); } public void testEmptyAggregation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 7ca5130388eea..ae67f0b1c0b66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -72,8 +72,8 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { - public HDRPercentileRanksIT(Settings dynamicSettings) { - super(dynamicSettings); + public HDRPercentileRanksIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java index ec913b3e130f5..ff1cab85c18e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -75,8 +75,8 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { - public HDRPercentilesIT(Settings dynamicSettings) { - super(dynamicSettings); + public HDRPercentilesIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index b8447d682abae..0edba475a6401 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -91,8 +91,8 @@ public class 
MedianAbsoluteDeviationIT extends AbstractNumericTestCase { private static double singleValueExactMAD; private static double multiValueExactMAD; - public MedianAbsoluteDeviationIT(Settings dynamicSettings) { - super(dynamicSettings); + public MedianAbsoluteDeviationIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java index ced2358ac3f78..1725aa7847d72 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -56,7 +55,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -93,12 +92,12 @@ @ClusterScope(scope = Scope.SUITE) @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ScriptedMetricIT extends ParameterizedOpenSearchIntegTestCase { +public class ScriptedMetricIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static long numDocs; - public ScriptedMetricIT(Settings dynamicSettings) { - super(dynamicSettings); + public ScriptedMetricIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java index f957a74eeb9d0..3708e1e6ab21b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java @@ -66,8 +66,8 @@ import static org.hamcrest.Matchers.sameInstance; public class StatsIT extends AbstractNumericTestCase { - public StatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public StatsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java index 382d656448114..b2aa3438b2306 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java @@ -68,8 +68,8 @@ public class SumIT extends AbstractNumericTestCase { - public SumIT(Settings dynamicSettings) { - super(dynamicSettings); + public SumIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 941d3a888db29..4225c027c4d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -72,8 +72,8 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { - public TDigestPercentileRanksIT(Settings dynamicSettings) { - super(dynamicSettings); + public TDigestPercentileRanksIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java index 6457cf9307fa1..974e90fab16e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -74,8 +74,8 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { - public TDigestPercentilesIT(Settings dynamicSettings) { - super(dynamicSettings); + public TDigestPercentilesIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index 10e51079cf389..5d84452998e40 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -42,7 +42,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.common.document.DocumentField; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -70,7 +69,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -111,13 +110,13 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase() -public class TopHitsIT extends ParameterizedOpenSearchIntegTestCase { +public class TopHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TERMS_AGGS_FIELD = "terms"; private static final String SORT_FIELD = "sort"; - public TopHitsIT(Settings dynamicSettings) { - super(dynamicSettings); + public TopHitsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -128,11 +127,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java index 833d1ce3bb4c3..4610281c4b8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java @@ -35,7 +35,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -45,7 +44,7 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -73,10 +72,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ValueCountIT extends ParameterizedOpenSearchIntegTestCase { +public class ValueCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ValueCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public ValueCountIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -87,11 +86,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java index bec9203384026..48fd06bac285b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AvgBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class AvgBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class AvgBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public AvgBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public AvgBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java index 4c3129eb89e3b..1b22cf2018d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; 
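A minimal sketch (not taken verbatim from the patch) of the shape every integration test in these hunks is migrated to: the class extends ParameterizedStaticSettingsOpenSearchIntegTestCase, the constructor argument is renamed from dynamicSettings to staticSettings, and the featureFlagSettings() override forcing FeatureFlags.CONCURRENT_SEGMENT_SEARCH is dropped, presumably because the concurrent-search toggle now arrives through the injected parameter settings instead. ExampleIT is a hypothetical class name, and the parameters() body shows the assumed off/on pair; the real factory bodies are elided in the hunks above.

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

    import java.util.Arrays;
    import java.util.Collection;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    public class ExampleIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

        public ExampleIT(Settings staticSettings) {
            // the base class applies these as node-level (static) settings, hence the rename
            super(staticSettings);
        }

        @ParametersFactory
        public static Collection<Object[]> parameters() {
            // assumed parameter set: run each test with concurrent segment search disabled and enabled
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }
    }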
import java.io.IOException; import java.util.ArrayList; @@ -76,7 +75,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketScriptIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -90,8 +89,8 @@ public class BucketScriptIT extends ParameterizedOpenSearchIntegTestCase { private static int maxNumber; private static long date; - public BucketScriptIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketScriptIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -102,11 +101,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java index a7b28add7373a..7dca1d0d79b1e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,7 +49,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -77,7 +76,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSelectorIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketSelectorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -89,8 +88,8 @@ public class BucketSelectorIT extends ParameterizedOpenSearchIntegTestCase { private static int minNumber; private static int maxNumber; - public BucketSelectorIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketSelectorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -101,11 +100,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java index 2e4fd7a412118..ffb607866935b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -48,7 +47,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.time.ZonedDateTime; @@ -75,7 +74,7 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSortIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX = "bucket-sort-it-data-index"; private static final String INDEX_WITH_GAPS = "bucket-sort-it-data-index-with-gaps"; @@ -85,8 +84,8 @@ public class BucketSortIT extends ParameterizedOpenSearchIntegTestCase { private static final String VALUE_1_FIELD = "value_1"; private static final String VALUE_2_FIELD = "value_2"; - public BucketSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex(INDEX, INDEX_WITH_GAPS); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java index b05ff7b4329cd..8c89c1232ebb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -39,7 +39,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.support.AggregationPath; import 
org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -76,15 +75,15 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateDerivativeIT extends ParameterizedOpenSearchIntegTestCase { +public class DateDerivativeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { // some index names used during these tests private static final String IDX_DST_START = "idx_dst_start"; private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; - public DateDerivativeIT(Settings dynamicSettings) { - super(dynamicSettings); + public DateDerivativeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 41bbffc13658b..f8def40ec003a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -78,7 +77,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DerivativeIT extends ParameterizedOpenSearchIntegTestCase { +public class DerivativeIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -112,11 +111,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 299827e2413d4..1bd04cc13268f 
100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ExtendedStatsBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class ExtendedStatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -80,8 +79,8 @@ public class ExtendedStatsBucketIT extends ParameterizedOpenSearchIntegTestCase static int numValueBuckets; static long[] valueCounts; - public ExtendedStatsBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExtendedStatsBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index dc3b690c7f78f..ea6fcbd6a1560 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -58,7 +57,7 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -80,7 +79,7 @@ import static org.hamcrest.core.IsNull.notNullValue; 
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class MaxBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class MaxBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -91,8 +90,8 @@ public class MaxBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public MaxBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public MaxBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -103,11 +102,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index 189bfd9b5b80a..44d12436382f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MinBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class MinBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class MinBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public MinBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public MinBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 8ad3107ac33ac..d35b80b7918fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -43,14 +43,13 @@ import org.opensearch.client.Client; import org.opensearch.common.collect.EvictingQueue; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -77,7 +76,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MovAvgIT extends ParameterizedOpenSearchIntegTestCase { +public class MovAvgIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; private static final String VALUE_FIELD2 = "v_value2"; @@ -133,8 +132,8 @@ public String toString() { } } - public MovAvgIT(Settings dynamicSettings) { - super(dynamicSettings); + public MovAvgIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -145,11 +144,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { prepareCreate("idx").setMapping( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java index 580497715ed6d..29cb334bfcd00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -47,7 +46,7 @@ import org.opensearch.search.aggregations.metrics.Percentile; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; 
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class PercentilesBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class PercentilesBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; @@ -80,8 +79,8 @@ public class PercentilesBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public PercentilesBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public PercentilesBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index b4da63802bc50..507bff51f0e39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -38,12 +38,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.collect.EvictingQueue; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SerialDiffIT extends ParameterizedOpenSearchIntegTestCase { +public class SerialDiffIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -98,8 +97,8 @@ public String toString() { } } - public SerialDiffIT(Settings dynamicSettings) { - super(dynamicSettings); + public SerialDiffIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -110,11 +109,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ValuesSourceAggregationBuilder<? 
extends ValuesSourceAggregationBuilder<?>> randomMetric(String name, String field) { int rand = randomIntBetween(0, 3); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java index 21fdd5e761e77..fbaf799871c8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class StatsBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class StatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; static int numDocs; @@ -76,8 +75,8 @@ public class StatsBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public StatsBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public StatsBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java index d4bd8f21b2a99..a5967124ff921 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; 
import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SumBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class SumBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class SumBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public SumBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public SumBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 28ada82a1c56b..fb84134120e00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -21,7 +21,6 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -37,7 +36,7 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.hamcrest.MatcherAssert; @@ -61,13 +60,13 @@ import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchBackpressureIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchBackpressureIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); private static final int MOVING_AVERAGE_WINDOW_SIZE = 10; - public SearchBackpressureIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchBackpressureIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index bd623ccdf2731..ad1ce0582cfb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -42,11 +42,10 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchService; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import java.util.Arrays; @@ -61,10 +60,10 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchRedStateIndexIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchRedStateIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchRedStateIndexIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchRedStateIndexIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; buildRedIndex(numShards); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index a5989b693d332..681f7081fa2dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -39,9 +39,8 @@ import org.opensearch.client.Client; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -54,10 +53,10 @@ * This test basically verifies that search with a single shard active (cause we indexed to it) and other * shards possibly not active at all (cause they haven't allocated) will still work. 
*/ -public class SearchWhileCreatingIndexIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWhileCreatingIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWhileCreatingIndexIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWhileCreatingIndexIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testIndexCausesIndexCreation() throws Exception { searchWhileCreatingIndex(false, 1); // 1 replica in our default... } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java index 6d2ec845afa98..f7b8b0df7dca7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java @@ -40,10 +40,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchWhileRelocatingIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWhileRelocatingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWhileRelocatingIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWhileRelocatingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSearchAndRelocateConcurrentlyRandomReplicas() throws Exception { testSearchAndRelocateConcurrently(randomIntBetween(0, 1)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java index aa82b9d21c7fb..614ec2ebd634a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java @@ -49,14 +49,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import 
org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.engine.ThrowingLeafReaderWrapper; @@ -71,10 +70,10 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class SearchWithRandomExceptionsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWithRandomExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWithRandomExceptionsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWithRandomExceptionsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(RandomExceptionDirectoryReaderWrapper.TestPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java index 446a0bce58d66..b45b334fc1d1c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -45,13 +45,12 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.store.MockFSDirectoryFactory; import org.opensearch.test.store.MockFSIndexStore; @@ -64,10 +63,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -public class SearchWithRandomIOExceptionsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWithRandomIOExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWithRandomIOExceptionsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWithRandomIOExceptionsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(MockFSIndexStore.TestPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java index cbe52abf5279b..0e337822ba0e7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java @@ -44,11 +44,10 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -64,10 +63,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class TransportSearchFailuresIT extends ParameterizedOpenSearchIntegTestCase { +public class TransportSearchFailuresIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public TransportSearchFailuresIT(Settings dynamicSettings) { - super(dynamicSettings); + public TransportSearchFailuresIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int maximumNumberOfReplicas() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index edceb0cbc0d24..a82b6f12755ca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -54,7 +53,7 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -80,10 +79,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class TransportTwoNodesSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class TransportTwoNodesSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public TransportTwoNodesSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public 
TransportTwoNodesSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -94,11 +93,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 87f2153eb800f..13b4abb58b4df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -42,7 +42,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -54,7 +53,7 @@ import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -74,10 +73,10 @@ import static org.hamcrest.CoreMatchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2) -public class FetchSubPhasePluginIT extends ParameterizedOpenSearchIntegTestCase { +public class FetchSubPhasePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FetchSubPhasePluginIT(Settings dynamicSettings) { - super(dynamicSettings); + public FetchSubPhasePluginIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index 1a730c01e4890..b743c00bf4549 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; @@ -58,7 +57,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -88,10 +87,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class InnerHitsIT extends ParameterizedOpenSearchIntegTestCase { +public class InnerHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public InnerHitsIT(Settings dynamicSettings) { - super(dynamicSettings); + public InnerHitsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -102,11 +101,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index 83cedb8c20e1d..a1adc6f99b92a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentHelper; @@ -45,7 +44,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -62,12 +61,14 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.hasKey; -public class MatchedQueriesIT extends ParameterizedOpenSearchIntegTestCase { +public class MatchedQueriesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MatchedQueriesIT(Settings dynamicSettings) { - super(dynamicSettings); + public MatchedQueriesIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); @@ -101,15 +97,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .should(rangeQuery("number").gte(2).queryName("test2")) ) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -119,15 +118,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .setQuery( 
boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else if (hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -153,12 +155,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -174,12 +179,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -203,9 +211,11 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex assertHitCount(searchResponse, 3L); for 
(SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -237,13 +247,15 @@ public void testRegExpQuerySupportsName() throws InterruptedException { SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("regex")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex")); + assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -258,15 +270,17 @@ public void testPrefixQuerySupportsName() throws InterruptedException { refresh(); indexRandomForConcurrentSearch("test1"); - SearchResponse searchResponse = client().prepareSearch() + var query = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) - .get(); + .setIncludeNamedQueriesScore(true); + var searchResponse = query.get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("prefix")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix")); + assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -288,8 +302,9 @@ public void testFuzzyQuerySupportsName() throws InterruptedException { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("fuzzy")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy")); + assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -306,13 +321,15 @@ public void testWildcardQuerySupportsName() throws InterruptedException { SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("wildcard")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + 
assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard")); + assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -334,8 +351,9 @@ public void testSpanFirstQuerySupportsName() throws InterruptedException { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("span")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("span")); + assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -369,11 +387,13 @@ public void testMatchedWithShould() throws Exception { assertHitCount(searchResponse, 2L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("dolor")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor")); + assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f)); } else if (hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("elit")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit")); + assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -397,7 +417,10 @@ public void testMatchedWithWrapperQuery() throws Exception { for (QueryBuilder query : queries) { SearchResponse searchResponse = client().prepareSearch().setQuery(query).get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("abc")); + SearchHit hit = searchResponse.getHits().getAt(0); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc")); + assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index fe17c3e22d43c..66cbf36137551 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -35,12 +35,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -57,10 +56,10 @@ * Integration test for highlighters registered by a plugin. 
*/ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class CustomHighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class CustomHighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CustomHighlighterSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public CustomHighlighterSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomHighlighterPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 2afa911223074..5bfc556bb629e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -49,7 +49,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; @@ -75,7 +74,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -128,13 +127,13 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class HighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class HighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { // TODO as we move analyzers out of the core we need to move some of these into HighlighterWithAnalyzersTests private static final String[] ALL_TYPES = new String[] { "plain", "fvh", "unified" }; - public HighlighterSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public HighlighterSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -145,11 +144,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index f5d1b8234558e..4d398f8ca09cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -38,13 +38,12 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -59,10 +58,10 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class FieldCapabilitiesIT extends ParameterizedOpenSearchIntegTestCase { +public class FieldCapabilitiesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FieldCapabilitiesIT(Settings dynamicSettings) { - super(dynamicSettings); + public FieldCapabilitiesIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -73,11 +72,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setUp() throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index ed8fe74504f92..906d45ef84b3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; @@ -63,7 +62,7 @@ import org.opensearch.search.lookup.FieldLookup; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -104,10 +103,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class SearchFieldsIT extends ParameterizedOpenSearchIntegTestCase { +public class 
SearchFieldsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchFieldsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchFieldsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 3a6624c2ad2e6..0380b3c7ddb89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -46,7 +46,6 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.FunctionScoreQuery.ScoreMode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -56,7 +55,7 @@ import org.opensearch.search.MultiValueMode; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.time.ZoneOffset; @@ -91,10 +90,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -public class DecayFunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class DecayFunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public DecayFunctionScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public DecayFunctionScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java index 62d0d89c644a5..0573dcfc4863d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ 
-58,7 +57,7 @@ import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -83,10 +82,10 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class ExplainableScriptIT extends ParameterizedOpenSearchIntegTestCase { +public class ExplainableScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ExplainableScriptIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExplainableScriptIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class ExplainableScriptPlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index d53f55b98bd23..6956833cf6d62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -38,9 +38,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -61,10 +60,10 @@ /** * Tests for the {@code field_value_factor} function in a function_score query. 
*/ -public class FunctionScoreFieldValueIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScoreFieldValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FunctionScoreFieldValueIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScoreFieldValueIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testFieldValueFactor() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 3b80d437e95c0..4f267f0059291 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -39,7 +39,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; @@ -50,7 +49,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -78,13 +77,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class FunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String TYPE = "type"; static final String INDEX = "index"; - public FunctionScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index a91f53dae04d2..593f844305743 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -40,7 +40,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.query.functionscore.DecayFunction; @@ -52,7 +51,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -71,10 +70,10 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class FunctionScorePluginIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScorePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FunctionScorePluginIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScorePluginIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index bda6284d9535a..5121d5023fd95 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.Operator; @@ -55,7 +54,7 @@ import org.opensearch.search.rescore.QueryRescoreMode; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -84,6 +83,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSecondHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasMatchedQueries; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -91,10 +91,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -public class QueryRescorerIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryRescorerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public QueryRescorerIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryRescorerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -105,11 +105,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testEnforceWindowSize() throws InterruptedException { createIndex("test"); // this @@ -600,7 +595,7 @@ public void testExplain() throws Exception { SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR).queryName("hello-world")) .setRescorer(innerRescoreQuery, 5) .setExplain(true) .get(); @@ -608,7 +603,10 @@ public void testExplain() throws Exception { assertFirstHit(searchResponse, hasId("1")); assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - + final String[] matchedQueries = { "hello-world" }; + assertFirstHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertSecondHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertThirdHit(searchResponse, hasMatchedQueries(matchedQueries)); for (int j = 0; j < 3; 
j++) { assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode])); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 69e30fc879dd8..f1205ba0f1e93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -36,7 +36,6 @@ import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -47,7 +46,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.CoreMatchers; import java.util.Arrays; @@ -76,10 +75,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -public class RandomScoreFunctionIT extends ParameterizedOpenSearchIntegTestCase { +public class RandomScoreFunctionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public RandomScoreFunctionIT(Settings dynamicSettings) { - super(dynamicSettings); + public RandomScoreFunctionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -90,11 +89,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index ba519be04edff..701ff0a94baf2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -56,7 +56,6 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.PolygonBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.Streams; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; @@ -64,7 +63,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import org.junit.BeforeClass; @@ -99,10 +98,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class GeoFilterIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoFilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoFilterIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoFilterIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java index 85cb087585d31..2010a288427b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java @@ -39,10 +39,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoPolygonIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoPolygonIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoPolygonIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoPolygonIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return 
Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java index 1f9b6ae434f75..6dbffa019382d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java @@ -41,14 +41,13 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -61,10 +60,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class GeoShapeIntegrationIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoShapeIntegrationIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoShapeIntegrationIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java index d21d6036c9673..e9115cf7dfbce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -50,7 +49,7 @@ import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -63,10 +62,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class LegacyGeoShapeIntegrationIT 
extends ParameterizedOpenSearchIntegTestCase { +public class LegacyGeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public LegacyGeoShapeIntegrationIT(Settings dynamicSettings) { - super(dynamicSettings); + public LegacyGeoShapeIntegrationIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Test that orientation parameter correctly persists across cluster restart */ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index dc7c4e687c2fa..36fc5de0a5cf7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder; @@ -50,7 +49,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -79,10 +78,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; -public class MoreLikeThisIT extends ParameterizedOpenSearchIntegTestCase { +public class MoreLikeThisIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MoreLikeThisIT(Settings dynamicSettings) { - super(dynamicSettings); + public MoreLikeThisIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -93,11 +92,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index b35208941d2a2..9f49b7a27cda4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -37,10 +37,9 @@ import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -52,10 +51,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.hamcrest.Matchers.equalTo; -public class MultiSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java index 71f82d7c0b412..a6554271a0bc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -31,11 +30,6 @@ */ public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /* * Tests the explain output for multiple docs. 
Concurrent search with multiple slices is tested * here as call to indexRandomForMultipleSlices is made and compared with explain output for diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 8eeffcbecb377..19e38da1aed05 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -47,7 +47,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -56,7 +55,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortMode; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -77,10 +76,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class SimpleNestedIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SimpleNestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleNestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index a3432bfe7e3e4..8bea5ef97fbba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -32,12 +32,11 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -67,7 +66,7 @@ * Multi node integration tests for PIT creation and search operation with PIT ID. 
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeIT extends ParameterizedOpenSearchIntegTestCase { +public class PitMultiNodeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public PitMultiNodeIT(Settings settings) { super(settings); } @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setupIndex() throws ExecutionException, InterruptedException { createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 6e40c08ed08a1..bc9eeb528b031 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -44,13 +44,12 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -70,10 +69,10 @@ import static org.hamcrest.Matchers.not; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchPreferenceIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchPreferenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchPreferenceIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchPreferenceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java index 82dd6225fda4e..2f608a0cbe06f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import 
org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; @@ -50,7 +49,7 @@ import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.QueryProfileShardResult; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.core.IsNull; import java.util.ArrayList; @@ -83,7 +82,7 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationProfilerIT extends ParameterizedOpenSearchIntegTestCase { +public class AggregationProfilerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String BUILD_LEAF_COLLECTOR = AggregationTimingType.BUILD_LEAF_COLLECTOR.toString(); private static final String COLLECT = AggregationTimingType.COLLECT.toString(); @@ -166,8 +165,8 @@ public class AggregationProfilerIT extends ParameterizedOpenSearchIntegTestCase private static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; private static final String REASON_AGGREGATION = "aggregation"; - public AggregationProfilerIT(Settings dynamicSettings) { - super(dynamicSettings); + public AggregationProfilerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -178,11 +177,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index ef73438114079..412a94aaf1b3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -42,14 +42,13 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -68,7 +67,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryProfilerIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private final boolean concurrentSearchEnabled; private static final String MAX_PREFIX = "max_"; private static final String MIN_PREFIX = "min_"; @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, 
"true").build(); - } - /** * This test simply checks to make sure nothing crashes. Test indexes 100-150 documents, * constructs 20-100 random queries and tries to profile them diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java index e3253ea583ac2..b95542382e5fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java @@ -38,13 +38,12 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -62,10 +61,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class ExistsIT extends ParameterizedOpenSearchIntegTestCase { +public class ExistsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ExistsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExistsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -76,11 +75,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - // TODO: move this to a unit test somewhere... 
public void testEmptyIndex() throws Exception { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java index 457114bac33b8..392f8b036b7a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; @@ -54,7 +53,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -92,10 +91,10 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; -public class MultiMatchQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiMatchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiMatchQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiMatchQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -106,11 +105,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(MockKeywordPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 1ca5859f23bca..c43a9c23661ea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -47,7 +46,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import org.junit.BeforeClass; @@ -70,12 +69,12 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class QueryStringIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; - public QueryStringIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryStringIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index 55029712a061c..136ddce152f63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.RangeQueryBuilder; @@ -46,7 +45,7 @@ import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -67,10 +66,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; -public class ScriptScoreQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class ScriptScoreQueryIT extends 
ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ScriptScoreQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public ScriptScoreQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -81,11 +80,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 03312c6e1e2f7..a58db51780826 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -51,7 +51,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -80,7 +79,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestIssueLogging; import java.io.IOException; @@ -147,10 +146,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class SearchQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -161,11 +160,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockAnalysisPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index d8902238005da..31678d3f018a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -43,7 +43,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -60,7 +59,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.BeforeClass; import java.io.IOException; @@ -95,12 +94,12 @@ /** * Tests for the {@code simple_query_string} query */ -public class SimpleQueryStringIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleQueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; - public SimpleQueryStringIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleQueryStringIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -111,11 +110,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index ae00904f237a5..7dbc61a3ced39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; @@ -50,7 +49,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -71,7 +70,7 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class ScriptQuerySearchIT 
extends ParameterizedOpenSearchIntegTestCase { +public class ScriptQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ScriptQuerySearchIT(Settings settings) { super(settings); } @@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java index c7a6d18f881c6..55b3cfeef7419 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java @@ -40,14 +40,13 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -60,7 +59,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -public class DuelScrollIT extends ParameterizedOpenSearchIntegTestCase { +public class DuelScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public DuelScrollIT(Settings settings) { super(settings); } @@ -73,11 +72,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index b82048ffc924e..35b5a7949b20b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -60,7 +59,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.After; @@ -92,7 +91,7 @@ /** * Tests for scrolling. */ -public class SearchScrollIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SearchScrollIT(Settings settings) { super(settings); } @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @After public void cleanup() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index 27002b844da1d..38f65c8c2d0da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -39,9 +39,8 @@ import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -58,7 +57,7 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 0) -public class SearchScrollWithFailingNodesIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchScrollWithFailingNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SearchScrollWithFailingNodesIT(Settings settings) { super(settings); } @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { return 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index b99f66850e9e3..13c510ff21338 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -47,12 +47,11 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -69,7 +68,7 @@ 
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class SearchAfterIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchAfterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX_NAME = "test"; private static final int NUM_DOCS = 100; @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testsShouldFail() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 7aae41d939cac..1c1587a3be600 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.rest.RestStatus; @@ -52,7 +51,7 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -77,7 +76,7 @@ import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; -public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SimpleSearchIT(Settings settings) { super(settings); @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSearchNullIndex() { expectThrows( NullPointerException.class, @@ -305,7 +299,15 @@ public void dotestSimpleTerminateAfterCountWithSize(int size, int max) throws Ex .setSize(size) .setTrackTotalHits(true) .get(); - assertHitCount(searchResponse, i); + + // Do not expect an exact match as an optimization introduced by https://issues.apache.org/jira/browse/LUCENE-10620 + // can produce a total hit count > terminate_after, but this only kicks in + // when size = 0 which is when TotalHitCountCollector is used.
+ if (size == 0) { + assertHitCount(searchResponse, i, max); + } else { + assertHitCount(searchResponse, i); + } assertTrue(searchResponse.isTerminatedEarly()); assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } @@ -319,7 +321,6 @@ public void dotestSimpleTerminateAfterCountWithSize(int size, int max) throws Ex assertFalse(searchResponse.isTerminatedEarly()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") public void testSimpleTerminateAfterCountSize0() throws Exception { int max = randomIntBetween(3, 29); dotestSimpleTerminateAfterCountWithSize(0, max); @@ -330,6 +331,24 @@ public void testSimpleTerminateAfterCountRandomSize() throws Exception { dotestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max); } + /** + * Special cases when size = 0: + * + * If track_total_hits = true: + * Weight#count optimization can cause totalHits in the response to be up to the total doc count regardless of terminate_after. + * So, we will have to do a range check, not an equality check. + * + * If track_total_hits != true, but set to a value AND terminate_after is set: + * Again, due to the optimization, any count can be returned. + * Up to terminate_after, relation == EQUAL_TO. + * But if track_total_hits_up_to ≥ terminate_after, relation can be EQ _or_ GTE. + * This ambiguity is due to the fact that totalHits == track_total_hits_up_to + * or totalHits > track_total_hits_up_to and SearchPhaseController sets totalHits = track_total_hits_up_to when returning results + * in which case relation = GTE. + * + * @param size + * @throws Exception + */ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); @@ -346,6 +365,7 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except refresh(); SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) .setTerminateAfter(10) @@ -356,25 +376,28 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except assertEquals(5, searchResponse.getHits().getTotalHits().value); assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) - .setTerminateAfter(5) - .setSize(size) - .setTrackTotalHitsUpTo(10) - .get(); - assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); - assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + // For size = 0, the following queries terminate early, but hits and relation can vary. 
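The comments above explain why these assertions loosen from equality to a range: when size is 0 the hit count is gathered through TotalHitCountCollector, and the Weight#count shortcut from LUCENE-10620 can report more hits than terminate_after. A minimal sketch of the resulting assertion pattern, assuming an index named "test" with max matching documents, i being the terminate_after value of the current iteration, and the three-argument assertHitCount range helper used in the hunk above:

    SearchResponse resp = client().prepareSearch("test")
        .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max))
        .setTerminateAfter(i)
        .setSize(0)
        .setTrackTotalHits(true)
        .get();
    assertTrue(resp.isTerminatedEarly());   // collection still stops early
    assertHitCount(resp, i, max);           // total may land anywhere in [i, max], so a range check
    // With size > 0 the shortcut does not apply and the exact form assertHitCount(resp, i) remains valid.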
+ if (size > 0) { + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) - .setTerminateAfter(5) - .setSize(size) - .setTrackTotalHitsUpTo(5) - .get(); - assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); - assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) @@ -383,7 +406,12 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except .setTrackTotalHits(true) .get(); assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); + if (size == 0) { + // Since terminate_after < track_total_hits, we need to do a range check. + assertHitCount(searchResponse, 5, numDocs); + } else { + assertEquals(5, searchResponse.getHits().getTotalHits().value); + } assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); searchResponse = client().prepareSearch("test") @@ -405,12 +433,11 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") - public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize() throws Exception { + public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize0() throws Exception { doTestSimpleTerminateAfterTrackTotalHitsUpTo(0); } - public void testSimpleTerminateAfterTrackTotalHitsUpToSize0() throws Exception { + public void testSimpleTerminateAfterTrackTotalHitsUpToSize() throws Exception { doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index 27a56f9d14f08..ea73f9ee1a2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.Scroll; @@ -53,7 +52,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import 
org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -70,9 +69,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class SearchSliceIT extends ParameterizedOpenSearchIntegTestCase { - public SearchSliceIT(Settings dynamicSettings) { - super(dynamicSettings); +public class SearchSliceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchSliceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -83,11 +82,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { String mapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index 81e948640ee94..e40928f15e8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -47,7 +47,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -63,7 +62,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -109,7 +108,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; -public class FieldSortIT extends ParameterizedOpenSearchIntegTestCase { +public class FieldSortIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { public FieldSortIT(Settings dynamicSettings) { super(dynamicSettings); } @@ -122,11 +121,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { @@ -203,7 +197,8 @@ public void testIssue8226() throws InterruptedException { public void testIssue6614() throws ExecutionException, InterruptedException { List<IndexRequestBuilder> builders = new ArrayList<>(); boolean strictTimeBasedIndices = randomBoolean(); - final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month + // consider only 15 days of the month to avoid hitting open file limit + final int numIndices = randomIntBetween(2, 15); int docs = 0; for (int i = 0; i < numIndices; i++) { final String indexId = 
strictTimeBasedIndices ? "idx_" + i : "idx"; @@ -2389,4 +2384,185 @@ public void testLongSortOptimizationCorrectResults() throws InterruptedException } } + public void testSimpleSortsPoints() throws Exception { + final int docs = 100; + + Random random = random(); + assertAcked( + prepareCreate("test").setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("str_value") + .field("type", "keyword") + .endObject() + .startObject("boolean_value") + .field("type", "boolean") + .endObject() + .startObject("byte_value") + .field("type", "byte") + .endObject() + .startObject("short_value") + .field("type", "short") + .endObject() + .startObject("integer_value") + .field("type", "integer") + .endObject() + .startObject("long_value") + .field("type", "long") + .endObject() + .startObject("unsigned_long_value") + .field("type", "unsigned_long") + .endObject() + .startObject("float_value") + .field("type", "float") + .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() + .startObject("double_value") + .field("type", "double") + .endObject() + .endObject() + .endObject() + ) + ); + ensureGreen(); + BigInteger UNSIGNED_LONG_BASE = Numbers.MAX_UNSIGNED_LONG_VALUE.subtract(BigInteger.valueOf(10000 * docs)); + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (int i = 0; i < docs / 2; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) + .field("boolean_value", true) + .field("byte_value", i) + .field("short_value", i) + .field("integer_value", i) + .field("long_value", i) + .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) + .field("float_value", 32 * i) + .field("half_float_value", 16 * i) + .field("double_value", 64 * i) + .endObject() + ); + builders.add(builder); + } + + // We keep half of the docs with numeric values and other half without + for (int i = docs / 2; i < docs; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject().field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })).endObject() + ); + builders.add(builder); + } + + int j = 0; + Collections.shuffle(builders, random); + for (IndexRequestBuilder builder : builders) { + builder.get(); + if ((++j % 25) == 0) { + refresh(); + } + + } + refresh(); + indexRandomForConcurrentSearch("test"); + + final int size = 2; + // HALF_FLOAT + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("half_float_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), 
not(containsString("error"))); + + // FLOAT + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // DOUBLE + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // UNSIGNED_LONG + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("unsigned_long_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("unsigned_long_value", SortOrder.DESC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 766ac6139b24b..492ffce3321e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -39,13 +39,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; 
import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -66,10 +65,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoDistanceIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoDistanceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 1b8bd9694483d..b6f53936d5939 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -41,11 +41,10 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -65,7 +64,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSortValues; import static org.hamcrest.Matchers.closeTo; -public class GeoDistanceSortBuilderIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoDistanceSortBuilderIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public GeoDistanceSortBuilderIT(Settings settings) { super(settings); } @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static final String LOCATION_FIELD = "location"; @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java index ddfbc3cce2be6..cb8b508c4496b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java @@ -40,7 +40,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; 
import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -49,7 +48,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -75,12 +74,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimpleSortIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String DOUBLE_APOSTROPHE = "\u0027\u0027"; - public SimpleSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 76e68781c72ba..ec891045cb510 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -12,14 +12,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.plugin.CustomSortBuilder; import org.opensearch.search.sort.plugin.CustomSortPlugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -27,7 +26,7 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; -public class SortFromPluginIT extends ParameterizedOpenSearchIntegTestCase { +public class SortFromPluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SortFromPluginIT(Settings settings) { super(settings); } @@ -40,11 +39,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomSortPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index a9c4bf841d9a1..4c1e47ef8da99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -38,14 +38,13 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHits; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -57,10 +56,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MetadataFetchingIT extends ParameterizedOpenSearchIntegTestCase { +public class MetadataFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MetadataFetchingIT(Settings dynamicSettings) { - super(dynamicSettings); + public MetadataFetchingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index 805e82dc9850b..294657cedcc5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -36,8 +36,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -47,10 +46,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; -public class SourceFetchingIT extends ParameterizedOpenSearchIntegTestCase { +public class SourceFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SourceFetchingIT(Settings dynamicSettings) { - super(dynamicSettings); + public SourceFetchingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -61,11 +60,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return 
Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java index f770bd9864850..f8d2955440bc4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java @@ -16,7 +16,6 @@ import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesQueryCache; @@ -61,6 +60,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, SEGMENT_SLICE_COUNT) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) .build(); } @@ -74,11 +74,6 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testConcurrentQueryCount() throws Exception { String INDEX_1 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index 8fb3c57dd7680..99cb3a4e8ca20 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -45,7 +45,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.plugins.Plugin; @@ -54,7 +53,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -81,10 +80,10 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) -public class SearchStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchStatsIT(Settings staticSettings) { + super(staticSettings); } 
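Every featureFlagSettings() override removed in these hunks is replaced by the same mechanism: the concurrent segment search toggle is now handed to the ParameterizedStaticSettingsOpenSearchIntegTestCase constructor (or, for ConcurrentSearchStatsIT, enabled directly in nodeSettings via CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING). The parameters() bodies themselves are elided by the hunk context; purely as an illustration of the pattern, a factory driving both modes of that cluster setting would look roughly like this:

    // Illustrative sketch only; the actual parameters() bodies are not shown in this diff.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

Each Object[] becomes the Settings passed to the test constructor, so the same test class runs once with concurrent segment search off and once with it on.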
@ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index b342e6d35f0b4..c72e128a88045 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -48,7 +48,6 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; @@ -65,7 +64,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -100,7 +99,7 @@ import static org.hamcrest.Matchers.notNullValue; @SuppressCodecs("*") // requires custom completion format -public class CompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class CompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public CompletionSuggestSearchIT(Settings settings) { super(settings); } @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java index bac3e7fb61683..67523e9fd424a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -53,7 +52,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import 
org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.search.suggest.completion.context.GeoQueryContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -73,7 +72,7 @@ import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format -public class ContextCompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class ContextCompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ContextCompletionSuggestSearchIT(Settings settings) { super(settings); } @@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index bc6e49008bf6b..e0afdbc816f5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -57,7 +56,7 @@ import org.opensearch.search.suggest.phrase.StupidBackoff; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -96,7 +95,7 @@ * possible these tests should declare for the first request, make the request, modify the configuration for the next request, make that * request, modify again, request again, etc. This makes it very obvious what changes between requests. 
*/ -public class SuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class SuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SuggestSearchIT(Settings settings) { super(settings); } @@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { assertAcked(prepareCreate("test").setMapping("text", "type=text")); diff --git a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java index 8c9bff9833462..b89541c647580 100644 --- a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java @@ -36,8 +36,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,7 +48,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimilarityIT extends ParameterizedOpenSearchIntegTestCase { +public class SimilarityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SimilarityIT(Settings settings) { super(settings); } @@ -62,11 +61,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testCustomBM25Similarity() throws Exception { try { client().admin().indices().prepareDelete("test").execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e79bf1c16b586..73feeb84308ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -92,6 +92,7 @@ public void testDeleteShallowCopySnapshot() throws Exception { } // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. 
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9208") public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 9a92ddc81852a..90bb2b501764e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -7,6 +7,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -25,15 +26,21 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.monitor.fs.FsInfo; @@ -47,6 +54,8 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -54,6 +63,7 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -75,10 +85,10 @@ protected Settings.Builder randomRepositorySettings() { return settings; } - private Settings.Builder chunkedRepositorySettings() { + private Settings.Builder chunkedRepositorySettings(long chunkSize) { final Settings.Builder settings = Settings.builder(); settings.put("location", randomRepoPath()).put("compress", randomBoolean()); - settings.put("chunk_size", 2 << 23, ByteSizeUnit.BYTES); + settings.put("chunk_size", chunkSize, ByteSizeUnit.BYTES); return settings; } @@ -184,10 +194,10 @@ public void testSnapshottingSearchableSnapshots() throws Exception { } /** - * Tests a 
chunked repository scenario for searchable snapshots by creating an index, + * Tests a default 8mib chunked repository scenario for searchable snapshots by creating an index, * taking a snapshot, restoring it as a searchable snapshot index. */ - public void testCreateSearchableSnapshotWithChunks() throws Exception { + public void testCreateSearchableSnapshotWithDefaultChunks() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; @@ -195,7 +205,33 @@ public void testCreateSearchableSnapshotWithChunks() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - Settings.Builder repositorySettings = chunkedRepositorySettings(); + Settings.Builder repositorySettings = chunkedRepositorySettings(2 << 23); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); + createRepositoryWithSettings(repositorySettings, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + deleteIndicesAndEnsureGreen(client, indexName); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + assertDocCount(restoredIndexName, 1000L); + } + + /** + * Tests a small 1000 bytes chunked repository scenario for searchable snapshots by creating an index, + * taking a snapshot, restoring it as a searchable snapshot index. + */ + public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { + final int numReplicasIndex = randomIntBetween(1, 4); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + Settings.Builder repositorySettings = chunkedRepositorySettings(1000); internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); @@ -234,6 +270,62 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() assertDocCount(indexName, 100L); } + public void testSearchableSnapshotAllocationFilterSettings() throws Exception { + final int numShardsIndex = randomIntBetween(3, 6); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numShardsIndex); + createIndexWithDocsAndEnsureGreen(numShardsIndex, 1, 100, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + final Set<String> searchNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) + .filter(DiscoveryNode::isSearchNode) + .map(DiscoveryNode::getId) + .collect(Collectors.toSet()); + + for (int i = searchNodes.size(); i > 2; --i) { + String pickedNode = randomFrom(searchNodes); + searchNodes.remove(pickedNode); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, true); + assertTrue( + client.admin() + .indices() + .prepareUpdateSettings(restoredIndexName) + 
.setSettings(Settings.builder().put("index.routing.allocation.exclude._id", pickedNode)) + .execute() + .actionGet() + .isAcknowledged() + ); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, false); + assertIndexAssignedToNodeOrNot(indexName, pickedNode, true); + } + } + + private void assertIndexAssignedToNodeOrNot(String index, String node, boolean assigned) { + final ClusterState state = clusterService().state(); + if (assigned) { + assertTrue(state.getRoutingTable().allShards(index).stream().anyMatch(shard -> shard.currentNodeId().equals(node))); + } else { + assertTrue(state.getRoutingTable().allShards(index).stream().noneMatch(shard -> shard.currentNodeId().equals(node))); + } + } + /** * Tests the functionality of remote shard allocation to * ensure it can handle node drops for failover scenarios and the cluster gets back to a healthy state when @@ -341,11 +433,16 @@ public void testDeleteSearchableSnapshotBackingIndex() throws Exception { } private void createIndexWithDocsAndEnsureGreen(int numReplicasIndex, int numOfDocs, String indexName) throws InterruptedException { + createIndexWithDocsAndEnsureGreen(1, numReplicasIndex, numOfDocs, indexName); + } + + private void createIndexWithDocsAndEnsureGreen(int numShardsIndex, int numReplicasIndex, int numOfDocs, String indexName) + throws InterruptedException { createIndex( indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex)) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasIndex) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsIndex) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) .build() ); @@ -722,6 +819,47 @@ public void testDefaultShardPreference() throws Exception { } } + public void testRestoreSearchableSnapshotWithIndexStoreTypeThrowsException() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final int numReplicasIndex1 = randomIntBetween(1, 4); + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(numReplicasIndex1 + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1); + deleteIndicesAndEnsureGreen(client, indexName1); + + internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex1 + 1); + + // set "index.store.type" to "remote_snapshot" in index settings of restore API and assert appropriate exception with error message + // is thrown. 
+ final SnapshotRestoreException error = expectThrows( + SnapshotRestoreException.class, + () -> client.admin() + .cluster() + .prepareRestoreSnapshot(repoName, snapshotName) + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setIndexSettings( + Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + ) + .setWaitForCompletion(true) + .execute() + .actionGet() + ); + assertThat( + error.getMessage(), + containsString( + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." + ) + ); + } + /** * Asserts the cache folder count to match the number of shards and the number of indices within the cache folder * as provided. @@ -750,4 +888,75 @@ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, i // Verifies if all the shards (primary and replica) have been deleted assertEquals(numCacheFolderCount, searchNodeFileCachePaths.size()); } + + public void testRelocateSearchableSnapshotIndex() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName = "test-idx-1"; + final String restoredIndexName = indexName + "-copy"; + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(1); + createIndexWithDocsAndEnsureGreen(0, 100, indexName); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + deleteIndicesAndEnsureGreen(client, indexName); + + String searchNode1 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + String searchNode2 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + + final Index index = resolveIndex(restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, true); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + + // relocate the shard from node1 to node2 + client.admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(restoredIndexName, 0, searchNode1, searchNode2)) + .execute() + .actionGet(); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertDocCount(restoredIndexName, 100L); + + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, false); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, true); + deleteIndicesAndEnsureGreen(client, restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + } + + private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception { + final Node node = internalCluster().getInstance(Node.class, nodeName); + final ShardId shardId = new ShardId(index, 0); + final ShardPath shardPath = ShardPath.loadFileCachePath(node.getNodeEnvironment(), shardId); + + assertBusy(() -> { + assertTrue( + "shard state path should " + (exists ? 
"exist" : "not exist"), + Files.exists(shardPath.getShardStatePath()) == exists + ); + assertTrue("shard cache path should " + (exists ? "exist" : "not exist"), Files.exists(shardPath.getDataPath()) == exists); + }, 30, TimeUnit.SECONDS); + + final Path indexDataPath = node.getNodeEnvironment().fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + final Path indexPath = node.getNodeEnvironment().fileCacheNodePath().indicesPath.resolve(index.getUUID()); + assertBusy(() -> { + assertTrue("index path should " + (exists ? "exist" : "not exist"), Files.exists(indexDataPath) == exists); + assertTrue("index cache path should " + (exists ? "exist" : "not exist"), Files.exists(indexPath) == exists); + }, 30, TimeUnit.SECONDS); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java index f50fc691fb232..28b84655a2cc7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -12,7 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -53,7 +53,7 @@ public void testRestrictedSettingsCantBeUpdated() { assertEquals( e.getMessage(), "[system-repo-name] trying to modify an unmodifiable attribute type of system " - + "repository from current value [fs] to new value [mock]" + + "repository from current value [reloadable-fs] to new value [mock]" ); } @@ -65,7 +65,12 @@ public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); assertAcked( - client.admin().cluster().preparePutRepository(systemRepoName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + client.admin() + .cluster() + .preparePutRepository(systemRepoName) + .setType(ReloadableFsRepository.TYPE) + .setSettings(repoSettings) + .get() ); } } diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index 39c22e60f038a..c5a5ce12b238c 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -8,13 +8,11 @@ package org.opensearch; -import org.opensearch.core.index.snapshots.IndexShardSnapshotException; -import org.opensearch.crypto.CryptoRegistryException; - import static org.opensearch.OpenSearchException.OpenSearchExceptionHandle; import static org.opensearch.OpenSearchException.OpenSearchExceptionHandleRegistry.registerExceptionHandle; import static org.opensearch.OpenSearchException.UNKNOWN_VERSION_ADDED; import static org.opensearch.Version.V_2_10_0; +import static org.opensearch.Version.V_2_13_0; import static org.opensearch.Version.V_2_1_0; import static org.opensearch.Version.V_2_4_0; import static org.opensearch.Version.V_2_5_0; @@ -678,7 +676,12 @@ public static void registerExceptions() { ) ); registerExceptionHandle( - new OpenSearchExceptionHandle(IndexShardSnapshotException.class, IndexShardSnapshotException::new, 98, 
UNKNOWN_VERSION_ADDED) + new OpenSearchExceptionHandle( + org.opensearch.core.index.snapshots.IndexShardSnapshotException.class, + org.opensearch.core.index.snapshots.IndexShardSnapshotException::new, + 98, + UNKNOWN_VERSION_ADDED + ) ); registerExceptionHandle( new OpenSearchExceptionHandle( @@ -1174,7 +1177,30 @@ public static void registerExceptions() { V_2_7_0 ) ); - registerExceptionHandle(new OpenSearchExceptionHandle(CryptoRegistryException.class, CryptoRegistryException::new, 171, V_2_10_0)); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.crypto.CryptoRegistryException.class, + org.opensearch.crypto.CryptoRegistryException::new, + 171, + V_2_10_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewNotFoundException.class, + org.opensearch.action.admin.indices.view.ViewNotFoundException::new, + 172, + V_2_13_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewAlreadyExistsException.class, + org.opensearch.action.admin.indices.view.ViewAlreadyExistsException::new, + 173, + V_2_13_0 + ) + ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.cluster.block.IndexCreateBlockException.class, diff --git a/server/src/main/java/org/opensearch/SpecialPermission.java b/server/src/main/java/org/opensearch/SpecialPermission.java index 8a694d4543f32..8348f0844acc6 100644 --- a/server/src/main/java/org/opensearch/SpecialPermission.java +++ b/server/src/main/java/org/opensearch/SpecialPermission.java @@ -98,6 +98,7 @@ public SpecialPermission(String name, String actions) { /** * Check that the current stack has {@link SpecialPermission} access according to the {@link SecurityManager}. 
*/ + @SuppressWarnings("removal") public static void check() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 46775466aa615..b19bf9590f43b 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -224,6 +224,12 @@ import org.opensearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.opensearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.TransportBulkAction; import org.opensearch.action.bulk.TransportShardBulkAction; @@ -409,6 +415,7 @@ import org.opensearch.rest.action.admin.indices.RestUpgradeAction; import org.opensearch.rest.action.admin.indices.RestUpgradeStatusAction; import org.opensearch.rest.action.admin.indices.RestValidateQueryAction; +import org.opensearch.rest.action.admin.indices.RestViewAction; import org.opensearch.rest.action.cat.AbstractCatAction; import org.opensearch.rest.action.cat.RestAliasAction; import org.opensearch.rest.action.cat.RestAllocationAction; @@ -721,6 +728,14 @@ public <Request extends ActionRequest, Response extends ActionResponse> void reg actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); actions.register(DataStreamsStatsAction.INSTANCE, DataStreamsStatsAction.TransportAction.class); + // Views: + actions.register(CreateViewAction.INSTANCE, CreateViewAction.TransportAction.class); + actions.register(DeleteViewAction.INSTANCE, DeleteViewAction.TransportAction.class); + actions.register(GetViewAction.INSTANCE, GetViewAction.TransportAction.class); + actions.register(UpdateViewAction.INSTANCE, UpdateViewAction.TransportAction.class); + actions.register(ListViewNamesAction.INSTANCE, ListViewNamesAction.TransportAction.class); + actions.register(SearchViewAction.INSTANCE, SearchViewAction.TransportAction.class); + // Persistent tasks: actions.register(StartPersistentTaskAction.INSTANCE, StartPersistentTaskAction.TransportAction.class); actions.register(UpdatePersistentTaskStatusAction.INSTANCE, UpdatePersistentTaskStatusAction.TransportAction.class); @@ -915,6 +930,14 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestResolveIndexAction()); registerHandler.accept(new RestDataStreamsStatsAction()); + // View API + registerHandler.accept(new RestViewAction.CreateViewHandler()); + registerHandler.accept(new RestViewAction.DeleteViewHandler()); + registerHandler.accept(new RestViewAction.GetViewHandler()); + registerHandler.accept(new RestViewAction.UpdateViewHandler()); + registerHandler.accept(new RestViewAction.SearchViewHandler()); + registerHandler.accept(new RestViewAction.ListViewNamesHandler()); + // CAT API registerHandler.accept(new RestAllocationAction()); registerHandler.accept(new RestCatSegmentReplicationAction()); 
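For reference, below is a minimal Java sketch (not part of this change) of how the experimental view actions registered in ActionModule above could be exercised from a node-level Client, for example inside an integration test. The view name "logs-view", the index pattern "logs-*", and the wrapper class name are invented for illustration; the snippet assumes a Client handle is already available and only uses the request/response types introduced by this patch.

    import org.opensearch.action.admin.indices.view.CreateViewAction;
    import org.opensearch.action.admin.indices.view.GetViewAction;
    import org.opensearch.action.admin.indices.view.SearchViewAction;
    import org.opensearch.action.search.SearchRequest;
    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.client.Client;

    import java.util.List;

    // Illustrative sketch only; "logs-view" and "logs-*" are hypothetical values.
    public final class ViewActionsSketch {
        public static void run(Client client) {
            // Create a view over a single hypothetical index pattern.
            CreateViewAction.Request createRequest = new CreateViewAction.Request(
                "logs-view",
                "example view over log indices",
                List.of(new CreateViewAction.Request.Target("logs-*"))
            );
            GetViewAction.Response created = client.execute(CreateViewAction.INSTANCE, createRequest).actionGet();

            // Search through the view; the request wraps an ordinary SearchRequest.
            SearchViewAction.Request searchRequest = new SearchViewAction.Request("logs-view", new SearchRequest());
            SearchResponse searchResponse = client.execute(SearchViewAction.INSTANCE, searchRequest).actionGet();
        }
    }
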
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 8293a5bb27612..8562a7eb37709 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -229,8 +229,7 @@ public NodeStats(StreamInput in) throws IOException { } else { repositoriesStats = null; } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { admissionControlStats = in.readOptionalWriteable(AdmissionControlStats::new); } else { admissionControlStats = null; @@ -504,8 +503,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(admissionControlStats); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index e62c83490d810..ab6451382aa88 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -55,6 +55,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResult; import org.opensearch.tasks.TaskResultsService; import org.opensearch.threadpool.ThreadPool; @@ -84,6 +85,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques private final Client client; private final NamedXContentRegistry xContentRegistry; + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject public TransportGetTaskAction( ThreadPool threadPool, @@ -91,7 +94,8 @@ public TransportGetTaskAction( ActionFilters actionFilters, ClusterService clusterService, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + TaskResourceTrackingService taskResourceTrackingService ) { super(GetTaskAction.NAME, transportService, actionFilters, GetTaskRequest::new); this.threadPool = threadPool; @@ -99,6 +103,7 @@ public TransportGetTaskAction( this.transportService = transportService; this.client = new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN); this.xContentRegistry = xContentRegistry; + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -173,6 +178,7 @@ public void onFailure(Exception e) { } }); } else { + taskResourceTrackingService.refreshResourceStats(runningTask); TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); listener.onResponse(new GetTaskResponse(new TaskResult(false, info))); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index e5dbefc3dba97..01b4cd779c261 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -594,4 +594,25 @@ public void writeTo(StreamOutput out) throws IOException { } waitForActiveShards.writeTo(out); } + + @Override + public String toString() { + return "CreateIndexRequest{" + + "cause='" + + cause + + '\'' + + ", index='" + + index + + '\'' + + ", settings=" + + settings + + ", mappings='" + + mappings + + '\'' + + ", aliases=" + + aliases + + ", waitForActiveShards=" + + waitForActiveShards + + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 14c985f1d3427..9265c6ae60678 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -82,7 +82,7 @@ public class TransportUpdateSettingsAction extends TransportClusterManagerNodeAc "index.number_of_replicas" ); - private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog" }; + private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog", "index.routing.allocation" }; private final MetadataUpdateSettingsService updateSettingsService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 23cd8efdcaf59..ca4c16935c2b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -168,9 +168,11 @@ protected void clusterManagerOperation( .getSegments() .getReplicationStats().maxBytesBehind != 0) { throw new IllegalStateException( - " For index [" + "Replication still in progress for index [" + sourceIndex - + "] replica shards haven't caught up with primary, please retry after sometime." + + "]. Please wait for replication to complete and retry. Use the _cat/segment_replication/" + + sourceIndex + + " api to check if the index is up to date (e.g. bytes_behind == 0)." ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java new file mode 100644 index 0000000000000..9faf25ce10732 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java @@ -0,0 +1,279 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** Action to create a view */ +@ExperimentalApi +public class CreateViewAction extends ActionType<GetViewAction.Response> { + + private static final int MAX_NAME_LENGTH = 64; + private static final int MAX_DESCRIPTION_LENGTH = 256; + private static final int MAX_TARGET_COUNT = 25; + private static final int MAX_TARGET_INDEX_PATTERN_LENGTH = 64; + + public static final CreateViewAction INSTANCE = new CreateViewAction(); + public static final String NAME = "cluster:admin/views/create"; + + private CreateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** + * Request for Creating View + */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + private final String description; + private final List<Target> targets; + + public Request(final String name, final String description, final List<Target> targets) { + this.name = name; + this.description = Objects.requireNonNullElse(description, ""); + this.targets = targets; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.description = in.readString(); + this.targets = in.readList(Target::new); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public List<Target> getTargets() { + return new ArrayList<>(targets); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return name.equals(that.name) && description.equals(that.description) && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, targets); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + 
validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + if (name != null && name.length() > MAX_NAME_LENGTH) { + validationException = ValidateActions.addValidationError( + "name must be less than " + MAX_NAME_LENGTH + " characters in length", + validationException + ); + } + if (description != null && description.length() > MAX_DESCRIPTION_LENGTH) { + validationException = ValidateActions.addValidationError( + "description must be less than " + MAX_DESCRIPTION_LENGTH + " characters in length", + validationException + ); + } + if (CollectionUtils.isEmpty(targets)) { + validationException = ValidateActions.addValidationError("targets cannot be empty", validationException); + } else { + if (targets.size() > MAX_TARGET_COUNT) { + validationException = ValidateActions.addValidationError( + "view cannot have more than " + MAX_TARGET_COUNT + " targets", + validationException + ); + } + for (final Target target : targets) { + final var validationMessages = Optional.ofNullable(target.validate()) + .map(ValidationException::validationErrors) + .orElse(List.of()); + for (final String validationMessage : validationMessages) { + validationException = ValidateActions.addValidationError(validationMessage, validationException); + } + } + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeString(description); + out.writeList(targets); + } + + /** View target representation for create requests */ + @ExperimentalApi + public static class Target implements Writeable { + public final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = indexPattern; + } + + public Target(final StreamInput in) throws IOException { + this.indexPattern = in.readString(); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(indexPattern)) { + validationException = ValidateActions.addValidationError("index pattern cannot be empty or null", validationException); + } + if (indexPattern != null && indexPattern.length() > MAX_TARGET_INDEX_PATTERN_LENGTH) { + validationException = ValidateActions.addValidationError( + "target index pattern must be less than " + MAX_TARGET_INDEX_PATTERN_LENGTH + " characters in length", + validationException + ); + } + + return validationException; + } + + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.Target.INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "create_view_request", 
+ args -> new Request((String) args[0], (String) args[1], (List<Target>) args[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), View.DESCRIPTION_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), View.TARGETS_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for creating a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.createView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java new file mode 100644 index 0000000000000..abb3c3f4db5f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to delete a view */ +@SuppressWarnings("deprecation") +@ExperimentalApi +public class DeleteViewAction extends ActionType<AcknowledgedResponse> { + + public static final DeleteViewAction INSTANCE = new DeleteViewAction(); + public static final String NAME = "cluster:admin/views/delete"; + + public DeleteViewAction() { + super(NAME, AcknowledgedResponse::new); + } + + /** Request for delete view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "delete_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for deleting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, AcknowledgedResponse> { + + private final 
ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected AcknowledgedResponse read(final StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<AcknowledgedResponse> listener + ) throws Exception { + viewService.deleteView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java new file mode 100644 index 0000000000000..762eea965c8c1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java @@ -0,0 +1,214 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to get a view */ +@ExperimentalApi +public class GetViewAction extends ActionType<GetViewAction.Response> { + + public static final GetViewAction INSTANCE = new GetViewAction(); + public static final String NAME = "views:data/read/get"; + 
+ public GetViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for get view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "get_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** Response with a view */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final View view; + + public Response(final View view) { + this.view = view; + } + + public Response(final StreamInput in) throws IOException { + super(in); + this.view = new View(in); + } + + public View getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return getView().equals(that.getView()); + } + + @Override + public int hashCode() { + return Objects.hash(getView()); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + this.view.writeTo(out); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("view", view); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Response, Void> PARSER = new ConstructingObjectParser<>( + "view_response", + args -> new Response((View) args[0]) + ); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), View.PARSER, new ParseField("view")); + } + + public static Response fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for getting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService 
viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected Response read(final StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void clusterManagerOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) + throws Exception { + viewService.getView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java new file mode 100644 index 0000000000000..eac0b1d5558ca --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** Action to list a view names */ +@ExperimentalApi +public class ListViewNamesAction extends ActionType<ListViewNamesAction.Response> { + + public static final ListViewNamesAction INSTANCE = new ListViewNamesAction(); + public static final String NAME = "views:data/read/list"; + + public ListViewNamesAction() { + super(NAME, ListViewNamesAction.Response::new); + } + + /** Request for list view names */ + @ExperimentalApi + public static class Request extends ActionRequest { + public Request() {} + + public Request(final StreamInput in) {} + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return true; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + /** Response for list view names */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final List<String> views; + + public Response(final List<String> views) { + this.views = views; + } + + public Response(final StreamInput in) throws IOException { + views = in.readStringList(); + } + + public 
List<String> getViewNames() { + return views; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return views.equals(that.views); + } + + @Override + public int hashCode() { + return Objects.hash(views); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeStringCollection(views); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("views", views); + builder.endObject(); + return builder; + } + } + + /** + * Transport Action for getting a View + */ + public static class TransportAction extends HandledTransportAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener<Response> listener) { + viewService.listViewNames(listener); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java new file mode 100644 index 0000000000000..1e20221242f06 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Function; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** Action to create a view */ +@ExperimentalApi +public class SearchViewAction extends ActionType<SearchResponse> { + + public static final SearchViewAction INSTANCE = new SearchViewAction(); + public static final String NAME = "views:data/read/search"; + + private SearchViewAction() { + super(NAME, SearchResponse::new); + } + + /** + * Wraps the functionality of search requests and tailors for what is available + * when searching through views + */ + @ExperimentalApi + public static class Request extends SearchRequest { + + private final String view; + + public Request(final String view, final SearchRequest searchRequest) { + super(searchRequest); + this.view = view; + } + + public Request(final StreamInput in) throws IOException { + super(in); + view = in.readString(); + } + + public String getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return view.equals(that.view) && super.equals(that); + } + + @Override + public int hashCode() { + return Objects.hash(view, super.hashCode()); + } + + @Override + public ActionRequestValidationException validate() { + final Function<String, String> unsupported = (String x) -> x + " is not supported when searching views"; + ActionRequestValidationException validationException = super.validate(); + + if (scroll() != null) { + validationException = addValidationError(unsupported.apply("Scroll"), validationException); + } + + // TODO: Filter out any additional search features that are not supported. + // Required before removing @ExperimentalApi annotations. 
+ + if (Strings.isNullOrEmpty(view)) { + validationException = addValidationError("View is required", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(view); + } + + @Override + public String toString() { + return super.toString().replace("SearchRequest{", "SearchViewAction.Request{view=" + view + ","); + } + } + + /** + * Transport Action for searching a View + */ + public static class TransportAction extends HandledTransportAction<Request, SearchResponse> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(final Task task, final Request request, final ActionListener<SearchResponse> listener) { + viewService.searchView(request, listener); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java new file mode 100644 index 0000000000000..9182684c73a0b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** Action to update a view */ +@ExperimentalApi +public class UpdateViewAction extends ActionType<GetViewAction.Response> { + + public static final UpdateViewAction INSTANCE = new UpdateViewAction(); + public static final String NAME = "cluster:admin/views/update"; + + public UpdateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for update view */ + @ExperimentalApi + public static class Request { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<CreateViewAction.Request, String> PARSER = new ConstructingObjectParser<>( + "create_view_request", + false, + (args, viewName) -> new CreateViewAction.Request(viewName, (String) args[0], (List<CreateViewAction.Request.Target>) args[1]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), 
View.DESCRIPTION_FIELD); + PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> CreateViewAction.Request.Target.fromXContent(p), + View.TARGETS_FIELD + ); + } + + public static CreateViewAction.Request fromXContent(final XContentParser parser, final String viewName) throws IOException { + return PARSER.parse(parser, viewName); + } + } + + /** + * Transport Action for updating a View + */ + @ExperimentalApi + public static class TransportAction extends TransportClusterManagerNodeAction<CreateViewAction.Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super( + NAME, + transportService, + clusterService, + threadPool, + actionFilters, + CreateViewAction.Request::new, + indexNameExpressionResolver + ); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final CreateViewAction.Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.updateView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final CreateViewAction.Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java new file mode 100644 index 0000000000000..90a69158286b4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
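The update path deliberately reuses the create request shape: UpdateViewAction.Request only hosts a parser that yields a CreateViewAction.Request, with the view name supplied separately (for example from the REST path), and the transport action hands the result to ViewService.updateView. A rough sketch of an update issued through the IndicesAdminClient.updateView method introduced later in this change; the Target constructor used here is an assumption, since only its parser-side use appears in this hunk:

    // hypothetical Target construction; the constructor is not shown in this diff
    CreateViewAction.Request update = new CreateViewAction.Request(
        "logs-view",
        "only the warm and hot log indices",
        List.of(new CreateViewAction.Request.Target("logs-2024-*")));

    client.admin().indices().updateView(update, new ActionListener<GetViewAction.Response>() {
        @Override
        public void onResponse(final GetViewAction.Response response) {
            // carries the stored view with its refreshed modifiedAt timestamp
        }

        @Override
        public void onFailure(final Exception e) {
            // a missing view is expected to surface here as a ViewNotFoundException
        }
    });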
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceAlreadyExistsException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view already exists */ +@ExperimentalApi +public class ViewAlreadyExistsException extends ResourceAlreadyExistsException { + + public ViewAlreadyExistsException(final String viewName) { + super("View [{}] already exists", viewName); + } + + public ViewAlreadyExistsException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java new file mode 100644 index 0000000000000..3a90e6b0bc791 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view is not found */ +@ExperimentalApi +public class ViewNotFoundException extends ResourceNotFoundException { + + public ViewNotFoundException(final String viewName) { + super("View [{}] does not exist", viewName); + } + + public ViewNotFoundException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java new file mode 100644 index 0000000000000..294f88decba1f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java @@ -0,0 +1,178 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
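Both exception types extend the generic resource exceptions, so failure handling keyed on ResourceNotFoundException or ResourceAlreadyExistsException keeps working, while callers that care can branch on the view-specific classes. A small illustrative helper for a listener's failure path:

    static void handleViewFailure(final Exception e) {
        if (e instanceof ViewNotFoundException) {
            // get/update/delete named a view that is not in the cluster metadata
        } else if (e instanceof ViewAlreadyExistsException) {
            // create collided with an existing view of the same name
        } else {
            throw new RuntimeException(e);
        }
    }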
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.ActionListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeSet; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** Service to interact with views, create, retrieve, update, and delete */ +@ExperimentalApi +public class ViewService { + + private final static Logger LOG = LogManager.getLogger(ViewService.class); + private final ClusterService clusterService; + private final NodeClient client; + private final LongSupplier timeProvider; + + public ViewService(final ClusterService clusterService, final NodeClient client, final LongSupplier timeProvider) { + this.clusterService = clusterService; + this.client = client; + this.timeProvider = Optional.ofNullable(timeProvider).orElse(System::currentTimeMillis); + } + + public void createView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final long currentTime = timeProvider.getAsLong(); + + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View view = new View(request.getName(), request.getDescription(), currentTime, currentTime, new TreeSet<>(targets)); + + createOrUpdateView(Operation.CreateView, view, listener); + } + + public void updateView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View originalView = getViewOrThrowException(request.getName()); + + final long currentTime = timeProvider.getAsLong(); + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View updatedView = new View( + request.getName(), + request.getDescription(), + originalView.getCreatedAt(), + currentTime, + new TreeSet<>(targets) + ); + + createOrUpdateView(Operation.UpdateView, updatedView, listener); + } + + public void deleteView(final DeleteViewAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + getViewOrThrowException(request.getName()); + + clusterService.submitStateUpdateTask("delete_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + return new ClusterState.Builder(clusterService.state()).metadata( + Metadata.builder(currentState.metadata()).removeView(request.getName()) + ).build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to delete view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + 
listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } + + public void getView(final GetViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View view = getViewOrThrowException(request.getName()); + + listener.onResponse(new GetViewAction.Response(view)); + } + + public void listViewNames(final ActionListener<ListViewNamesAction.Response> listener) { + final List<String> viewNames = new ArrayList<>( + Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(Map::keySet) + .orElseThrow() + ); + + listener.onResponse(new ListViewNamesAction.Response(viewNames)); + } + + public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) { + final View view = getViewOrThrowException(request.getView()); + + final String[] indices = view.getTargets().stream().map(View.Target::getIndexPattern).toArray(String[]::new); + request.indices(indices); + + client.executeLocally(SearchAction.INSTANCE, request, listener); + } + + View getViewOrThrowException(final String viewName) { + return Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(views -> views.get(viewName)) + .orElseThrow(() -> new ViewNotFoundException(viewName)); + } + + private enum Operation { + CreateView("create", false), + UpdateView("update", true); + + private final String name; + private final boolean allowOverriding; + + Operation(final String name, final boolean allowOverriding) { + this.name = name; + this.allowOverriding = allowOverriding; + } + } + + private void createOrUpdateView(final Operation operation, final View view, final ActionListener<GetViewAction.Response> listener) { + clusterService.submitStateUpdateTask(operation.name + "_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + if (!operation.allowOverriding && currentState.metadata().views().containsKey(view.getName())) { + throw new ViewAlreadyExistsException(view.getName()); + } + return new ClusterState.Builder(clusterService.state()).metadata(Metadata.builder(currentState.metadata()).put(view)) + .build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to " + operation.name + " view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + final View createdView = newState.getMetadata().views().get(view.getName()); + final GetViewAction.Response response = new GetViewAction.Response(createdView); + listener.onResponse(response); + } + }); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java new file mode 100644 index 0000000000000..db0556b1bf334 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** View transport handlers. 
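ViewService is the single entry point the transport actions delegate to: create and update funnel into createOrUpdateView, where create refuses to overwrite an existing name (ViewAlreadyExistsException) and update keeps the original createdAt while stamping a new modifiedAt. The LongSupplier constructor argument exists so timestamps can be pinned in tests; passing null falls back to System::currentTimeMillis. A small wiring sketch, assuming clusterService and nodeClient are available at the construction site (which is outside this hunk):

    // production wiring: wall-clock timestamps
    ViewService viewService = new ViewService(clusterService, nodeClient, null);

    // test wiring: deterministic createdAt/modifiedAt values
    ViewService fixedClockViewService = new ViewService(clusterService, nodeClient, () -> 1_700_000_000_000L);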
*/ +package org.opensearch.action.admin.indices.view; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index b15c69a41972f..9ec41fdca585d 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -583,4 +583,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + return "MultiGetRequest{" + + "preference='" + + preference + + '\'' + + ", realtime=" + + realtime + + ", refresh=" + + refresh + + ", items=" + + items + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 5b41c2a13b596..9e1d065c96dd6 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -118,6 +118,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final SearchRequestContext searchRequestContext; private SearchPhase currentPhase; + private boolean currentPhaseHasLifecycle; private final List<Releasable> releasables = new ArrayList<>(); @@ -436,12 +437,16 @@ private void onPhaseEnd(SearchRequestContext searchRequestContext) { long tookInNanos = System.nanoTime() - getCurrentPhase().getStartTimeInNanos(); searchRequestContext.updatePhaseTookMap(getCurrentPhase().getName(), TimeUnit.NANOSECONDS.toMillis(tookInNanos)); } - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); + } } private void onPhaseStart(SearchPhase phase) { setCurrentPhase(phase); - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + } } private void onRequestEnd(SearchRequestContext searchRequestContext) { @@ -456,6 +461,7 @@ private void executePhase(SearchPhase phase) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } + onPhaseFailure(phase, "", e); } } @@ -635,6 +641,12 @@ public SearchPhase getCurrentPhase() { private void setCurrentPhase(SearchPhase phase) { currentPhase = phase; + // The WrappingSearchAsyncActionPhase (see please CanMatchPreFilterSearchPhase as one example) is a special case + // of search phase that wraps SearchAsyncActionPhase as SearchPhase. The AbstractSearchAsyncAction manages own + // onPhaseStart / onPhaseFailure / OnPhaseDone callbacks and the wrapping SearchPhase is being abandoned + // (fe, has no onPhaseEnd callbacks called ever). To fix that, we would not send any notifications for this + // phase. 
+ currentPhaseHasLifecycle = ((phase instanceof WrappingSearchAsyncActionPhase) == false); } @Override @@ -714,7 +726,9 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); + } raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index 5f46e0c298de4..5b887b48f696e 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -398,4 +398,16 @@ public boolean shouldCancelChildrenOnCancellation() { } }; } + + @Override + public String toString() { + return "MultiSearchRequest{" + + "maxConcurrentSearchRequests=" + + maxConcurrentSearchRequests + + ", requests=" + + requests + + ", indicesOptions=" + + indicesOptions + + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 96cea17ff4972..f738c182c06da 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -359,7 +359,7 @@ boolean isFinalReduce() { * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. */ - long getOrCreateAbsoluteStartMillis() { + public long getOrCreateAbsoluteStartMillis() { return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index e949c5e0bea29..9dac827e7d518 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -406,6 +406,15 @@ public SearchRequestBuilder setTrackScores(boolean trackScores) { return this; } + /** + * Applies when fetching scores with named queries, and controls if scores will be tracked as well. + * Defaults to {@code false}. + */ + public SearchRequestBuilder setIncludeNamedQueriesScore(boolean includeNamedQueriesScore) { + sourceBuilder().includeNamedQueriesScores(includeNamedQueriesScore); + return this; + } + /** * Indicates if the total hit count for the query should be tracked. 
Requests will count total hit count accurately * up to 10,000 by default, see {@link #setTrackTotalHitsUpTo(int)} to change this value or set to true/false to always/never diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java index eceac7204b196..383d9b5e82fe2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -22,7 +22,7 @@ * @opensearch.internal */ @InternalApi -class SearchRequestContext { +public class SearchRequestContext { private final SearchRequestOperationsListener searchRequestOperationsListener; private long absoluteStartNanos; private final Map<String, Long> phaseTookMap; @@ -47,7 +47,7 @@ void updatePhaseTookMap(String phaseName, Long tookTime) { this.phaseTookMap.put(phaseName, tookTime); } - Map<String, Long> phaseTookMap() { + public Map<String, Long> phaseTookMap() { return phaseTookMap; } @@ -70,7 +70,7 @@ void setAbsoluteStartNanos(long absoluteStartNanos) { /** * Request start time in nanos */ - long getAbsoluteStartNanos() { + public long getAbsoluteStartNanos() { return absoluteStartNanos; } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 2a09cc084f79f..2acb35af667f0 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -31,21 +31,21 @@ protected SearchRequestOperationsListener(final boolean enabled) { this.enabled = enabled; } - abstract void onPhaseStart(SearchPhaseContext context); + protected abstract void onPhaseStart(SearchPhaseContext context); - abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); + protected abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); - abstract void onPhaseFailure(SearchPhaseContext context); + protected abstract void onPhaseFailure(SearchPhaseContext context); - void onRequestStart(SearchRequestContext searchRequestContext) {} + protected void onRequestStart(SearchRequestContext searchRequestContext) {} - void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} - boolean isEnabled(SearchRequest searchRequest) { + protected boolean isEnabled(SearchRequest searchRequest) { return isEnabled(); } - boolean isEnabled() { + protected boolean isEnabled() { return enabled; } @@ -69,7 +69,7 @@ static final class CompositeListener extends SearchRequestOperationsListener { } @Override - void onPhaseStart(SearchPhaseContext context) { + protected void onPhaseStart(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseStart(context); @@ -80,7 +80,7 @@ void onPhaseStart(SearchPhaseContext context) { } @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseEnd(context, searchRequestContext); @@ -91,7 +91,7 @@ void 
onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestCo } @Override - void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseFailure(context); @@ -102,7 +102,7 @@ void onPhaseFailure(SearchPhaseContext context) { } @Override - void onRequestStart(SearchRequestContext searchRequestContext) { + protected void onRequestStart(SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onRequestStart(searchRequestContext); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index 7f25f9026f215..74e04d976cb1c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -134,19 +134,19 @@ public SearchRequestSlowLog(ClusterService clusterService) { } @Override - void onPhaseStart(SearchPhaseContext context) {} + protected void onPhaseStart(SearchPhaseContext context) {} @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context) {} @Override - void onRequestStart(SearchRequestContext searchRequestContext) {} + protected void onRequestStart(SearchRequestContext searchRequestContext) {} @Override - void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 88d599a0dcdaa..ac32b08afb7f6 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -58,12 +58,12 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { } @Override - void onPhaseStart(SearchPhaseContext context) { + protected void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); } @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); phaseStats.current.dec(); phaseStats.total.inc(); @@ -71,7 +71,7 @@ void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestCo } @Override - void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java 
b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 842c10b700d24..3d1a25a8aa01f 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -1220,8 +1220,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction timeProvider, clusterState, task, - (iter) -> { - AbstractSearchAsyncAction<? extends SearchPhaseResult> action = searchAsyncAction( + (iter) -> new WrappingSearchAsyncActionPhase( + searchAsyncAction( task, searchRequest, executor, @@ -1237,14 +1237,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction threadPool, clusters, searchRequestContext - ); - return new SearchPhase(action.getName()) { - @Override - public void run() { - action.start(); - } - }; - }, + ) + ), clusters, searchRequestContext ); diff --git a/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java new file mode 100644 index 0000000000000..3c1ad52a1fe6a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.search.SearchPhaseResult; + +/** + * The WrappingSearchAsyncActionPhase (see please {@link CanMatchPreFilterSearchPhase} as one example) is a special case + * of search phase that wraps SearchAsyncActionPhase as {@link SearchPhase}. The {@link AbstractSearchAsyncAction} manages own + * onPhaseStart / onPhaseFailure / OnPhaseDone callbacks and but just wrapping it with the SearchPhase causes + * only some callbacks being called. The {@link AbstractSearchAsyncAction} has special treatment of {@link WrappingSearchAsyncActionPhase}. + */ +class WrappingSearchAsyncActionPhase extends SearchPhase { + private final AbstractSearchAsyncAction<? extends SearchPhaseResult> action; + + protected WrappingSearchAsyncActionPhase(AbstractSearchAsyncAction<? 
extends SearchPhaseResult> action) { + super(action.getName()); + this.action = action; + } + + @Override + public void run() { + action.start(); + } + + SearchPhase getSearchPhase() { + return action; + } +} diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index e43c42446de2c..485dd43a5999c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -718,6 +718,7 @@ public final BootstrapCheckResult check(BootstrapContext context) { return BootstrapCheckResult.success(); } + @SuppressWarnings("removal") boolean isAllPermissionGranted() { final SecurityManager sm = System.getSecurityManager(); assert sm != null; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java index 0aa965ce46096..52dd5d710eedc 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java @@ -128,6 +128,7 @@ public Object remove(Object key) { /** * Returns a read-only view of all system properties */ + @SuppressWarnings("removal") public static Dictionary<Object, Object> getSystemProperties() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 4d36efff0e192..8eb4f841b9671 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -83,6 +83,7 @@ class OpenSearch extends EnvironmentAwareCommand { /** * Main entry point for starting opensearch */ + @SuppressWarnings("removal") public static void main(final String[] args) throws Exception { overrideDnsCachePolicyProperties(); /* diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java index 14435db64274c..4571eb35ca93c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java @@ -53,6 +53,7 @@ * * @opensearch.internal **/ +@SuppressWarnings("removal") final class OpenSearchPolicy extends Policy { /** template policy file, the one used in tests */ diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java index 2b28260097ce1..5f9a01436b4cb 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java @@ -98,6 +98,7 @@ void onNonFatalUncaught(final String threadName, final Throwable t) { Terminal.DEFAULT.flush(); } + @SuppressWarnings("removal") void halt(int status) { AccessController.doPrivileged(new PrivilegedHaltAction(status)); } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 749c146de4f16..a48bbd61016e3 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -119,6 +119,7 @@ * * @opensearch.internal */ +@SuppressWarnings("removal") final 
class Security { /** no instantiation */ private Security() {} diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index f4ae383249f61..322b435bdf35c 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -34,6 +34,8 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; @@ -466,6 +468,18 @@ public interface Client extends OpenSearchClient, Releasable { */ void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener); + /** Search a view */ + void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener); + + /** Search a view */ + ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request); + + /** List all view names */ + void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener); + + /** List all view names */ + ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request); + /** * Returns this clients settings */ diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 20dab1caa36c4..588584cd8a280 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -125,6 +125,9 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; @@ -838,4 +841,28 @@ public interface IndicesAdminClient extends OpenSearchClient { * Resolves names and wildcard expressions to indices, aliases, and data streams */ ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request); + + /** Create a view */ + void createView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Create a view */ + ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request); + + /** Get the details of a view */ + void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Get the details of a view */ + ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request); + + /** Delete a view */ + void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener); + + /** Delete a view */ + ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request); + + /** Update a view */ + void 
updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Update a view */ + ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request); } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 786bfa38bb19c..6c6049f04231b 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -312,6 +312,12 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -742,6 +748,26 @@ public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) { return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } + + @Override + public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) { + execute(SearchViewAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request) { + return execute(SearchViewAction.INSTANCE, request); + } + + @Override + public void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener) { + execute(ListViewNamesAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request) { + return execute(ListViewNamesAction.INSTANCE, request); + } + static class Admin implements AdminClient { private final ClusterAdmin clusterAdmin; @@ -2070,6 +2096,46 @@ public void resolveIndex(ResolveIndexAction.Request request, ActionListener<Reso public ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request) { return execute(ResolveIndexAction.INSTANCE, request); } + + @Override + public void createView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener) { + execute(CreateViewAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request) { + return execute(CreateViewAction.INSTANCE, request); + } + + /** Gets a view */ + public void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener) { + execute(GetViewAction.INSTANCE, request, listener); + } + + /** Gets a view */ + public ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request) { + return execute(GetViewAction.INSTANCE, request); + } + + /** Delete a view */ + public void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener) { + execute(DeleteViewAction.INSTANCE, request, listener); + } + + /** Delete a 
view */ + public ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request) { + return execute(DeleteViewAction.INSTANCE, request); + } + + /** Create a view */ + public void updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener) { + execute(UpdateViewAction.INSTANCE, request, listener); + } + + /** Create a view */ + public ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request) { + return execute(UpdateViewAction.INSTANCE, request); + } } @Override diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index bad881f8bda76..d2f4888ae8971 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.ViewMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.DelayedAllocationService; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -195,6 +196,7 @@ public static List<Entry> getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom ); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); + registerMetadataCustom(entries, ViewMetadata.TYPE, ViewMetadata::new, ViewMetadata::readDiffFrom); registerMetadataCustom(entries, WeightedRoutingMetadata.TYPE, WeightedRoutingMetadata::new, WeightedRoutingMetadata::readDiffFrom); registerMetadataCustom( entries, @@ -292,6 +294,7 @@ public static List<NamedXContentRegistry.Entry> getNamedXWriteables() { DataStreamMetadata::fromXContent ) ); + entries.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(ViewMetadata.TYPE), ViewMetadata::fromXContent)); entries.add( new NamedXContentRegistry.Entry( Metadata.Custom.class, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index f701a2f52277d..bc365b9872037 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -58,6 +58,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -176,12 +177,13 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - // TODO: We are using one of the existing node to build the repository metadata, this will need to be updated - // once we start supporting mixed compatibility mode. An optimization can be done as this will get invoked + // An optimization can be done as this will get invoked // for every set of node join task which we can optimize to not compute if cluster state already has // repository information. 
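+ // Prefer an existing remote store node when building the repositories metadata for join validation; if no
+ // remote store node has joined yet, fall back to any existing node and refresh the metadata below once the
+ // first remote store node does join.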
+ Optional<DiscoveryNode> remoteDN = currentNodes.getNodes().values().stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + DiscoveryNode dn = remoteDN.orElseGet(() -> (currentNodes.getNodes().values()).stream().findFirst().get()); RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( - (currentNodes.getNodes().values()).stream().findFirst().get(), + dn, currentState.getMetadata().custom(RepositoriesMetadata.TYPE) ); @@ -212,6 +214,16 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); + + if (remoteDN.isEmpty()) { + // This is hit only on cases where we encounter first remote node + logger.info("Updating system repository now for remote store"); + repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + node, + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + } + nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -495,36 +507,46 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod assert existingNodes.isEmpty() == false; - // TODO: The below check is valid till we don't support migration, once we start supporting migration a remote - // store node will be able to join a non remote store cluster and vice versa. #7986 CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = existingNodes.get(0); if (joiningNode.isRemoteStoreNode()) { + ensureRemoteStoreNodesCompatibility(joiningNode, existingNode); + } else { if (existingNode.isRemoteStoreNode()) { - RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); - RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); - if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { - throw new IllegalStateException( - "a remote store node [" - + joiningNode - + "] is trying to join a remote store cluster with incompatible node attributes in " - + "comparison with existing node [" - + existingNode - + "]" - ); - } - } else { throw new IllegalStateException( - "a remote store node [" + joiningNode + "] is trying to join a non remote store cluster" + "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" ); } - } else { - if (existingNode.isRemoteStoreNode()) { + } + } else { + if (remoteStoreCompatibilityMode == CompatibilityMode.MIXED) { + if (joiningNode.isRemoteStoreNode()) { + Optional<DiscoveryNode> remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); + } + } + } + } + + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNode existingNode) { + if (joiningNode.isRemoteStoreNode()) { + if (existingNode.isRemoteStoreNode()) { + RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); + RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new 
RemoteStoreNodeAttribute(existingNode); + if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { throw new IllegalStateException( - "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" ); } + } else { + throw new IllegalStateException("a remote store node [" + joiningNode + "] is trying to join a non remote store cluster"); } } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 1871ed24973c2..59dc86ea28ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -831,6 +831,10 @@ public Map<String, DataStream> dataStreams() { .orElse(Collections.emptyMap()); } + public Map<String, View> views() { + return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -1325,6 +1329,36 @@ public Builder removeDataStream(String name) { return this; } + private Map<String, View> getViews() { + return Optional.ofNullable(customs.get(ViewMetadata.TYPE)) + .map(o -> (ViewMetadata) o) + .map(vmd -> vmd.views()) + .orElse(new HashMap<>()); + } + + public View view(final String viewName) { + return getViews().get(viewName); + } + + public Builder views(final Map<String, View> views) { + this.customs.put(ViewMetadata.TYPE, new ViewMetadata(views)); + return this; + } + + public Builder put(final View view) { + Objects.requireNonNull(view, "view cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.put(view.getName(), view); + return views(replacementViews); + } + + public Builder removeView(final String viewName) { + Objects.requireNonNull(viewName, "viewName cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.remove(viewName); + return views(replacementViews); + } + public Custom getCustom(String type) { return customs.get(type); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 3384393d8feaf..4dde5d0ea013f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -39,6 +39,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.shrink.ResizeType; @@ -135,6 +136,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; +import static 
org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; @@ -816,6 +818,16 @@ static Settings aggregateIndexSettings( final Settings.Builder requestSettings = Settings.builder().put(request.settings()); final Settings.Builder indexSettingsBuilder = Settings.builder(); + + // Store type of `remote_snapshot` is intended to be system-managed for searchable snapshot indexes so a special case is needed here + // to prevent a user specifying this value when creating an index + String storeTypeSetting = request.settings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new IllegalArgumentException( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ); + } + if (sourceMetadata == null) { final Settings.Builder additionalIndexSettings = Settings.builder(); final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/View.java b/server/src/main/java/org/opensearch/cluster/metadata/View.java new file mode 100644 index 0000000000000..1b1639bbca945 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/View.java @@ -0,0 +1,205 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** View of data in OpenSearch indices */ +@ExperimentalApi +public class View extends AbstractDiffable<View> implements ToXContentObject { + + private final String name; + private final String description; + private final long createdAt; + private final long modifiedAt; + private final SortedSet<Target> targets; + + public View(final String name, final String description, final Long createdAt, final Long modifiedAt, final Set<Target> targets) { + this.name = Objects.requireNonNull(name, "Name must be provided"); + this.description = description; + this.createdAt = createdAt != null ? createdAt : -1; + this.modifiedAt = modifiedAt != null ? 
modifiedAt : -1; + this.targets = new TreeSet<>(Objects.requireNonNull(targets, "Targets are required on a view")); + } + + public View(final StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readZLong(), in.readZLong(), new TreeSet<>(in.readList(Target::new))); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public long getCreatedAt() { + return createdAt; + } + + public long getModifiedAt() { + return modifiedAt; + } + + public SortedSet<Target> getTargets() { + return new TreeSet<>(targets); + } + + public static Diff<View> readDiffFrom(final StreamInput in) throws IOException { + return readDiffFrom(View::new, in); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + View that = (View) o; + return name.equals(that.name) + && description.equals(that.description) + && createdAt == that.createdAt + && modifiedAt == that.modifiedAt + && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, createdAt, modifiedAt, targets); + } + + /** The source of data used to project the view */ + @ExperimentalApi + public static class Target implements Writeable, ToXContentObject, Comparable<Target> { + + private final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = Objects.requireNonNull(indexPattern, "IndexPattern is required"); + } + + public Target(final StreamInput in) throws IOException { + this(in.readString()); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + public static final ParseField INDEX_PATTERN_FIELD = new ParseField("indexPattern"); + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_PATTERN_FIELD.getPreferredName(), indexPattern); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + @Override + public int compareTo(final Target o) { + if (this == o) return 0; + + final Target other = (Target) o; + return this.indexPattern.compareTo(other.indexPattern); + } + } + + public static final ParseField NAME_FIELD = new ParseField("name"); + public static final ParseField DESCRIPTION_FIELD = new ParseField("description"); + public static final ParseField CREATED_AT_FIELD = new ParseField("createdAt"); + public static final ParseField MODIFIED_AT_FIELD = new ParseField("modifiedAt"); + public static final ParseField TARGETS_FIELD = new ParseField("targets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<View, Void> PARSER = 
new ConstructingObjectParser<>( + "view", + args -> new View((String) args[0], (String) args[1], (Long) args[2], (Long) args[3], new TreeSet<>((List<Target>) args[4])) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DESCRIPTION_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, CREATED_AT_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, MODIFIED_AT_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), TARGETS_FIELD); + } + + public static View fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + builder.field(CREATED_AT_FIELD.getPreferredName(), createdAt); + builder.field(MODIFIED_AT_FIELD.getPreferredName(), modifiedAt); + builder.field(TARGETS_FIELD.getPreferredName(), targets); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeZLong(createdAt); + out.writeZLong(modifiedAt); + out.writeList(targets.stream().collect(Collectors.toList())); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java new file mode 100644 index 0000000000000..a89068078be58 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
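View is an ordinary cluster-metadata value object: targets are kept sorted, timestamps default to -1 when absent, and the Metadata.Builder helpers shown earlier (put, removeView, views) are the write path. A small sketch of building a view and projecting it into metadata, purely to illustrate the shapes involved:

    View view = new View(
        "logs-view",
        "all log indices",
        System.currentTimeMillis(),
        System.currentTimeMillis(),
        Set.of(new View.Target("logs-*")));

    Metadata metadata = Metadata.builder().put(view).build();
    View stored = metadata.views().get("logs-view");   // same name-keyed map that ViewService resolves against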
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.Version; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.cluster.metadata.ComposableIndexTemplateMetadata.MINIMMAL_SUPPORTED_VERSION; + +/** View metadata */ +@ExperimentalApi +public class ViewMetadata implements Metadata.Custom { + + public static final String TYPE = "view"; + private static final ParseField VIEW_FIELD = new ParseField("view"); + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<ViewMetadata, Void> PARSER = new ConstructingObjectParser<>( + TYPE, + false, + a -> new ViewMetadata((Map<String, View>) a[0]) + ); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> { + Map<String, View> views = new HashMap<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + views.put(p.currentName(), View.fromXContent(p)); + } + return views; + }, VIEW_FIELD); + } + + private final Map<String, View> views; + + public ViewMetadata(final Map<String, View> views) { + this.views = views; + } + + public ViewMetadata(final StreamInput in) throws IOException { + this.views = in.readMap(StreamInput::readString, View::new); + } + + public Map<String, View> views() { + return this.views; + } + + @Override + public Diff<Metadata.Custom> diff(final Metadata.Custom before) { + return new ViewMetadata.ViewMetadataDiff((ViewMetadata) before, this); + } + + public static NamedDiff<Metadata.Custom> readDiffFrom(final StreamInput in) throws IOException { + return new ViewMetadata.ViewMetadataDiff(in); + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return MINIMMAL_SUPPORTED_VERSION; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeMap(this.views, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + } + + public static ViewMetadata fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(VIEW_FIELD.getPreferredName()); + for (Map.Entry<String, View> entry : views.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + return builder; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int hashCode() { + return Objects.hash(this.views); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ViewMetadata other = (ViewMetadata) 
obj; + return Objects.equals(this.views, other.views); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + /** + * Builder of view metadata. + */ + @ExperimentalApi + public static class Builder { + + private final Map<String, View> views = new HashMap<>(); + + public Builder putDataStream(final View view) { + views.put(view.getName(), view); + return this; + } + + public ViewMetadata build() { + return new ViewMetadata(views); + } + } + + /** + * A diff between view metadata. + */ + static class ViewMetadataDiff implements NamedDiff<Metadata.Custom> { + + final Diff<Map<String, View>> dataStreamDiff; + + ViewMetadataDiff(ViewMetadata before, ViewMetadata after) { + this.dataStreamDiff = DiffableUtils.diff(before.views, after.views, DiffableUtils.getStringKeySerializer()); + } + + ViewMetadataDiff(StreamInput in) throws IOException { + this.dataStreamDiff = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), View::new, View::readDiffFrom); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new ViewMetadata(dataStreamDiff.apply(((ViewMetadata) part).views)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + dataStreamDiff.writeTo(out); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index bc24dd22f5c6e..b303c3a2034d5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -26,6 +26,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.Map; +import java.util.Objects; /** * Contains metadata for weighted routing @@ -99,7 +100,7 @@ public static NamedDiff<Metadata.Custom> readDiffFrom(StreamInput in) throws IOE public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws IOException { String attrKey = null; Double attrValue; - String attributeName = null; + String attributeName = ""; Map<String, Double> weights = new HashMap<>(); WeightedRouting weightedRouting; XContentParser.Token token; @@ -162,12 +163,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WeightedRoutingMetadata that = (WeightedRoutingMetadata) o; - return weightedRouting.equals(that.weightedRouting); + return weightedRouting.equals(that.weightedRouting) && version == that.version; } @Override public int hashCode() { - return weightedRouting.hashCode(); + return Objects.hash(weightedRouting.hashCode(), version); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index a406552f854da..938a603c459c9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -729,23 +729,6 @@ assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == + " was matched but wasn't removed"; } - public void swapPrimaryWithReplica( - Logger logger, - ShardRouting primaryShard, - ShardRouting replicaShard, - RoutingChangesObserver changes - ) { - assert primaryShard.primary() : "Invalid primary shard provided"; - assert 
!replicaShard.primary() : "Invalid Replica shard provided"; - - ShardRouting newPrimary = primaryShard.moveActivePrimaryToReplica(); - ShardRouting newReplica = replicaShard.moveActiveReplicaToPrimary(); - updateAssigned(primaryShard, newPrimary); - updateAssigned(replicaShard, newReplica); - logger.info("Swap relocation performed for shard [{}]", newPrimary.shortSummary()); - changes.replicaPromoted(newPrimary); - } - private void unassignPrimaryAndPromoteActiveReplicaIfExists( ShardRouting failedShard, UnassignedInfo unassignedInfo, diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java index 2b93a1483b801..468fac08d2946 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -54,6 +54,7 @@ public boolean isSet() { @Override public void writeTo(StreamOutput out) throws IOException { + out.writeString(attributeName); out.writeGenericValue(weights); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index 8a14ce3f1a288..a05938c176678 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -43,6 +43,8 @@ public final class RemoteShardsBalancer extends ShardsBalancer { private final Logger logger; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; + // indicates if there are any nodes being throttled for allocating any unassigned shards + private boolean anyNodesThrottled = false; public RemoteShardsBalancer(Logger logger, RoutingAllocation allocation) { this.logger = logger; @@ -84,24 +86,39 @@ void moveShards() { Queue<RoutingNode> excludedNodes = new ArrayDeque<>(); classifyNodesForShardMovement(eligibleNodes, excludedNodes); - if (excludedNodes.isEmpty()) { - logger.debug("No excluded nodes found. 
Returning..."); - return; - } - - while (!eligibleNodes.isEmpty() && !excludedNodes.isEmpty()) { - RoutingNode sourceNode = excludedNodes.poll(); - for (ShardRouting ineligibleShard : sourceNode) { - if (ineligibleShard.started() == false) { + // move shards that cannot remain on eligible nodes + final List<ShardRouting> forceMoveShards = new ArrayList<>(); + eligibleNodes.forEach(sourceNode -> { + for (final ShardRouting shardRouting : sourceNode) { + if (ineligibleForMove(shardRouting)) { continue; } - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(ineligibleShard, allocation))) { + if (allocation.deciders().canRemain(shardRouting, sourceNode, allocation) == Decision.NO) { + forceMoveShards.add(shardRouting); + } + } + }); + for (final ShardRouting shard : forceMoveShards) { + if (eligibleNodes.isEmpty()) { + logger.trace("there are no eligible nodes available, return"); + return; + } + + tryShardMovementToEligibleNode(eligibleNodes, shard); + } + + // move shards that are currently assigned on excluded nodes + while (eligibleNodes.isEmpty() == false && excludedNodes.isEmpty() == false) { + RoutingNode sourceNode = excludedNodes.poll(); + for (final ShardRouting ineligibleShard : sourceNode) { + if (ineligibleForMove(ineligibleShard)) { continue; } if (eligibleNodes.isEmpty()) { - break; + logger.trace("there are no eligible nodes available, return"); + return; } tryShardMovementToEligibleNode(eligibleNodes, ineligibleShard); @@ -109,6 +126,10 @@ void moveShards() { } } + private boolean ineligibleForMove(ShardRouting shard) { + return shard.started() == false || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false; + } + /** * Classifies the nodes into eligible and excluded depending on whether node is able or unable for shard assignment * @param eligibleNodes contains the list of classified nodes eligible to accept shards @@ -145,10 +166,23 @@ private void classifyNodesForShardMovement(Queue<RoutingNode> eligibleNodes, Que * @param shard the ineligible shard to be moved */ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, ShardRouting shard) { - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!eligibleNodes.isEmpty()) { - RoutingNode targetNode = eligibleNodes.poll(); - Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); + final Set<String> nodesCheckedForShard = new HashSet<>(); + int numNodesToCheck = eligibleNodes.size(); + while (eligibleNodes.isEmpty() == false) { + assert numNodesToCheck > 0; + final RoutingNode targetNode = eligibleNodes.poll(); + --numNodesToCheck; + // skip the node that the target shard is currently allocated on + if (targetNode.nodeId().equals(shard.currentNodeId())) { + assert nodesCheckedForShard.add(targetNode.nodeId()); + eligibleNodes.offer(targetNode); + if (numNodesToCheck == 0) { + return; + } + continue; + } + + final Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); if (currentShardDecision.type() == Decision.Type.YES) { if (logger.isDebugEnabled()) { @@ -166,7 +200,7 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh allocation.changes() ); eligibleNodes.offer(targetNode); - break; + return; } else { if (logger.isTraceEnabled()) { logger.trace( @@ -177,18 +211,19 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh ); } - Decision nodeLevelDecision = 
allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); + final Decision nodeLevelDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); if (nodeLevelDecision.type() == Decision.Type.YES) { logger.debug("Node: [{}] can still accept shards. Adding it back to the queue.", targetNode.nodeId()); eligibleNodes.offer(targetNode); - nodesCheckedForShard.add(targetNode.nodeId()); + assert nodesCheckedForShard.add(targetNode.nodeId()); } else { logger.debug("Node: [{}] cannot accept any more shards. Removing it from queue.", targetNode.nodeId()); } - // Break out if all nodes in the queue have been checked for this shard - if (eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { - break; + // Break out if all eligible nodes have been examined + if (numNodesToCheck == 0) { + assert eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId())); + return; } } } @@ -225,7 +260,7 @@ void balance() { } } - while (!sourceNodes.isEmpty() && !targetNodes.isEmpty()) { + while (sourceNodes.isEmpty() == false && targetNodes.isEmpty() == false) { RoutingNode sourceNode = sourceNodes.poll(); tryRebalanceNode(sourceNode, targetNodes, avgPrimaryPerNode, nodePrimaryShardCount); } @@ -275,11 +310,11 @@ public Map<String, UnassignedIndexShards> groupUnassignedShardsByIndex() { HashMap<String, UnassignedIndexShards> unassignedShardMap = new HashMap<>(); for (ShardRouting shard : routingNodes.unassigned().drain()) { String index = shard.getIndexName(); - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { routingNodes.unassigned().add(shard); continue; } - if (!unassignedShardMap.containsKey(index)) { + if (unassignedShardMap.containsKey(index) == false) { unassignedShardMap.put(index, new UnassignedIndexShards()); } unassignedShardMap.get(index).addShard(shard); @@ -296,13 +331,15 @@ private void unassignIgnoredRemoteShards(RoutingAllocation routingAllocation) { RoutingNodes.UnassignedShards unassignedShards = routingAllocation.routingNodes().unassigned(); for (ShardRouting shard : unassignedShards.drainIgnored()) { RoutingPool pool = RoutingPool.getShardPool(shard, routingAllocation); - if (pool == RoutingPool.REMOTE_CAPABLE && shard.unassigned() && (shard.primary() || !shard.unassignedInfo().isDelayed())) { + if (pool == RoutingPool.REMOTE_CAPABLE + && shard.unassigned() + && (shard.primary() || shard.unassignedInfo().isDelayed() == false)) { ShardRouting unassignedShard = shard; // Shard when moved to an unassigned state updates the recovery source to be ExistingStoreRecoverySource // Remote shards do not have an existing store to recover from and can be recovered from an empty source // to re-fetch any shard blocks from the repository. 
if (shard.primary()) { - if (!RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType())) { + if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false) { unassignedShard = shard.updateUnassigned(shard.unassignedInfo(), RecoverySource.EmptyStoreRecoverySource.INSTANCE); } } @@ -323,12 +360,16 @@ private void allocateUnassignedReplicas(Queue<RoutingNode> nodeQueue, Map<String } private void ignoreRemainingShards(Map<String, UnassignedIndexShards> unassignedShardMap) { + // If any nodes are throttled during allocation, mark all remaining unassigned shards as THROTTLED + final UnassignedInfo.AllocationStatus status = anyNodesThrottled + ? UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED + : UnassignedInfo.AllocationStatus.DECIDERS_NO; for (UnassignedIndexShards indexShards : unassignedShardMap.values()) { for (ShardRouting shard : indexShards.getPrimaries()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } for (ShardRouting shard : indexShards.getReplicas()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } } } @@ -353,7 +394,7 @@ private void allocateUnassignedShards( } logger.debug("Allocating shards for index: [{}]", index); - while (!shardsToAllocate.isEmpty() && !nodeQueue.isEmpty()) { + while (shardsToAllocate.isEmpty() == false && nodeQueue.isEmpty() == false) { ShardRouting shard = shardsToAllocate.poll(); if (shard.assignedToNode()) { if (logger.isDebugEnabled()) { @@ -389,11 +430,11 @@ private void allocateUnassignedShards( private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouting shard) { boolean allocated = false; boolean throttled = false; - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!nodeQueue.isEmpty()) { + int numNodesToCheck = nodeQueue.size(); + while (nodeQueue.isEmpty() == false) { RoutingNode node = nodeQueue.poll(); + --numNodesToCheck; Decision allocateDecision = allocation.deciders().canAllocate(shard, node, allocation); - nodesCheckedForShard.add(node.nodeId()); if (allocateDecision.type() == Decision.Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shardShortSummary(shard), node.nodeId()); @@ -432,6 +473,10 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti } nodeQueue.offer(node); } else { + if (nodeLevelDecision.type() == Decision.Type.THROTTLE) { + anyNodesThrottled = true; + } + if (logger.isTraceEnabled()) { logger.trace( "Cannot allocate any shard to node: [{}]. Removing from queue. Node level decisions: [{}],[{}]", @@ -443,14 +488,14 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti } // Break out if all nodes in the queue have been checked for this shard - if (nodeQueue.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { + if (numNodesToCheck == 0) { break; } } } - if (!allocated) { - UnassignedInfo.AllocationStatus status = throttled + if (allocated == false) { + UnassignedInfo.AllocationStatus status = (throttled || anyNodesThrottled) ? 
UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED : UnassignedInfo.AllocationStatus.DECIDERS_NO; routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); @@ -470,14 +515,16 @@ private void tryRebalanceNode( // Try to relocate the valid shards on the sourceNode, one at a time; // until either sourceNode is balanced OR no more active primary shard available OR all the target nodes are exhausted - while (shardsToBalance > 0 && shardIterator.hasNext() && !targetNodes.isEmpty()) { + while (shardsToBalance > 0 && shardIterator.hasNext() && targetNodes.isEmpty() == false) { // Find an active primary shard to relocate ShardRouting shard = shardIterator.next(); - if (!shard.started() || !shard.primary() || !RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (shard.started() == false + || shard.primary() == false + || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { continue; } - while (!targetNodes.isEmpty()) { + while (targetNodes.isEmpty() == false) { // Find a valid target node that can accommodate the current shard relocation RoutingNode targetNode = targetNodes.poll(); if (primaryCount.get(targetNode.nodeId()) >= avgPrimary) { @@ -485,6 +532,10 @@ private void tryRebalanceNode( continue; } + if (targetNode.getByShardId(shard.shardId()) != null) { + continue; + } + // Try relocate the shard on the target node Decision rebalanceDecision = tryRelocateShard(shard, targetNode); @@ -522,21 +573,10 @@ private void tryRebalanceNode( } /** - * For every primary shard for which this method is invoked, - * swap is attempted with the destination node in case replica shard is present. - * In case replica is not present, relocation of the shard id performed. + * For every primary shard for which this method is invoked, relocation of the shard is performed. */ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNode) { - // Check if there is already a replica for the shard on the destination node. - // Then we can directly swap the replica with the primary shards. - // Invariant: We only allow swap relocation on remote shards.
- ShardRouting replicaShard = destinationNode.getByShardId(shard.shardId()); - if (replicaShard != null) { - assert !replicaShard.primary() : "Primary Shard found while expected Replica during shard rebalance"; - return executeSwapShard(shard, replicaShard, allocation); - } - - // Since no replica present on the destinationNode; try relocating the shard to the destination node + assert destinationNode.getByShardId(shard.shardId()) == null; Decision allocationDecision = allocation.deciders().canAllocate(shard, destinationNode, allocation); Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation); logger.trace( @@ -566,15 +606,6 @@ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNod return Decision.NO; } - private Decision executeSwapShard(ShardRouting primaryShard, ShardRouting replicaShard, RoutingAllocation allocation) { - if (!replicaShard.started()) { - return new Decision.Single(Decision.Type.NO); - } - - allocation.routingNodes().swapPrimaryWithReplica(logger, primaryShard, replicaShard, allocation.changes()); - return new Decision.Single(Decision.Type.YES); - } - private void failUnattemptedShards() { RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { diff --git a/server/src/main/java/org/opensearch/common/cache/CachePolicyInfoWrapper.java b/server/src/main/java/org/opensearch/common/cache/CachePolicyInfoWrapper.java new file mode 100644 index 0000000000000..ada1e7442747a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/CachePolicyInfoWrapper.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * A class containing information needed for all CacheTierPolicy objects to decide whether to admit + * a given BytesReference. This spares us from having to create an entire short-lived QuerySearchResult object + * just to read a few values. + */ +public class CachePolicyInfoWrapper implements Writeable { + private final Long tookTimeNanos; + + public CachePolicyInfoWrapper(Long tookTimeNanos) { + this.tookTimeNanos = tookTimeNanos; + // Add more values here as they are needed for future cache tier policies + } + + public CachePolicyInfoWrapper(StreamInput in) throws IOException { + this.tookTimeNanos = in.readOptionalLong(); + } + + public Long getTookTimeNanos() { + return tookTimeNanos; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalLong(tookTimeNanos); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/CacheTierPolicy.java b/server/src/main/java/org/opensearch/common/cache/CacheTierPolicy.java new file mode 100644 index 0000000000000..ec39c8d74792a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/CacheTierPolicy.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache; + +/** + * An interface for policies that inspect data of type T to decide whether they are admitted into a cache tier. + */ +public interface CacheTierPolicy<T> { + /** + * Determines whether this policy allows the data into its cache tier. + * @param data The data to check + * @return true if accepted, otherwise false + */ + boolean checkData(T data); +} diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java new file mode 100644 index 0000000000000..c5aeb7cd1fa40 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Cache types available within OpenSearch. + */ +@ExperimentalApi +public enum CacheType { + INDICES_REQUEST_CACHE("indices.requests.cache"); + + private final String settingPrefix; + + CacheType(String settingPrefix) { + this.settingPrefix = settingPrefix; + } + + public String getSettingPrefix() { + return settingPrefix; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java index c6ea5fca1a8fe..f7be46a852631 100644 --- a/server/src/main/java/org/opensearch/common/cache/ICache.java +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -8,6 +8,12 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.store.config.CacheConfig; + +import java.io.Closeable; +import java.util.Map; + /** * Represents a cache interface. * @param <K> Type of key. @@ -15,7 +21,8 @@ * * @opensearch.experimental */ -public interface ICache<K, V> { +@ExperimentalApi +public interface ICache<K, V> extends Closeable { V get(K key); void put(K key, V value); @@ -31,4 +38,14 @@ public interface ICache<K, V> { long count(); void refresh(); + + /** + * Factory to create objects. + */ + @ExperimentalApi + interface Factory { + <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories); + + String getCacheName(); + } } diff --git a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java index 57aa4aa39c782..aafd46560021b 100644 --- a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java +++ b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java @@ -8,13 +8,16 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Extends a cache loader with awareness of whether the data is loaded or not. * @param <K> Type of key. * @param <V> Type of value. 
* - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface LoadAwareCacheLoader<K, V> extends CacheLoader<K, V> { boolean isLoaded(); } diff --git a/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java new file mode 100644 index 0000000000000..832a65b573aec --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.service.CacheService; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Holds all the cache factories and provides a way to fetch them when needed. + */ +@ExperimentalApi +public class CacheModule { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + + private final CacheService cacheService; + private final Settings settings; + + public CacheModule(List<CachePlugin> cachePlugins, Settings settings) { + this.cacheStoreTypeFactories = getCacheStoreTypeFactories(cachePlugins); + this.settings = settings; + this.cacheService = new CacheService(cacheStoreTypeFactories, settings); + } + + private static Map<String, ICache.Factory> getCacheStoreTypeFactories(List<CachePlugin> cachePlugins) { + Map<String, ICache.Factory> cacheStoreTypeFactories = new HashMap<>(); + // Add the core OpenSearchOnHeapCache as well. + cacheStoreTypeFactories.put( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory() + ); + for (CachePlugin cachePlugin : cachePlugins) { + Map<String, ICache.Factory> factoryMap = cachePlugin.getCacheFactoryMap(); + for (Map.Entry<String, ICache.Factory> entry : factoryMap.entrySet()) { + if (cacheStoreTypeFactories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Cache name: " + entry.getKey() + " is " + "already registered"); + } + } + } + return Collections.unmodifiableMap(cacheStoreTypeFactories); + } + + public CacheService getCacheService() { + return this.cacheService; + } + + // Package private for testing. + Map<String, ICache.Factory> getCacheStoreTypeFactories() { + return cacheStoreTypeFactories; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/module/package-info.java b/server/src/main/java/org/opensearch/common/cache/module/package-info.java new file mode 100644 index 0000000000000..95ed25ca21643 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache providers. 
*/ +package org.opensearch.common.cache.module; diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java new file mode 100644 index 0000000000000..c6e970b58ea08 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +import java.util.HashMap; +import java.util.Map; + +/** + * Service responsible to create caches. + */ +public class CacheService { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + private final Settings settings; + private Map<CacheType, ICache<?, ?>> cacheTypeMap; + + public CacheService(Map<String, ICache.Factory> cacheStoreTypeFactories, Settings settings) { + this.cacheStoreTypeFactories = cacheStoreTypeFactories; + this.settings = settings; + this.cacheTypeMap = new HashMap<>(); + } + + public Map<CacheType, ICache<?, ?>> getCacheTypeMap() { + return this.cacheTypeMap; + } + + public <K, V> ICache<K, V> createCache(CacheConfig<K, V> config, CacheType cacheType) { + Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String storeName = cacheSettingForCacheType.get(settings); + if (storeName == null || storeName.isBlank()) { + throw new IllegalArgumentException("No configuration exists for cache type: " + cacheType); + } + if (!cacheStoreTypeFactories.containsKey(storeName)) { + throw new IllegalArgumentException("No store name: [" + storeName + "] is registered for cache type: " + cacheType); + } + ICache.Factory factory = cacheStoreTypeFactories.get(storeName); + ICache<K, V> iCache = factory.create(config, cacheType, cacheStoreTypeFactories); + cacheTypeMap.put(cacheType, iCache); + return iCache; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/service/package-info.java b/server/src/main/java/org/opensearch/common/cache/service/package-info.java new file mode 100644 index 0000000000000..5fb87f7613627 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +/** Package related to cache service **/ +package org.opensearch.common.cache.service; diff --git a/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java new file mode 100644 index 0000000000000..eb4563fda2275 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.settings; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; + +/** + * Settings related to cache. + */ +@ExperimentalApi +public class CacheSettings { + + /** + * Used to store cache store name for desired cache types within OpenSearch. + * Setting pattern: {cache_type}.store.name + * Example: indices.request.cache.store.name + */ + public static final Setting.AffixSetting<String> CACHE_TYPE_STORE_NAME = Setting.suffixKeySetting( + "store.name", + (key) -> Setting.simpleString(key, "", Setting.Property.NodeScope) + ); + + public static Setting<String> getConcreteSettingForCacheType(CacheType cacheType) { + return CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java new file mode 100644 index 0000000000000..7fa82021c5557 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Base package for cache settings */ +package org.opensearch.common.cache.settings; diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index c497c8dbb7ea9..d218903de5b6d 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -10,12 +10,21 @@ import org.opensearch.common.cache.Cache; import org.opensearch.common.cache.CacheBuilder; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; -import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; -import org.opensearch.common.cache.store.enums.CacheStoreType; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.Map; + +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; /** * This variant of on-heap cache uses OpenSearch custom cache implementation. @@ -24,11 +33,10 @@ * * @opensearch.experimental */ -public class OpenSearchOnHeapCache<K, V> implements StoreAwareCache<K, V>, RemovalListener<K, V> { +public class OpenSearchOnHeapCache<K, V> implements ICache<K, V>, RemovalListener<K, V> { private final Cache<K, V> cache; - - private final StoreAwareCacheEventListener<K, V> eventListener; + private final RemovalListener<K, V> removalListener; public OpenSearchOnHeapCache(Builder<K, V> builder) { CacheBuilder<K, V> cacheBuilder = CacheBuilder.<K, V>builder() @@ -39,35 +47,23 @@ public OpenSearchOnHeapCache(Builder<K, V> builder) { cacheBuilder.setExpireAfterAccess(builder.getExpireAfterAcess()); } cache = cacheBuilder.build(); - this.eventListener = builder.getEventListener(); + this.removalListener = builder.getRemovalListener(); } @Override public V get(K key) { V value = cache.get(key); - if (value != null) { - eventListener.onHit(key, value, CacheStoreType.ON_HEAP); - } else { - eventListener.onMiss(key, CacheStoreType.ON_HEAP); - } return value; } @Override public void put(K key, V value) { cache.put(key, value); - eventListener.onCached(key, value, CacheStoreType.ON_HEAP); } @Override public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { V value = cache.computeIfAbsent(key, key1 -> loader.load(key)); - if (!loader.isLoaded()) { - eventListener.onHit(key, value, CacheStoreType.ON_HEAP); - } else { - eventListener.onMiss(key, CacheStoreType.ON_HEAP); - eventListener.onCached(key, value, CacheStoreType.ON_HEAP); - } return value; } @@ -97,20 +93,33 @@ public void refresh() { } @Override - public CacheStoreType getTierType() { - return CacheStoreType.ON_HEAP; - } + public void close() {} @Override public void onRemoval(RemovalNotification<K, V> notification) { - eventListener.onRemoval( - new StoreAwareCacheRemovalNotification<>( - notification.getKey(), - 
notification.getValue(), - notification.getRemovalReason(), - CacheStoreType.ON_HEAP - ) - ); + this.removalListener.onRemoval(notification); + } + + /** + * Factory to create OpenSearchOnheap cache. + */ + public static class OpenSearchOnHeapCacheFactory implements Factory { + + public static final String NAME = "opensearch_onheap"; + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + return new Builder<K, V>().setMaximumWeightInBytes( + ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes() + ).setWeigher(config.getWeigher()).setRemovalListener(config.getRemovalListener()).build(); + } + + @Override + public String getCacheName() { + return NAME; + } } /** @@ -118,10 +127,10 @@ public void onRemoval(RemovalNotification<K, V> notification) { * @param <K> Type of key * @param <V> Type of value */ - public static class Builder<K, V> extends StoreAwareCacheBuilder<K, V> { + public static class Builder<K, V> extends ICacheBuilder<K, V> { @Override - public StoreAwareCache<K, V> build() { + public ICache<K, V> build() { return new OpenSearchOnHeapCache<K, V>(this); } } diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java deleted file mode 100644 index 45ca48d94c140..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.ICache; -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Represents a cache with a specific type of store like onHeap, disk etc. - * @param <K> Type of key. - * @param <V> Type of value. - * - * @opensearch.experimental - */ -public interface StoreAwareCache<K, V> extends ICache<K, V> { - CacheStoreType getTierType(); -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java deleted file mode 100644 index 492dbff3532a1..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.RemovalNotification; -import org.opensearch.common.cache.RemovalReason; -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Removal notification for store aware cache. - * @param <K> Type of key. - * @param <V> Type of value. 
- * - * @opensearch.internal - */ -public class StoreAwareCacheRemovalNotification<K, V> extends RemovalNotification<K, V> { - private final CacheStoreType cacheStoreType; - - public StoreAwareCacheRemovalNotification(K key, V value, RemovalReason removalReason, CacheStoreType cacheStoreType) { - super(key, value, removalReason); - this.cacheStoreType = cacheStoreType; - } - - public CacheStoreType getCacheStoreType() { - return cacheStoreType; - } -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java deleted file mode 100644 index 4fbbbbfebfaa7..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Represents a store aware cache value. - * @param <V> Type of value. - * - * @opensearch.internal - */ -public class StoreAwareCacheValue<V> { - private final V value; - private final CacheStoreType source; - - public StoreAwareCacheValue(V value, CacheStoreType source) { - this.value = value; - this.source = source; - } - - public V getValue() { - return value; - } - - public CacheStoreType getCacheStoreType() { - return source; - } -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java new file mode 100644 index 0000000000000..7ca9080ec1aa6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.builders; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.ToLongBiFunction; + +/** + * Builder for store aware cache. + * @param <K> Type of key. + * @param <V> Type of value. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public abstract class ICacheBuilder<K, V> { + + private long maxWeightInBytes; + + private ToLongBiFunction<K, V> weigher; + + private TimeValue expireAfterAcess; + + private Settings settings; + + private RemovalListener<K, V> removalListener; + + public ICacheBuilder() {} + + public ICacheBuilder<K, V> setMaximumWeightInBytes(long sizeInBytes) { + this.maxWeightInBytes = sizeInBytes; + return this; + } + + public ICacheBuilder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { + this.weigher = weigher; + return this; + } + + public ICacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAcess) { + this.expireAfterAcess = expireAfterAcess; + return this; + } + + public ICacheBuilder<K, V> setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public ICacheBuilder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + public long getMaxWeightInBytes() { + return maxWeightInBytes; + } + + public TimeValue getExpireAfterAcess() { + return expireAfterAcess; + } + + public ToLongBiFunction<K, V> getWeigher() { + return weigher; + } + + public RemovalListener<K, V> getRemovalListener() { + return this.removalListener; + } + + public Settings getSettings() { + return settings; + } + + public abstract ICache<K, V> build(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java deleted file mode 100644 index fc5aa48aae90f..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store.builders; - -import org.opensearch.common.cache.store.StoreAwareCache; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; -import org.opensearch.common.unit.TimeValue; - -import java.util.function.ToLongBiFunction; - -/** - * Builder for store aware cache. - * @param <K> Type of key. - * @param <V> Type of value. 
- * - * @opensearch.internal - */ -public abstract class StoreAwareCacheBuilder<K, V> { - - private long maxWeightInBytes; - - private ToLongBiFunction<K, V> weigher; - - private TimeValue expireAfterAcess; - - private StoreAwareCacheEventListener<K, V> eventListener; - - public StoreAwareCacheBuilder() {} - - public StoreAwareCacheBuilder<K, V> setMaximumWeightInBytes(long sizeInBytes) { - this.maxWeightInBytes = sizeInBytes; - return this; - } - - public StoreAwareCacheBuilder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { - this.weigher = weigher; - return this; - } - - public StoreAwareCacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAcess) { - this.expireAfterAcess = expireAfterAcess; - return this; - } - - public StoreAwareCacheBuilder<K, V> setEventListener(StoreAwareCacheEventListener<K, V> eventListener) { - this.eventListener = eventListener; - return this; - } - - public long getMaxWeightInBytes() { - return maxWeightInBytes; - } - - public TimeValue getExpireAfterAcess() { - return expireAfterAcess; - } - - public ToLongBiFunction<K, V> getWeigher() { - return weigher; - } - - public StoreAwareCacheEventListener<K, V> getEventListener() { - return eventListener; - } - - public abstract StoreAwareCache<K, V> build(); -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java new file mode 100644 index 0000000000000..6fefea6578fb9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.config; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.settings.Settings; + +import java.util.function.ToLongBiFunction; + +/** + * Common configurations related to store aware caches. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class CacheConfig<K, V> { + + private final Settings settings; + + /** + * Defines the key type. + */ + private final Class<K> keyType; + + /** + * Defines the value type. + */ + private final Class<V> valueType; + + /** + * Represents a function that calculates the size or weight of a key-value pair. + */ + private final ToLongBiFunction<K, V> weigher; + + private final RemovalListener<K, V> removalListener; + + private CacheConfig(Builder<K, V> builder) { + this.keyType = builder.keyType; + this.valueType = builder.valueType; + this.settings = builder.settings; + this.removalListener = builder.removalListener; + this.weigher = builder.weigher; + } + + public Class<K> getKeyType() { + return keyType; + } + + public Class<V> getValueType() { + return valueType; + } + + public Settings getSettings() { + return settings; + } + + public RemovalListener<K, V> getRemovalListener() { + return removalListener; + } + + public ToLongBiFunction<K, V> getWeigher() { + return weigher; + } + + /** + * Builder class to build Cache config related parameters. + * @param <K> Type of key. + * @param <V> Type of value. 
+ */ + public static class Builder<K, V> { + + private Settings settings; + + private Class<K> keyType; + + private Class<V> valueType; + + private RemovalListener<K, V> removalListener; + + private ToLongBiFunction<K, V> weigher; + + public Builder() {} + + public Builder<K, V> setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public Builder<K, V> setKeyType(Class<K> keyType) { + this.keyType = keyType; + return this; + } + + public Builder<K, V> setValueType(Class<V> valueType) { + this.valueType = valueType; + return this; + } + + public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + public Builder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { + this.weigher = weigher; + return this; + } + + public CacheConfig<K, V> build() { + return new CacheConfig<>(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java new file mode 100644 index 0000000000000..6b662a8af3f9d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for store aware cache config */ +package org.opensearch.common.cache.store.config; diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java b/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java deleted file mode 100644 index 6d7e4b39aaf9f..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store.listeners; - -import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * This can be used to listen to tiered caching events - * @param <K> Type of key - * @param <V> Type of value - * - * @opensearch.internal - */ -public interface StoreAwareCacheEventListener<K, V> { - - void onMiss(K key, CacheStoreType cacheStoreType); - - void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification); - - void onHit(K key, V value, CacheStoreType cacheStoreType); - - void onCached(K key, V value, CacheStoreType cacheStoreType); -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java new file mode 100644 index 0000000000000..bfd2d937fb430 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.store.settings; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings for OpenSearchOnHeap + */ +public class OpenSearchOnHeapCacheSettings { + + /** + * Setting to define maximum size for the cache as a percentage of heap memory available. + * + * Setting pattern: {cache_type}.opensearch_onheap.size + */ + public static final Setting.AffixSetting<ByteSizeValue> MAXIMUM_SIZE_IN_BYTES = Setting.suffixKeySetting( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ".size", + (key) -> Setting.memorySizeSetting(key, "1%", NodeScope) + ); + + public static final String MAXIMUM_SIZE_IN_BYTES_KEY = "maximum_size_in_bytes"; + + private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of(MAXIMUM_SIZE_IN_BYTES_KEY, MAXIMUM_SIZE_IN_BYTES); + + public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap(); + + private static Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() { + Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + Map<String, Setting<?>> settingMap = new HashMap<>(); + for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) { + settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix())); + } + cacheTypeMap.put(cacheType, settingMap); + } + return cacheTypeMap; + } + + public static Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) { + Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType); + if (cacheTypeSettings == null) { + throw new IllegalArgumentException( + "No settings exist for cache store name: " + + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + + "associated with " + + "cache type: " + + cacheType + ); + } + return cacheTypeSettings; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java new file mode 100644 index 0000000000000..91613876a5f31 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache setting **/ +package org.opensearch.common.cache.store.settings; diff --git a/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java b/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java deleted file mode 100644 index 8b432c9484aed..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.common.cache.tier; - -import org.opensearch.common.cache.ICache; -import org.opensearch.common.cache.LoadAwareCacheLoader; -import org.opensearch.common.cache.RemovalReason; -import org.opensearch.common.cache.store.StoreAwareCache; -import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; -import org.opensearch.common.cache.store.StoreAwareCacheValue; -import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; -import org.opensearch.common.cache.store.enums.CacheStoreType; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; -import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.util.iterable.Iterables; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Function; - -/** - * This cache spillover the evicted items from heap tier to disk tier. All the new items are first cached on heap - * and the items evicted from on heap cache are moved to disk based cache. If disk based cache also gets full, - * then items are eventually evicted from it and removed which will result in cache miss. - * - * @param <K> Type of key - * @param <V> Type of value - * - * @opensearch.experimental - */ -public class TieredSpilloverCache<K, V> implements ICache<K, V>, StoreAwareCacheEventListener<K, V> { - - // TODO: Remove optional when diskCache implementation is integrated. - private final Optional<StoreAwareCache<K, V>> onDiskCache; - private final StoreAwareCache<K, V> onHeapCache; - private final StoreAwareCacheEventListener<K, V> listener; - ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); - ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock()); - - /** - * Maintains caching tiers in ascending order of cache latency. 
- */ - private final List<StoreAwareCache<K, V>> cacheList; - - TieredSpilloverCache(Builder<K, V> builder) { - Objects.requireNonNull(builder.onHeapCacheBuilder, "onHeap cache builder can't be null"); - this.onHeapCache = builder.onHeapCacheBuilder.setEventListener(this).build(); - if (builder.onDiskCacheBuilder != null) { - this.onDiskCache = Optional.of(builder.onDiskCacheBuilder.setEventListener(this).build()); - } else { - this.onDiskCache = Optional.empty(); - } - this.listener = builder.listener; - this.cacheList = this.onDiskCache.map(diskTier -> Arrays.asList(this.onHeapCache, diskTier)).orElse(List.of(this.onHeapCache)); - } - - // Package private for testing - StoreAwareCache<K, V> getOnHeapCache() { - return onHeapCache; - } - - // Package private for testing - Optional<StoreAwareCache<K, V>> getOnDiskCache() { - return onDiskCache; - } - - @Override - public V get(K key) { - StoreAwareCacheValue<V> cacheValue = getValueFromTieredCache(true).apply(key); - if (cacheValue == null) { - return null; - } - return cacheValue.getValue(); - } - - @Override - public void put(K key, V value) { - try (ReleasableLock ignore = writeLock.acquire()) { - onHeapCache.put(key, value); - listener.onCached(key, value, CacheStoreType.ON_HEAP); - } - } - - @Override - public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { - // We are skipping calling event listeners at this step as we do another get inside below computeIfAbsent. - // Where we might end up calling onMiss twice for a key not present in onHeap cache. - // Similary we might end up calling both onMiss and onHit for a key, in case we are receiving concurrent - // requests for the same key which requires loading only once. - StoreAwareCacheValue<V> cacheValue = getValueFromTieredCache(false).apply(key); - if (cacheValue == null) { - // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. - // This is needed as there can be many requests for the same key at the same time and we only want to load - // the value once. - V value = null; - try (ReleasableLock ignore = writeLock.acquire()) { - value = onHeapCache.computeIfAbsent(key, loader); - } - if (loader.isLoaded()) { - listener.onMiss(key, CacheStoreType.ON_HEAP); - onDiskCache.ifPresent(diskTier -> listener.onMiss(key, CacheStoreType.DISK)); - listener.onCached(key, value, CacheStoreType.ON_HEAP); - } else { - listener.onHit(key, value, CacheStoreType.ON_HEAP); - } - return value; - } - listener.onHit(key, cacheValue.getValue(), cacheValue.getCacheStoreType()); - if (cacheValue.getCacheStoreType().equals(CacheStoreType.DISK)) { - listener.onMiss(key, CacheStoreType.ON_HEAP); - } - return cacheValue.getValue(); - } - - @Override - public void invalidate(K key) { - // We are trying to invalidate the key from all caches though it would be present in only of them. - // Doing this as we don't know where it is located. We could do a get from both and check that, but what will - // also trigger a hit/miss listener event, so ignoring it for now. - try (ReleasableLock ignore = writeLock.acquire()) { - for (StoreAwareCache<K, V> storeAwareCache : cacheList) { - storeAwareCache.invalidate(key); - } - } - } - - @Override - public void invalidateAll() { - try (ReleasableLock ignore = writeLock.acquire()) { - for (StoreAwareCache<K, V> storeAwareCache : cacheList) { - storeAwareCache.invalidateAll(); - } - } - } - - /** - * Provides an iteration over both onHeap and disk keys. 
This is not protected from any mutations to the cache. - * @return An iterable over (onHeap + disk) keys - */ - @Override - public Iterable<K> keys() { - Iterable<K> onDiskKeysIterable; - if (onDiskCache.isPresent()) { - onDiskKeysIterable = onDiskCache.get().keys(); - } else { - onDiskKeysIterable = Collections::emptyIterator; - } - return Iterables.concat(onHeapCache.keys(), onDiskKeysIterable); - } - - @Override - public long count() { - long totalCount = 0; - for (StoreAwareCache<K, V> storeAwareCache : cacheList) { - totalCount += storeAwareCache.count(); - } - return totalCount; - } - - @Override - public void refresh() { - try (ReleasableLock ignore = writeLock.acquire()) { - for (StoreAwareCache<K, V> storeAwareCache : cacheList) { - storeAwareCache.refresh(); - } - } - } - - @Override - public void onMiss(K key, CacheStoreType cacheStoreType) { - // Misses for tiered cache are tracked here itself. - } - - @Override - public void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification) { - if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) - || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { - switch (notification.getCacheStoreType()) { - case ON_HEAP: - try (ReleasableLock ignore = writeLock.acquire()) { - onDiskCache.ifPresent(diskTier -> { diskTier.put(notification.getKey(), notification.getValue()); }); - } - onDiskCache.ifPresent( - diskTier -> listener.onCached(notification.getKey(), notification.getValue(), CacheStoreType.DISK) - ); - break; - default: - break; - } - } - listener.onRemoval(notification); - } - - @Override - public void onHit(K key, V value, CacheStoreType cacheStoreType) { - // Hits for tiered cache are tracked here itself. - } - - @Override - public void onCached(K key, V value, CacheStoreType cacheStoreType) { - // onCached events for tiered cache are tracked here itself. - } - - private Function<K, StoreAwareCacheValue<V>> getValueFromTieredCache(boolean triggerEventListener) { - return key -> { - try (ReleasableLock ignore = readLock.acquire()) { - for (StoreAwareCache<K, V> storeAwareCache : cacheList) { - V value = storeAwareCache.get(key); - if (value != null) { - if (triggerEventListener) { - listener.onHit(key, value, storeAwareCache.getTierType()); - } - return new StoreAwareCacheValue<>(value, storeAwareCache.getTierType()); - } else { - if (triggerEventListener) { - listener.onMiss(key, storeAwareCache.getTierType()); - } - } - } - } - return null; - }; - } - - /** - * Builder object for tiered spillover cache. 
- * @param <K> Type of key - * @param <V> Type of value - */ - public static class Builder<K, V> { - private StoreAwareCacheBuilder<K, V> onHeapCacheBuilder; - private StoreAwareCacheBuilder<K, V> onDiskCacheBuilder; - private StoreAwareCacheEventListener<K, V> listener; - - public Builder() {} - - public Builder<K, V> setOnHeapCacheBuilder(StoreAwareCacheBuilder<K, V> onHeapCacheBuilder) { - this.onHeapCacheBuilder = onHeapCacheBuilder; - return this; - } - - public Builder<K, V> setOnDiskCacheBuilder(StoreAwareCacheBuilder<K, V> onDiskCacheBuilder) { - this.onDiskCacheBuilder = onDiskCacheBuilder; - return this; - } - - public Builder<K, V> setListener(StoreAwareCacheEventListener<K, V> listener) { - this.listener = listener; - return this; - } - - public TieredSpilloverCache<K, V> build() { - return new TieredSpilloverCache<>(this); - } - } -} diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java index 90350c0a21a42..5aff09d715622 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java @@ -45,6 +45,7 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.Version; @@ -302,6 +303,11 @@ public DocIdSetIterator iterator() { return subQueryScorer.iterator(); } + @Override + public TwoPhaseIterator twoPhaseIterator() { + return subQueryScorer.twoPhaseIterator(); + } + @Override public float getMaxScore(int upTo) { return Float.MAX_VALUE; // TODO: what would be a good upper bound? 
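The ScriptScoreQuery hunk above forwards twoPhaseIterator() to the wrapped scorer. As a rough sketch of why that delegation matters (the class and field names below are hypothetical and not part of this change), a scorer that wraps another scorer but exposes only iterator() hides the wrapped query's approximation/verification split, so callers can no longer postpone the more expensive second-phase match check; forwarding the method keeps that split visible:

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

// Minimal delegating scorer (illustrative only): implements the abstract Scorer methods
// and forwards everything to the wrapped scorer.
final class DelegatingScorer extends Scorer {
    private final Scorer in;

    DelegatingScorer(Weight weight, Scorer in) {
        super(weight);
        this.in = in;
    }

    @Override
    public DocIdSetIterator iterator() {
        return in.iterator();
    }

    @Override
    public TwoPhaseIterator twoPhaseIterator() {
        // Forwarding, as the patch does, lets consumers keep using the cheap approximation
        // first and defer the wrapped query's costly match verification.
        return in.twoPhaseIterator();
    }

    @Override
    public int docID() {
        return in.docID();
    }

    @Override
    public float score() throws IOException {
        return in.score();
    }

    @Override
    public float getMaxScore(int upTo) throws IOException {
        return in.getMaxScore(upTo);
    }
}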
diff --git a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java index bb273b14c42e2..1804a9ac05a29 100644 --- a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java +++ b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java @@ -144,7 +144,7 @@ public long readLong(long pos) throws IOException { } private void validatePos(long pos, int len) throws EOFException { - if (pos < 0 || pos + len > length + offset) { + if (pos < 0 || pos + len > length) { throw new EOFException("seek past EOF"); } } diff --git a/server/src/main/java/org/opensearch/common/path/PathTrie.java b/server/src/main/java/org/opensearch/common/path/PathTrie.java index 7cb7b46acfafe..0b516fa037c48 100644 --- a/server/src/main/java/org/opensearch/common/path/PathTrie.java +++ b/server/src/main/java/org/opensearch/common/path/PathTrie.java @@ -37,6 +37,7 @@ import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Stack; import java.util.function.BiFunction; import java.util.function.Supplier; @@ -405,4 +406,45 @@ public T next() { } }; } + + public Iterator<T> retrieveAll() { + Stack<TrieNode> stack = new Stack<>(); + stack.add(root); + + return new Iterator<T>() { + @Override + public boolean hasNext() { + while (!stack.empty()) { + TrieNode node = stack.peek(); + + if (node.value != null) { + return true; + } + + advance(); + } + + return false; + } + + @Override + public T next() { + while (!stack.empty()) { + TrieNode node = advance(); + + if (node.value != null) { + return node.value; + } + } + + throw new NoSuchElementException("called next() without validating hasNext()! 
no more nodes available"); + } + + private TrieNode advance() { + TrieNode node = stack.pop(); + stack.addAll(node.children.values()); + return node; + } + }; + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 277286ae1ff1b..0baa09d7b63d1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -293,6 +293,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, @@ -697,13 +698,17 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, - IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + // Concurrent segment search settings + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING ) ) ); @@ -716,11 +721,6 @@ public void apply(Settings value, Settings current, Settings previous) { * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}. 
*/ public static final Map<List<String>, List<Setting>> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( - List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), - List.of( - SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, - SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING - ), List.of(FeatureFlags.TELEMETRY), List.of( TelemetrySettings.TRACER_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index e19f8e8370d5b..47da53b52c325 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -32,9 +32,10 @@ protected FeatureFlagSettings( public static final Set<Setting<?>> BUILT_IN_FEATURE_FLAGS = Set.of( FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, - FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, FeatureFlags.TELEMETRY_SETTING, FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, - FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING + FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING, + FeatureFlags.DOC_ID_FUZZY_SET_SETTING, + FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index b34a2aaffe408..49bb3abf1decd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -41,7 +41,6 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; @@ -150,6 +149,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.MAX_ANALYZED_OFFSET_SETTING, IndexSettings.MAX_TERMS_COUNT_SETTING, + IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, IndexSettings.DEFAULT_FIELD_SETTING, IndexSettings.QUERY_STRING_LENIENT_SETTING, @@ -230,6 +230,12 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, + IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING, + IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, + + // Settings for concurrent segment search + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, + // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { Map<String, Settings> groups = s.getAsGroups(); @@ -252,10 +258,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * is ready for production release, the feature flag can be removed, and the * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. 
*/ - public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.CONCURRENT_SEGMENT_SEARCH, - List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); + public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of(); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index 0e96edff0681c..fea4c165809ba 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -978,6 +978,9 @@ private Setting<T> getConcreteSetting(String namespace, String key) { * Get a setting with the given namespace filled in for prefix and suffix. */ public Setting<T> getConcreteSettingForNamespace(String namespace) { + if (namespace == null) { + throw new IllegalArgumentException("Namespace should not be null"); + } String fullKey = key.toConcreteKey(namespace).toString(); return getConcreteSetting(namespace, fullKey); } @@ -2804,6 +2807,12 @@ public static <T> AffixSetting<T> prefixKeySetting(String prefix, Function<Strin return affixKeySetting(new AffixKey(prefix), delegateFactoryWithNamespace); } + public static <T> AffixSetting<T> suffixKeySetting(String suffix, Function<String, Setting<T>> delegateFactory) { + BiFunction<String, String, Setting<T>> delegateFactoryWithNamespace = (ns, k) -> delegateFactory.apply(k); + AffixKey affixKey = new AffixKey(null, suffix); + return affixKeySetting(affixKey, delegateFactoryWithNamespace); + } + /** * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters @@ -2943,12 +2952,14 @@ public static final class AffixKey implements Key { assert prefix != null || suffix != null : "Either prefix or suffix must be non-null"; this.prefix = prefix; - if (prefix.endsWith(".") == false) { + if (prefix != null && prefix.endsWith(".") == false) { throw new IllegalArgumentException("prefix must end with a '.'"); } this.suffix = suffix; if (suffix == null) { pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))"); + } else if (prefix == null) { + pattern = Pattern.compile("((?:[-\\w]+[.])*[-\\w]+\\." + Pattern.quote(suffix) + ")"); } else { // the last part of this regexp is to support both list and group keys pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." + Pattern.quote(suffix) + ")(?:\\..*)?"); diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java index e74ab687b903b..527dce7677dd8 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java @@ -1299,6 +1299,41 @@ public class DateFormatters { .withResolverStyle(ResolverStyle.STRICT) ); + /** + * Returns RFC 3339 a popular ISO 8601 profile compatible date time formatter and parser. 
+ * This is not fully compatible to the existing spec, its more linient and closely follows w3c note on datetime + */ + + public static final DateFormatter RFC3339_LENIENT_DATE_FORMATTER = new JavaDateFormatter( + "rfc3339_lenient", + new OpenSearchDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new RFC3339CompatibleDateTimeFormatter( + new DateTimeFormatterBuilder().append(DATE_FORMATTER) + .optionalStart() + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 1, 2, SignStyle.NOT_NEGATIVE) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .optionalEnd() + .optionalStart() + .appendLiteral(',') + .appendFraction(NANO_OF_SECOND, 1, 9, false) + .optionalEnd() + .optionalStart() + .appendOffsetId() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ) + ); + private static final DateTimeFormatter HOUR_MINUTE_SECOND_FORMATTER = new DateTimeFormatterBuilder().append(HOUR_MINUTE_FORMATTER) .appendLiteral(":") .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) @@ -2152,6 +2187,8 @@ static DateFormatter forPattern(String input) { return STRICT_YEAR_MONTH; } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) { return STRICT_YEAR_MONTH_DAY; + } else if (FormatNames.RFC3339_LENIENT.matches(input)) { + return RFC3339_LENIENT_DATE_FORMATTER; } else { try { return new JavaDateFormatter( diff --git a/server/src/main/java/org/opensearch/common/time/FormatNames.java b/server/src/main/java/org/opensearch/common/time/FormatNames.java index ba0a8fcf4a17a..ec5e825fc933e 100644 --- a/server/src/main/java/org/opensearch/common/time/FormatNames.java +++ b/server/src/main/java/org/opensearch/common/time/FormatNames.java @@ -44,6 +44,7 @@ */ public enum FormatNames { ISO8601(null, "iso8601"), + RFC3339_LENIENT(null, "rfc3339_lenient"), BASIC_DATE("basicDate", "basic_date"), BASIC_DATE_TIME("basicDateTime", "basic_date_time"), BASIC_DATE_TIME_NO_MILLIS("basicDateTimeNoMillis", "basic_date_time_no_millis"), diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index f711b14aeb928..033ea280e6172 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -36,6 +36,7 @@ import org.opensearch.core.common.Strings; import java.text.ParsePosition; +import java.time.DateTimeException; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -52,7 +53,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -70,11 +70,11 @@ class JavaDateFormatter implements DateFormatter { private final String format; private final String printFormat; - private final DateTimeFormatter printer; - private final List<DateTimeFormatter> parsers; + private final OpenSearchDateTimePrinter printer; + private final List<OpenSearchDateTimeFormatter> parsers; private final JavaDateFormatter roundupParser; private final Boolean canCacheLastParsedFormatter; - private volatile DateTimeFormatter lastParsedformatter = null; + private volatile 
OpenSearchDateTimeFormatter lastParsedformatter = null; /** * A round up formatter @@ -83,11 +83,11 @@ class JavaDateFormatter implements DateFormatter { */ static class RoundUpFormatter extends JavaDateFormatter { - RoundUpFormatter(String format, List<DateTimeFormatter> roundUpParsers) { + RoundUpFormatter(String format, List<OpenSearchDateTimeFormatter> roundUpParsers) { super(format, firstFrom(roundUpParsers), null, roundUpParsers); } - private static DateTimeFormatter firstFrom(List<DateTimeFormatter> roundUpParsers) { + private static OpenSearchDateTimeFormatter firstFrom(List<OpenSearchDateTimeFormatter> roundUpParsers) { return roundUpParsers.get(0); } @@ -101,14 +101,18 @@ JavaDateFormatter getRoundupParser() { JavaDateFormatter( String format, String printFormat, - DateTimeFormatter printer, + OpenSearchDateTimePrinter printer, Boolean canCacheLastParsedFormatter, - DateTimeFormatter... parsers + OpenSearchDateTimeFormatter... parsers ) { this(format, printFormat, printer, ROUND_UP_BASE_FIELDS, canCacheLastParsedFormatter, parsers); } JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { + this(format, format, wrapFormatter(printer), false, wrapAllFormatters(parsers)); + } + + JavaDateFormatter(String format, OpenSearchDateTimePrinter printer, OpenSearchDateTimeFormatter... parsers) { this(format, format, printer, false, parsers); } @@ -127,19 +131,19 @@ JavaDateFormatter getRoundupParser() { JavaDateFormatter( String format, String printFormat, - DateTimeFormatter printer, + OpenSearchDateTimePrinter printer, BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer, Boolean canCacheLastParsedFormatter, - DateTimeFormatter... parsers + OpenSearchDateTimeFormatter... parsers ) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } - long distinctZones = Arrays.stream(parsers).map(DateTimeFormatter::getZone).distinct().count(); + long distinctZones = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getZone).distinct().count(); if (distinctZones > 1) { throw new IllegalArgumentException("formatters must have the same time zone"); } - long distinctLocales = Arrays.stream(parsers).map(DateTimeFormatter::getLocale).distinct().count(); + long distinctLocales = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getLocale).distinct().count(); if (distinctLocales > 1) { throw new IllegalArgumentException("formatters must have the same locale"); } @@ -149,12 +153,12 @@ JavaDateFormatter getRoundupParser() { this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; if (parsers.length == 0) { - this.parsers = Collections.singletonList(printer); + this.parsers = Collections.singletonList((OpenSearchDateTimeFormatter) printer); } else { this.parsers = Arrays.asList(parsers); } List<DateTimeFormatter> roundUp = createRoundUpParser(format, roundupParserConsumer); - this.roundupParser = new RoundUpFormatter(format, roundUp); + this.roundupParser = new RoundUpFormatter(format, wrapAllFormatters(roundUp)); } JavaDateFormatter( @@ -163,7 +167,7 @@ JavaDateFormatter getRoundupParser() { BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer, DateTimeFormatter... 
parsers ) { - this(format, format, printer, roundupParserConsumer, false, parsers); + this(format, format, wrapFormatter(printer), roundupParserConsumer, false, wrapAllFormatters(parsers)); } /** @@ -181,7 +185,8 @@ private List<DateTimeFormatter> createRoundUpParser( ) { if (format.contains("||") == false) { List<DateTimeFormatter> roundUpParsers = new ArrayList<>(); - for (DateTimeFormatter parser : this.parsers) { + for (OpenSearchDateTimeFormatter customparser : this.parsers) { + DateTimeFormatter parser = customparser.getFormatter(); DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); builder.append(parser); roundupParserConsumer.accept(builder, parser); @@ -201,12 +206,12 @@ public static DateFormatter combined( assert formatters.size() > 0; assert printFormatter != null; - List<DateTimeFormatter> parsers = new ArrayList<>(formatters.size()); - List<DateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size()); + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>(formatters.size()); + List<OpenSearchDateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size()); assert printFormatter instanceof JavaDateFormatter; JavaDateFormatter javaPrintFormatter = (JavaDateFormatter) printFormatter; - DateTimeFormatter printer = javaPrintFormatter.getPrinter(); + OpenSearchDateTimePrinter printer = javaPrintFormatter.getPrinter(); for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; @@ -227,9 +232,9 @@ public static DateFormatter combined( private JavaDateFormatter( String format, String printFormat, - DateTimeFormatter printer, - List<DateTimeFormatter> roundUpParsers, - List<DateTimeFormatter> parsers, + OpenSearchDateTimePrinter printer, + List<OpenSearchDateTimeFormatter> roundUpParsers, + List<OpenSearchDateTimeFormatter> parsers, Boolean canCacheLastParsedFormatter ) { this.format = format; @@ -245,6 +250,15 @@ private JavaDateFormatter( DateTimeFormatter printer, List<DateTimeFormatter> roundUpParsers, List<DateTimeFormatter> parsers + ) { + this(format, format, wrapFormatter(printer), wrapAllFormatters(roundUpParsers), wrapAllFormatters(parsers), false); + } + + private JavaDateFormatter( + String format, + OpenSearchDateTimePrinter printer, + List<OpenSearchDateTimeFormatter> roundUpParsers, + List<OpenSearchDateTimeFormatter> parsers ) { this(format, format, printer, roundUpParsers, parsers, false); } @@ -253,7 +267,7 @@ JavaDateFormatter getRoundupParser() { return roundupParser; } - DateTimeFormatter getPrinter() { + OpenSearchDateTimePrinter getPrinter() { return printer; } @@ -265,7 +279,7 @@ public TemporalAccessor parse(String input) { try { return doParse(input); - } catch (DateTimeParseException e) { + } catch (DateTimeException e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } } @@ -289,14 +303,14 @@ private TemporalAccessor doParse(String input) { Object object = null; if (canCacheLastParsedFormatter && lastParsedformatter != null) { ParsePosition pos = new ParsePosition(0); - object = lastParsedformatter.toFormat().parseObject(input, pos); + object = lastParsedformatter.parseObject(input, pos); if (parsingSucceeded(object, input, pos)) { return (TemporalAccessor) object; } } - for (DateTimeFormatter formatter : parsers) { + for (OpenSearchDateTimeFormatter formatter : parsers) { ParsePosition pos = new ParsePosition(0); - object = 
formatter.toFormat().parseObject(input, pos); + object = formatter.parseObject(input, pos); if (parsingSucceeded(object, input, pos)) { lastParsedformatter = formatter; return (TemporalAccessor) object; @@ -312,16 +326,28 @@ private boolean parsingSucceeded(Object object, String input, ParsePosition pos) return object != null && pos.getIndex() == input.length(); } + private static OpenSearchDateTimeFormatter wrapFormatter(DateTimeFormatter formatter) { + return new OpenSearchDateTimeFormatter(formatter); + } + + private static OpenSearchDateTimeFormatter[] wrapAllFormatters(DateTimeFormatter... formatters) { + return Arrays.stream(formatters).map(JavaDateFormatter::wrapFormatter).toArray(OpenSearchDateTimeFormatter[]::new); + } + + private static List<OpenSearchDateTimeFormatter> wrapAllFormatters(List<DateTimeFormatter> formatters) { + return formatters.stream().map(JavaDateFormatter::wrapFormatter).collect(Collectors.toList()); + } + @Override public DateFormatter withZone(ZoneId zoneId) { // shortcurt to not create new objects unnecessarily if (zoneId.equals(zone())) { return this; } - List<DateTimeFormatter> parsers = new CopyOnWriteArrayList<>( + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>( this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList()) ); - List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() + List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withZone(zoneId)) .collect(Collectors.toList()); @@ -334,10 +360,10 @@ public DateFormatter withLocale(Locale locale) { if (locale.equals(locale())) { return this; } - List<DateTimeFormatter> parsers = new CopyOnWriteArrayList<>( + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>( this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList()) ); - List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() + List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withLocale(locale)) .collect(Collectors.toList()); @@ -396,7 +422,7 @@ public String toString() { return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale()); } - Collection<DateTimeFormatter> getParsers() { + Collection<OpenSearchDateTimeFormatter> getParsers() { return parsers; } } diff --git a/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java new file mode 100644 index 0000000000000..3a629d8843949 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.time; + +import java.text.Format; +import java.text.ParsePosition; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQuery; +import java.util.Locale; + +/** +* Wrapper class for DateTimeFormatter{@link java.time.format.DateTimeFormatter} +* to allow for custom implementations for datetime parsing/formatting + */ +class OpenSearchDateTimeFormatter implements OpenSearchDateTimePrinter { + private final DateTimeFormatter formatter; + + public OpenSearchDateTimeFormatter(String pattern) { + this.formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT); + } + + public OpenSearchDateTimeFormatter(String pattern, Locale locale) { + this.formatter = DateTimeFormatter.ofPattern(pattern, locale); + } + + public OpenSearchDateTimeFormatter(DateTimeFormatter formatter) { + this.formatter = formatter; + } + + public OpenSearchDateTimeFormatter withLocale(Locale locale) { + return new OpenSearchDateTimeFormatter(getFormatter().withLocale(locale)); + } + + public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) { + return new OpenSearchDateTimeFormatter(getFormatter().withZone(zoneId)); + } + + public String format(TemporalAccessor temporal) { + return this.getFormatter().format(temporal); + } + + public TemporalAccessor parse(CharSequence text, ParsePosition position) { + return this.getFormatter().parse(text, position); + } + + public TemporalAccessor parse(CharSequence text) { + return this.getFormatter().parse(text); + } + + public <T> T parse(CharSequence text, TemporalQuery<T> query) { + return this.getFormatter().parse(text, query); + } + + public ZoneId getZone() { + return this.getFormatter().getZone(); + } + + public Locale getLocale() { + return this.getFormatter().getLocale(); + } + + public TemporalAccessor parse(String input) { + return formatter.parse(input); + } + + public DateTimeFormatter getFormatter() { + return formatter; + } + + public Format toFormat() { + return getFormatter().toFormat(); + } + + public Object parseObject(String text, ParsePosition pos) { + return getFormatter().toFormat().parseObject(text, pos); + } +} diff --git a/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java new file mode 100644 index 0000000000000..350bae21b22b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.time; + +import java.time.ZoneId; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +/** + * Interface for DateTimeFormatter{@link java.time.format.DateTimeFormatter} + * to allow for custom implementations for datetime formatting + */ +interface OpenSearchDateTimePrinter { + + public OpenSearchDateTimePrinter withLocale(Locale locale); + + public OpenSearchDateTimePrinter withZone(ZoneId zoneId); + + public String format(TemporalAccessor temporal); + + public Locale getLocale(); + + public ZoneId getZone(); +} diff --git a/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java new file mode 100644 index 0000000000000..98b87efd2380b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Internet Time Utility project (https://github.com/ethlo/itu) under the Apache License, version 2.0. + * Copyright (C) 2017 Morten Haraldsen (ethlo) + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.common.time; + +import java.text.ParsePosition; +import java.time.DateTimeException; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; +import java.util.Arrays; +import java.util.Locale; + +/** + * Defines a close profile of RFC3339 datetime format where the date is mandatory and the time is optional. + * <p> + * The returned formatter can only be used for parsing, printing is unsupported. + * <p> + * This parser can parse zoned datetimes. + * The parser is strict by default, thus time string {@code 24:00} cannot be parsed. + * <p> + * It accepts formats described by the following syntax: + * <pre> + * Year: + * YYYY (eg 1997) + * Year and month: + * YYYY-MM (eg 1997-07) + * Complete date: + * YYYY-MM-DD (eg 1997-07-16) + * Complete date plus hours and minutes: + * YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) + * Complete date plus hours, minutes and seconds: + * YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) + * Complete date plus hours, minutes, seconds and a decimal fraction of a second + * YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) + * YYYY-MM-DDThh:mm:ss,sTZD (eg 1997-07-16T19:20:30,45+01:00) + * where: + * + * YYYY = four-digit year + * MM = two-digit month (01=January, etc.) 
+ * DD = two-digit day of month (01 through 31) + * hh = two digits of hour (00 through 23) (am/pm NOT allowed) + * mm = two digits of minute (00 through 59) + * ss = two digits of second (00 through 59) + * s = one or more(max 9) digits representing a decimal fraction of a second + * TZD = time zone designator (Z or z or +hh:mm or -hh:mm) + * </pre> + */ +final class RFC3339CompatibleDateTimeFormatter extends OpenSearchDateTimeFormatter { + public static final char DATE_SEPARATOR = '-'; + public static final char TIME_SEPARATOR = ':'; + public static final char SEPARATOR_UPPER = 'T'; + private static final char PLUS = '+'; + private static final char MINUS = '-'; + private static final char SEPARATOR_LOWER = 't'; + private static final char SEPARATOR_SPACE = ' '; + private static final char FRACTION_SEPARATOR_1 = '.'; + private static final char FRACTION_SEPARATOR_2 = ','; + private static final char ZULU_UPPER = 'Z'; + private static final char ZULU_LOWER = 'z'; + + private ZoneId zone; + + public RFC3339CompatibleDateTimeFormatter(String pattern) { + super(pattern); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter) { + super(formatter); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter, ZoneId zone) { + super(formatter); + this.zone = zone; + } + + @Override + public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withZone(zoneId), zoneId); + } + + @Override + public OpenSearchDateTimeFormatter withLocale(Locale locale) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withLocale(locale)); + } + + @Override + public Object parseObject(String text, ParsePosition pos) { + try { + return parse(text); + } catch (DateTimeException e) { + return null; + } + } + + @Override + public TemporalAccessor parse(final String dateTime) { + OffsetDateTime parsedDatetime = parse(dateTime, new ParsePosition(0)); + return zone == null ? 
parsedDatetime : parsedDatetime.atZoneSameInstant(zone); + } + + public OffsetDateTime parse(String date, ParsePosition pos) { + if (date == null) { + throw new IllegalArgumentException("date cannot be null"); + } + + final int len = date.length() - pos.getIndex(); + if (len <= 0) { + throw new DateTimeParseException("out of bound parse position", date, pos.getIndex()); + } + final char[] chars = date.substring(pos.getIndex()).toCharArray(); + + // Date portion + + // YEAR + final int years = getYear(chars, pos); + if (4 == len) { + return OffsetDateTime.of(years, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // MONTH + consumeChar(chars, pos, DATE_SEPARATOR); + final int months = getMonth(chars, pos); + if (7 == len) { + return OffsetDateTime.of(years, months, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // DAY + consumeChar(chars, pos, DATE_SEPARATOR); + final int days = getDay(chars, pos); + if (10 == len) { + return OffsetDateTime.of(years, months, days, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // HOURS + consumeChar(chars, pos, SEPARATOR_UPPER, SEPARATOR_LOWER, SEPARATOR_SPACE); + final int hours = getHour(chars, pos); + + // MINUTES + consumeChar(chars, pos, TIME_SEPARATOR); + final int minutes = getMinute(chars, pos); + if (16 == len) { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + // SECONDS or TIMEZONE + return handleTime(chars, pos, years, months, days, hours, minutes); + } + + private static boolean isDigit(char c) { + return (c >= '0' && c <= '9'); + } + + private static int digit(char c) { + return c - '0'; + } + + private static int readInt(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + if (end > strNum.length) { + pos.setErrorIndex(end); + throw new DateTimeParseException("Unexpected end of expression at position " + strNum.length, new String(strNum), end); + } + + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + if (isDigit(c) == false) { + pos.setErrorIndex(i); + throw new DateTimeParseException("Character " + c + " is not a digit", new String(strNum), i); + } + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int readIntUnchecked(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int getHour(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getMinute(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getDay(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static boolean isValidOffset(char[] chars, int offset) { + return offset < chars.length; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char expected) { + int offset = pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + if (chars[offset] != expected) { + throw new DateTimeParseException("Expected character " + expected + " at position " + offset, new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static void consumeNextChar(char[] chars, ParsePosition pos) { + int offset = 
pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static boolean checkPositionContains(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + break; + } + } + return found; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + pos.setIndex(offset + 1); + break; + } + } + if (!found) { + throw new DateTimeParseException( + "Expected character " + Arrays.toString(expected) + " at position " + offset, + new String(chars), + offset + ); + } + } + + private static void assertNoMoreChars(char[] chars, ParsePosition pos) { + if (chars.length > pos.getIndex()) { + throw new DateTimeParseException("Trailing junk data after position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + } + + private static ZoneOffset parseTimezone(char[] chars, ParsePosition pos) { + int offset = pos.getIndex(); + final int left = chars.length - offset; + if (checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + assertNoMoreChars(chars, pos); + return ZoneOffset.UTC; + } + + if (left != 6) { + throw new DateTimeParseException("Invalid timezone offset", new String(chars, offset, left), offset); + } + + final char sign = chars[offset]; + consumeNextChar(chars, pos); + int hours = getHour(chars, pos); + consumeChar(chars, pos, TIME_SEPARATOR); + int minutes = getMinute(chars, pos); + if (sign == MINUS) { + if (hours == 0 && minutes == 0) { + throw new DateTimeParseException("Unknown 'Local Offset Convention' date-time not allowed", new String(chars), offset); + } + hours = -hours; + minutes = -minutes; + } else if (sign != PLUS) { + throw new DateTimeParseException("Invalid character starting at position " + offset, new String(chars), offset); + } + + return ZoneOffset.ofHoursMinutes(hours, minutes); + } + + private static OffsetDateTime handleTime(char[] chars, ParsePosition pos, int year, int month, int day, int hour, int minute) { + switch (chars[pos.getIndex()]) { + case TIME_SEPARATOR: + consumeChar(chars, pos, TIME_SEPARATOR); + return handleSeconds(year, month, day, hour, minute, chars, pos); + + case PLUS: + case MINUS: + case ZULU_UPPER: + case ZULU_LOWER: + final ZoneOffset zoneOffset = parseTimezone(chars, pos); + return OffsetDateTime.of(year, month, day, hour, minute, 0, 0, zoneOffset); + } + throw new DateTimeParseException("Unexpected character " + " at position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + + private static int getMonth(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getYear(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 4); + } + + private static int getSeconds(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getFractions(final char[] chars, final ParsePosition pos, final int len) { + final int fractions; + 
fractions = readIntUnchecked(chars, pos, len); + switch (len) { + case 0: + throw new DateTimeParseException("Must have at least 1 fraction digit", new String(chars), pos.getIndex()); + case 1: + return fractions * 100_000_000; + case 2: + return fractions * 10_000_000; + case 3: + return fractions * 1_000_000; + case 4: + return fractions * 100_000; + case 5: + return fractions * 10_000; + case 6: + return fractions * 1_000; + case 7: + return fractions * 100; + case 8: + return fractions * 10; + default: + return fractions; + } + } + + public static int indexOfNonDigit(final char[] text, int offset) { + for (int i = offset; i < text.length; i++) { + if (isDigit(text[i]) == false) { + return i; + } + } + return -1; + } + + public static void consumeDigits(final char[] text, ParsePosition pos) { + final int idx = indexOfNonDigit(text, pos.getIndex()); + if (idx == -1) { + pos.setErrorIndex(text.length); + } else { + pos.setIndex(idx); + } + } + + private static OffsetDateTime handleSeconds(int year, int month, int day, int hour, int minute, char[] chars, ParsePosition pos) { + // From here the specification is more lenient + final int seconds = getSeconds(chars, pos); + int currPos = pos.getIndex(); + final int remaining = chars.length - currPos; + if (remaining == 0) { + // No offset + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + ZoneOffset offset = null; + int fractions = 0; + if (remaining == 1 && checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + // Do nothing we are done + offset = ZoneOffset.UTC; + assertNoMoreChars(chars, pos); + } else if (remaining >= 1 && checkPositionContains(chars, pos, FRACTION_SEPARATOR_1, FRACTION_SEPARATOR_2)) { + // We have fractional seconds; + consumeNextChar(chars, pos); + ParsePosition initPosition = new ParsePosition(pos.getIndex()); + consumeDigits(chars, pos); + if (pos.getErrorIndex() == -1) { + // We have an end of fractions + final int len = pos.getIndex() - initPosition.getIndex(); + fractions = getFractions(chars, initPosition, len); + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + } else if (remaining >= 1 && checkPositionContains(chars, pos, PLUS, MINUS)) { + // No fractional sections + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("Unexpected character at position " + (pos.getIndex()), new String(chars), pos.getIndex()); + } + + return OffsetDateTime.of(year, month, day, hour, minute, seconds, fractions, offset); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index d4ab161527cc0..b51efeab21254 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,6 +20,11 @@ * @opensearch.internal */ public class FeatureFlags { + /** + * Gates the visibility of the remote store migration support from docrep . + */ + public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled"; + /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. 
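Returning to the rfc3339_lenient format wired up above (the DateFormatters constant, the FormatNames entry, and the RFC3339CompatibleDateTimeFormatter parser): a minimal sketch of how it could be exercised is shown below. The sample inputs come from the parser's javadoc; the wrapper class name is made up for illustration, and the sketch assumes the new constant is reachable as declared above.

import java.time.temporal.TemporalAccessor;

import org.opensearch.common.time.DateFormatter;
import org.opensearch.common.time.DateFormatters;

public class Rfc3339LenientExample {
    public static void main(String[] args) {
        // Constant added to DateFormatters above; name lookup via "rfc3339_lenient"
        // resolves to the same formatter through the forPattern branch shown earlier.
        DateFormatter rfc3339 = DateFormatters.RFC3339_LENIENT_DATE_FORMATTER;

        // Complete date plus time, fractional seconds and offset (example from the javadoc).
        TemporalAccessor full = rfc3339.parse("1997-07-16T19:20:30.45+01:00");

        // Year-month and complete-date forms are accepted as well.
        TemporalAccessor dateOnly = rfc3339.parse("1997-07-16");

        // Lower-case 't'/'z' separators and ',' as the fraction separator are tolerated
        // by the lenient profile.
        TemporalAccessor relaxed = rfc3339.parse("1997-07-16t19:20:30,45z");

        System.out.println(full + " | " + dateOnly + " | " + relaxed);
    }
}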
@@ -38,12 +43,6 @@ public class FeatureFlags { */ public static final String IDENTITY = "opensearch.experimental.feature.identity.enabled"; - /** - * Gates the functionality of concurrently searching the segments - * Once the feature is ready for release, this feature flag can be removed. - */ - public static final String CONCURRENT_SEGMENT_SEARCH = "opensearch.experimental.feature.concurrent_segment_search.enabled"; - /** * Gates the functionality of telemetry framework. */ @@ -60,6 +59,11 @@ public class FeatureFlags { */ public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; + /** + * Gates the optimization to enable bloom filters for doc id lookup. + */ + public static final String DOC_ID_FUZZY_SET = "opensearch.experimental.optimize_doc_id_lookup.fuzzy_set.enabled"; + /** * Should store the settings from opensearch.yml. */ @@ -99,18 +103,18 @@ public static boolean isEnabled(Setting<Boolean> featureFlag) { } } + public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + public static final Setting<Boolean> EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting<Boolean> IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); public static final Setting<Boolean> TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - public static final Setting<Boolean> CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( - CONCURRENT_SEGMENT_SEARCH, - false, - Property.NodeScope - ); - public static final Setting<Boolean> DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( DATETIME_FORMATTER_CACHING, true, @@ -122,4 +126,6 @@ public static boolean isEnabled(Setting<Boolean> featureFlag) { false, Property.NodeScope ); + + public static final Setting<Boolean> DOC_ID_FUZZY_SET_SETTING = Setting.boolSetting(DOC_ID_FUZZY_SET, false, Property.NodeScope); } diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 86e7227cb6c85..fe053a26329e4 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -118,10 +118,17 @@ public ReorganizingLongHash(final long initialCapacity, final float loadFactor, mask = capacity - 1; grow = (long) (capacity * loadFactor); size = 0; - - table = bigArrays.newLongArray(capacity, false); - table.fill(0, capacity, -1); // -1 represents an empty slot - keys = bigArrays.newLongArray(initialCapacity, false); + try { + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); // -1 represents an empty slot + keys = bigArrays.newLongArray(initialCapacity, false); + } finally { + if (table == null || keys == null) { + // it's important to close the arrays initialized above to prevent memory leak + // refer: https://github.com/opensearch-project/OpenSearch/issues/10154 + Releasables.closeWhileHandlingException(table, keys); + } + } } /** diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index ec1024bbe5f30..6e45c3fb7b58d 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ 
b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -393,6 +393,7 @@ static class OpenSearchThreadFactory implements ThreadFactory { final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + @SuppressWarnings("removal") OpenSearchThreadFactory(String namePrefix) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); @@ -446,6 +447,30 @@ public boolean offer(E e) { } } + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public void put(E e) { + super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + return super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public boolean add(E e) { + return super.offer(e); + } + } /** diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index c7ba5eb040a1f..2748938d8b761 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -199,6 +199,8 @@ public String toString() { private final NodeMetadata nodeMetadata; + private final IndexStoreListener indexStoreListener; + /** * Maximum number of data nodes that should run in an environment. */ @@ -295,18 +297,23 @@ public void close() { } } + public NodeEnvironment(Settings settings, Environment environment) throws IOException { + this(settings, environment, IndexStoreListener.EMPTY); + } + /** * Setup the environment. * @param settings settings from opensearch.yml */ - public NodeEnvironment(Settings settings, Environment environment) throws IOException { - if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) { + public NodeEnvironment(Settings settings, Environment environment, IndexStoreListener indexStoreListener) throws IOException { + if (DiscoveryNode.nodeRequiresLocalStorage(settings) == false) { nodePaths = null; fileCacheNodePath = null; sharedDataPath = null; locks = null; nodeLockId = -1; nodeMetadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT); + this.indexStoreListener = IndexStoreListener.EMPTY; return; } boolean success = false; @@ -385,6 +392,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } this.nodeMetadata = loadNodeMetadata(settings, logger, nodePaths); + this.indexStoreListener = indexStoreListener; success = true; } finally { if (success == false) { @@ -577,6 +585,9 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... 
sh public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings) throws IOException { final ShardId shardId = lock.getShardId(); assert isShardLocked(shardId) : "shard " + shardId + " is not locked"; + + indexStoreListener.beforeShardPathDeleted(shardId, indexSettings, this); + final Path[] paths = availableShardPaths(shardId); logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths); acquireFSLockForPaths(indexSettings, paths); @@ -653,6 +664,8 @@ public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSetti * @param indexSettings settings for the index being deleted */ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException { + indexStoreListener.beforeIndexPathDeleted(index, indexSettings, this); + final Path[] indexPaths = indexPaths(index); logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths); IOUtils.rm(indexPaths); @@ -663,6 +676,18 @@ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettin } } + private void deleteIndexFileCacheDirectory(Index index) { + final Path indexCachePath = fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + logger.trace("deleting index {} file cache directory, path: [{}]", index, indexCachePath); + if (Files.exists(indexCachePath)) { + try { + IOUtils.rm(indexCachePath); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + } + } + } + /** * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired * a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are released. @@ -1387,4 +1412,18 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + + /** + * A listener that is executed on per-index and per-shard store events, like deleting shard path + * + * @opensearch.internal + */ + public interface IndexStoreListener { + default void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {} + + default void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {} + + IndexStoreListener EMPTY = new IndexStoreListener() { + }; + } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayModule.java b/server/src/main/java/org/opensearch/gateway/GatewayModule.java index 59ec0243c88c9..847ba01737332 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayModule.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayModule.java @@ -47,6 +47,7 @@ protected void configure() { bind(GatewayService.class).asEagerSingleton(); bind(TransportNodesListGatewayMetaState.class).asEagerSingleton(); bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton(); + bind(TransportNodesListGatewayStartedShardsBatch.class).asEagerSingleton(); bind(LocalAllocateDangledIndices.class).asEagerSingleton(); } } diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java new file mode 100644 index 0000000000000..403e3e96fa209 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or 
a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.shard.ShardStateMetadata; +import org.opensearch.index.store.Store; +import org.opensearch.indices.IndicesService; + +import java.io.IOException; + +/** + * This class has the common code used in {@link TransportNodesListGatewayStartedShards} and + * {@link TransportNodesListGatewayStartedShardsBatch} to get the shard info on the local node. + * <p> + * This class should not be used to add more functions and will be removed when the + * {@link TransportNodesListGatewayStartedShards} will be deprecated and all the code will be moved to + * {@link TransportNodesListGatewayStartedShardsBatch} + * + * @opensearch.internal + */ +public class TransportNodesGatewayStartedShardHelper { + public static TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard getShardInfoOnLocalNode( + Logger logger, + final ShardId shardId, + NamedXContentRegistry namedXContentRegistry, + NodeEnvironment nodeEnv, + IndicesService indicesService, + String shardDataPathInRequest, + Settings settings, + ClusterService clusterService + ) throws IOException { + logger.trace("{} loading local shard state info", shardId); + ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState( + logger, + namedXContentRegistry, + nodeEnv.availableShardPaths(shardId) + ); + if (shardStateMetadata != null) { + if (indicesService.getShardOrNull(shardId) == null + && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { + final String customDataPath; + if (shardDataPathInRequest != null) { + customDataPath = shardDataPathInRequest; + } else { + // TODO: Fallback for BWC with older OpenSearch versions. + // Remove once request.getCustomDataPath() always returns non-null + final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); + if (metadata != null) { + customDataPath = new IndexSettings(metadata, settings).customDataPath(); + } else { + logger.trace("{} node doesn't have meta data for the requests index", shardId); + throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); + } + } + // we don't have an open shard on the store, validate the files on disk are openable + ShardPath shardPath = null; + try { + shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); + if (shardPath == null) { + throw new IllegalStateException(shardId + " no shard path found"); + } + Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); + } catch (Exception exception) { + final ShardPath finalShardPath = shardPath; + logger.trace( + () -> new ParameterizedMessage( + "{} can't open index for shard [{}] in path [{}]", + shardId, + shardStateMetadata, + (finalShardPath != null) ? 
finalShardPath.resolveIndex() : "" + ), + exception + ); + String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard( + allocationId, + shardStateMetadata.primary, + null, + exception + ); + } + } + + logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); + String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; + final IndexShard shard = indicesService.getShardOrNull(shardId); + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard( + allocationId, + shardStateMetadata.primary, + shard != null ? shard.getLatestReplicationCheckpoint() : null + ); + } + logger.trace("{} no local shard info found", shardId); + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard(null, false, null); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 601a5c671d67c..0ba872aab9974 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionType; @@ -43,7 +42,6 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -55,11 +53,6 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.shard.ShardStateMetadata; -import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.store.ShardAttributes; @@ -72,6 +65,8 @@ import java.util.Map; import java.util.Objects; +import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode; + /** * This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}. 
* We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate @@ -159,72 +154,23 @@ protected NodesGatewayStartedShards newResponse( @Override protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { try { - final ShardId shardId = request.getShardId(); - logger.trace("{} loading local shard state info", shardId); - ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState( + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard shardInfo = getShardInfoOnLocalNode( logger, + request.getShardId(), namedXContentRegistry, - nodeEnv.availableShardPaths(request.shardId) + nodeEnv, + indicesService, + request.getCustomDataPath(), + settings, + clusterService + ); + return new NodeGatewayStartedShards( + clusterService.localNode(), + shardInfo.allocationId(), + shardInfo.primary(), + shardInfo.replicationCheckpoint(), + shardInfo.storeException() ); - if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null - && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { - final String customDataPath; - if (request.getCustomDataPath() != null) { - customDataPath = request.getCustomDataPath(); - } else { - // TODO: Fallback for BWC with older OpenSearch versions. - // Remove once request.getCustomDataPath() always returns non-null - final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); - if (metadata != null) { - customDataPath = new IndexSettings(metadata, settings).customDataPath(); - } else { - logger.trace("{} node doesn't have meta data for the requests index", shardId); - throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); - } - } - // we don't have an open shard on the store, validate the files on disk are openable - ShardPath shardPath = null; - try { - shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); - if (shardPath == null) { - throw new IllegalStateException(shardId + " no shard path found"); - } - Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); - } catch (Exception exception) { - final ShardPath finalShardPath = shardPath; - logger.trace( - () -> new ParameterizedMessage( - "{} can't open index for shard [{}] in path [{}]", - shardId, - shardStateMetadata, - (finalShardPath != null) ? finalShardPath.resolveIndex() : "" - ), - exception - ); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - null, - exception - ); - } - } - - logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - final IndexShard shard = indicesService.getShardOrNull(shardId); - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - shard != null ? 
shard.getLatestReplicationCheckpoint() : null - ); - } - logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), null, false, null); } catch (Exception e) { throw new OpenSearchException("failed to load started shards", e); } diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java new file mode 100644 index 0000000000000..bc327c1b85748 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java @@ -0,0 +1,401 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionType; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode; + +/** + * This transport action is used to fetch batch of unassigned shard version from each node during primary allocation in {@link GatewayAllocator}. + * We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate + * shards after node or cluster restarts. 
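For context, a rough sketch of how an allocator-side caller is expected to use this batched action: instead of issuing one transport request per shard, it hands the whole map of shard attributes to list() and walks the per-node, per-shard results. This is only an illustration; unassignedShardIds, dataNodes and the (shardId, customDataPath) constructor of ShardAttributes are assumptions, while list(), getNodes() and getNodeGatewayStartedShardsBatch() come from the code in this diff.

    Map<ShardId, ShardAttributes> shardAttributesMap = new HashMap<>();
    for (ShardId shardId : unassignedShardIds) {                                   // assumed to be known to the caller
        shardAttributesMap.put(shardId, new ShardAttributes(shardId, null));       // assumed constructor: (shardId, customDataPath)
    }
    listGatewayStartedShardsBatch.list(shardAttributesMap, dataNodes, new ActionListener<>() {
        @Override
        public void onResponse(TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response) {
            // One NodeGatewayStartedShardsBatch per data node, each carrying a ShardId -> NodeGatewayStartedShard map
            for (TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch nodeBatch : response.getNodes()) {
                nodeBatch.getNodeGatewayStartedShardsBatch()
                    .forEach((shardId, shardInfo) -> logger.trace("{} on {}: allocationId={}, primary={}",
                        shardId, nodeBatch.getNode(), shardInfo.allocationId(), shardInfo.primary()));
            }
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn("batched started-shards fetch failed", e);
        }
    });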
+ * + * @opensearch.internal + */ +public class TransportNodesListGatewayStartedShardsBatch extends TransportNodesAction< + TransportNodesListGatewayStartedShardsBatch.Request, + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch, + TransportNodesListGatewayStartedShardsBatch.NodeRequest, + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> + implements + AsyncShardFetch.Lister< + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch, + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> { + + public static final String ACTION_NAME = "internal:gateway/local/started_shards_batch"; + public static final ActionType<NodesGatewayStartedShardsBatch> TYPE = new ActionType<>( + ACTION_NAME, + NodesGatewayStartedShardsBatch::new + ); + + private final Settings settings; + private final NodeEnvironment nodeEnv; + private final IndicesService indicesService; + private final NamedXContentRegistry namedXContentRegistry; + + @Inject + public TransportNodesListGatewayStartedShardsBatch( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + NodeEnvironment env, + IndicesService indicesService, + NamedXContentRegistry namedXContentRegistry + ) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + Request::new, + NodeRequest::new, + ThreadPool.Names.FETCH_SHARD_STARTED, + NodeGatewayStartedShardsBatch.class + ); + this.settings = settings; + this.nodeEnv = env; + this.indicesService = indicesService; + this.namedXContentRegistry = namedXContentRegistry; + } + + @Override + public void list( + Map<ShardId, ShardAttributes> shardAttributesMap, + DiscoveryNode[] nodes, + ActionListener<NodesGatewayStartedShardsBatch> listener + ) { + execute(new Request(nodes, shardAttributesMap), listener); + } + + @Override + protected NodeRequest newNodeRequest(Request request) { + return new NodeRequest(request); + } + + @Override + protected NodeGatewayStartedShardsBatch newNodeResponse(StreamInput in) throws IOException { + return new NodeGatewayStartedShardsBatch(in); + } + + @Override + protected NodesGatewayStartedShardsBatch newResponse( + Request request, + List<NodeGatewayStartedShardsBatch> responses, + List<FailedNodeException> failures + ) { + return new NodesGatewayStartedShardsBatch(clusterService.getClusterName(), responses, failures); + } + + /** + * This function is similar to nodeOperation method of {@link TransportNodesListGatewayStartedShards} we loop over + * the shards here and populate the data about the shards held by the local node. + * + * @param request Request containing the map shardIdsWithCustomDataPath. 
+ @return NodeGatewayStartedShardsBatch containing the data about the primary shards held by the local node + */ + @Override + protected NodeGatewayStartedShardsBatch nodeOperation(NodeRequest request) { + Map<ShardId, NodeGatewayStartedShard> shardsOnNode = new HashMap<>(); + for (ShardAttributes shardAttr : request.shardAttributes.values()) { + final ShardId shardId = shardAttr.getShardId(); + try { + shardsOnNode.put( + shardId, + getShardInfoOnLocalNode( + logger, + shardId, + namedXContentRegistry, + nodeEnv, + indicesService, + shardAttr.getCustomDataPath(), + settings, + clusterService + ) + ); + } catch (Exception e) { + shardsOnNode.put( + shardId, + new NodeGatewayStartedShard(null, false, null, new OpenSearchException("failed to load started shards", e)) + ); + } + } + return new NodeGatewayStartedShardsBatch(clusterService.localNode(), shardsOnNode); + } + + /** + * This is used in constructing the request for making the transport request to a set of other nodes. + * Refer to the {@link TransportNodesAction} class start method. + * + * @opensearch.internal + */ + public static class Request extends BaseNodesRequest<Request> { + private final Map<ShardId, ShardAttributes> shardAttributes; + + public Request(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public Request(DiscoveryNode[] nodes, Map<ShardId, ShardAttributes> shardAttributes) { + super(nodes); + this.shardAttributes = Objects.requireNonNull(shardAttributes); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public Map<ShardId, ShardAttributes> getShardAttributes() { + return shardAttributes; + } + } + + /** + * Responses received from the set of other nodes are combined into this class and sent back to the caller + * of this transport request. Refer to {@link TransportNodesAction} + * + * @opensearch.internal + */ + public static class NodesGatewayStartedShardsBatch extends BaseNodesResponse<NodeGatewayStartedShardsBatch> { + + public NodesGatewayStartedShardsBatch(StreamInput in) throws IOException { + super(in); + } + + public NodesGatewayStartedShardsBatch( + ClusterName clusterName, + List<NodeGatewayStartedShardsBatch> nodes, + List<FailedNodeException> failures + ) { + super(clusterName, nodes, failures); + } + + @Override + protected List<NodeGatewayStartedShardsBatch> readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeGatewayStartedShardsBatch::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List<NodeGatewayStartedShardsBatch> nodes) throws IOException { + out.writeList(nodes); + } + } + + /** + * NodeRequest class is for deserializing the request received by this node from another node for this transport action.
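As a sanity check of the wire format used by these request classes, a minimal round-trip sketch is shown here; it assumes a test context where BytesStreamOutput is available and that ShardAttributes exposes a (shardId, customDataPath) constructor, while NodeRequest's writeTo and StreamInput constructor are the ones in this diff.

    // Coordinator side: build the fan-out request, then derive the per-node request from it
    Map<ShardId, ShardAttributes> attrs = Map.of(shardId, new ShardAttributes(shardId, null)); // assumed constructor
    TransportNodesListGatewayStartedShardsBatch.Request request =
        new TransportNodesListGatewayStartedShardsBatch.Request(new DiscoveryNode[] { node }, attrs);
    TransportNodesListGatewayStartedShardsBatch.NodeRequest nodeRequest =
        new TransportNodesListGatewayStartedShardsBatch.NodeRequest(request);

    // Serialize and deserialize, mirroring what the transport layer does with writeTo and readMap
    BytesStreamOutput out = new BytesStreamOutput();
    nodeRequest.writeTo(out);
    TransportNodesListGatewayStartedShardsBatch.NodeRequest read =
        new TransportNodesListGatewayStartedShardsBatch.NodeRequest(out.bytes().streamInput());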
+ * This is used in {@link TransportNodesAction} + * + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + private final Map<ShardId, ShardAttributes> shardAttributes; + + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public NodeRequest(Request request) { + this.shardAttributes = Objects.requireNonNull(request.getShardAttributes()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + } + + /** + * This class encapsulates the metadata about a started shard that needs to be persisted or sent between nodes. + * This is used in {@link NodeGatewayStartedShardsBatch} to construct the response for each node, instead of + * {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards} because we don't need to save an extra + * {@link DiscoveryNode} object like in {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards} + * which reduces memory footprint of its objects. + * + * @opensearch.internal + */ + public static class NodeGatewayStartedShard { + private final String allocationId; + private final boolean primary; + private final Exception storeException; + private final ReplicationCheckpoint replicationCheckpoint; + + public NodeGatewayStartedShard(StreamInput in) throws IOException { + allocationId = in.readOptionalString(); + primary = in.readBoolean(); + if (in.readBoolean()) { + storeException = in.readException(); + } else { + storeException = null; + } + if (in.readBoolean()) { + replicationCheckpoint = new ReplicationCheckpoint(in); + } else { + replicationCheckpoint = null; + } + } + + public NodeGatewayStartedShard(String allocationId, boolean primary, ReplicationCheckpoint replicationCheckpoint) { + this(allocationId, primary, replicationCheckpoint, null); + } + + public NodeGatewayStartedShard( + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + Exception storeException + ) { + this.allocationId = allocationId; + this.primary = primary; + this.replicationCheckpoint = replicationCheckpoint; + this.storeException = storeException; + } + + public String allocationId() { + return this.allocationId; + } + + public boolean primary() { + return this.primary; + } + + public ReplicationCheckpoint replicationCheckpoint() { + return this.replicationCheckpoint; + } + + public Exception storeException() { + return this.storeException; + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(allocationId); + out.writeBoolean(primary); + if (storeException != null) { + out.writeBoolean(true); + out.writeException(storeException); + } else { + out.writeBoolean(false); + } + if (replicationCheckpoint != null) { + out.writeBoolean(true); + replicationCheckpoint.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NodeGatewayStartedShard that = (NodeGatewayStartedShard) o; + + return primary == that.primary + && Objects.equals(allocationId, that.allocationId) + && Objects.equals(storeException, that.storeException) + && Objects.equals(replicationCheckpoint, that.replicationCheckpoint); + } + + @Override + public int hashCode() { + int result = (allocationId != null ? 
allocationId.hashCode() : 0); + result = 31 * result + (primary ? 1 : 0); + result = 31 * result + (storeException != null ? storeException.hashCode() : 0); + result = 31 * result + (replicationCheckpoint != null ? replicationCheckpoint.hashCode() : 0); + return result; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append("NodeGatewayStartedShards[").append("allocationId=").append(allocationId).append(",primary=").append(primary); + if (storeException != null) { + buf.append(",storeException=").append(storeException); + } + if (replicationCheckpoint != null) { + buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString()); + } + buf.append("]"); + return buf.toString(); + } + } + + /** + * This is the response from a single node, this is used in {@link NodesGatewayStartedShardsBatch} for creating + * node to its response mapping for this transport request. + * Refer {@link TransportNodesAction} start method + * + * @opensearch.internal + */ + public static class NodeGatewayStartedShardsBatch extends BaseNodeResponse { + private final Map<ShardId, NodeGatewayStartedShard> nodeGatewayStartedShardsBatch; + + public Map<ShardId, NodeGatewayStartedShard> getNodeGatewayStartedShardsBatch() { + return nodeGatewayStartedShardsBatch; + } + + public NodeGatewayStartedShardsBatch(StreamInput in) throws IOException { + super(in); + this.nodeGatewayStartedShardsBatch = in.readMap(ShardId::new, NodeGatewayStartedShard::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(nodeGatewayStartedShardsBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public NodeGatewayStartedShardsBatch(DiscoveryNode node, Map<ShardId, NodeGatewayStartedShard> nodeGatewayStartedShardsBatch) { + super(node); + this.nodeGatewayStartedShardsBatch = nodeGatewayStartedShardsBatch; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 00e765d73f77f..36e48b2590a4e 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -65,7 +65,9 @@ import java.util.function.UnaryOperator; import static org.opensearch.Version.V_2_7_0; +import static org.opensearch.common.util.FeatureFlags.DOC_ID_FUZZY_SET_SETTING; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; +import static org.opensearch.index.codec.fuzzy.FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; @@ -269,6 +271,17 @@ public static IndexMergePolicy fromString(String text) { Property.IndexScope ); + /** + * Index setting describing the maximum number of nested scopes in queries. + * The default maximum of 20. 1 means once nesting. 
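In other words, index.query.max_nested_depth caps how deeply nested queries may go; the default is 20 and the minimum is 1, i.e. a single level of nesting. Since the setting is dynamic and index-scoped, it can be supplied at index creation or changed later; a minimal sketch:

    // Cap nested query depth for an index at creation time
    Settings indexSettings = Settings.builder()
        .put("index.query.max_nested_depth", 5)   // must be >= 1; defaults to 20
        .build();

Being Property.Dynamic, the same key can also be adjusted on a live index through an update-settings request.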
+ */ + public static final Setting<Integer> MAX_NESTED_QUERY_DEPTH_SETTING = Setting.intSetting( + "index.query.max_nested_depth", + 20, + 1, + Property.Dynamic, + Property.IndexScope + ); /** * Index setting describing for NGramTokenizer and NGramTokenFilter * the maximum difference between @@ -658,6 +671,22 @@ public static IndexMergePolicy fromString(String text) { Property.Dynamic ); + public static final Setting<Boolean> INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( + "index.optimize_doc_id_lookup.fuzzy_set.enabled", + false, + Property.IndexScope, + Property.Dynamic + ); + + public static final Setting<Double> INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING = Setting.doubleSetting( + "index.optimize_doc_id_lookup.fuzzy_set.false_positive_probability", + DEFAULT_FALSE_POSITIVE_PROBABILITY, + 0.01, + 0.50, + Property.IndexScope, + Property.Dynamic + ); + public static final TimeValue DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL = new TimeValue(650, TimeUnit.MILLISECONDS); public static final TimeValue MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL = TimeValue.ZERO; public static final Setting<TimeValue> INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( @@ -747,6 +776,8 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile TimeValue searchIdleAfter; private volatile int maxAnalyzedOffset; private volatile int maxTermsCount; + + private volatile int maxNestedQueryDepth; private volatile String defaultPipeline; private volatile String requiredPipeline; private volatile boolean searchThrottled; @@ -787,6 +818,16 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile UnaryOperator<MergePolicy> mergeOnFlushPolicy; + /** + * Is fuzzy set enabled for doc id + */ + private volatile boolean enableFuzzySetForDocId; + + /** + * False positive probability to use while creating fuzzy set. + */ + private volatile double docIdFuzzySetFalsePositiveProbability; + /** * Returns the default search fields for this index. */ @@ -902,6 +943,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); + maxNestedQueryDepth = scopedSettings.get(MAX_NESTED_QUERY_DEPTH_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); @@ -926,6 +968,13 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. 
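Separately, putting the fuzzy-set pieces above together: the doc-id optimization needs both the node-level experimental flag and the per-index settings. The index-level part is sketched below; how the experimental flag itself is switched on (opensearch.yml versus a JVM system property) is an assumption and not prescribed by this diff.

    // Node level (assumption about how experimental flags are enabled):
    //   opensearch.experimental.optimize_doc_id_lookup.fuzzy_set.enabled: true

    // Index level: both settings are dynamic and index-scoped
    Settings indexSettings = Settings.builder()
        .put("index.optimize_doc_id_lookup.fuzzy_set.enabled", true)
        .put("index.optimize_doc_id_lookup.fuzzy_set.false_positive_probability", 0.10) // allowed range: 0.01 to 0.50
        .build();

Without the feature flag, the dynamic setters deliberately reject the update (see verifyFeatureToSetDocIdFuzzySetSetting below).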
*/ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); + + boolean isOptimizeDocIdLookupUsingFuzzySetFeatureEnabled = FeatureFlags.isEnabled(DOC_ID_FUZZY_SET_SETTING); + if (isOptimizeDocIdLookupUsingFuzzySetFeatureEnabled) { + enableFuzzySetForDocId = scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING); + docIdFuzzySetFalsePositiveProbability = scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING); + } + scopedSettings.addSettingsUpdateConsumer( TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, tieredMergePolicyProvider::setNoCFSRatio @@ -1007,6 +1056,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); + scopedSettings.addSettingsUpdateConsumer(MAX_NESTED_QUERY_DEPTH_SETTING, this::setMaxNestedQueryDepth); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); @@ -1032,6 +1082,11 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this::setRemoteTranslogUploadBufferInterval ); scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); + scopedSettings.addSettingsUpdateConsumer(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING, this::setEnableFuzzySetForDocId); + scopedSettings.addSettingsUpdateConsumer( + INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, + this::setDocIdFuzzySetFalsePositiveProbability + ); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -1517,6 +1572,17 @@ private void setMaxTermsCount(int maxTermsCount) { this.maxTermsCount = maxTermsCount; } + /** + * @return max level of nested queries and documents + */ + public int getMaxNestedQueryDepth() { + return this.maxNestedQueryDepth; + } + + private void setMaxNestedQueryDepth(int maxNestedQueryDepth) { + this.maxNestedQueryDepth = maxNestedQueryDepth; + } + /** * Returns the maximum number of allowed script_fields to retrieve in a search request */ @@ -1801,4 +1867,36 @@ public void setDefaultSearchPipeline(String defaultSearchPipeline) { public boolean shouldWidenIndexSortType() { return this.widenIndexSortType; } + + public boolean isEnableFuzzySetForDocId() { + return enableFuzzySetForDocId; + } + + public void setEnableFuzzySetForDocId(boolean enableFuzzySetForDocId) { + verifyFeatureToSetDocIdFuzzySetSetting(enabled -> this.enableFuzzySetForDocId = enabled, enableFuzzySetForDocId); + } + + public double getDocIdFuzzySetFalsePositiveProbability() { + return docIdFuzzySetFalsePositiveProbability; + } + + public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) { + verifyFeatureToSetDocIdFuzzySetSetting( + fpp -> this.docIdFuzzySetFalsePositiveProbability = fpp, + docIdFuzzySetFalsePositiveProbability + ); + } + + private static <T> void verifyFeatureToSetDocIdFuzzySetSetting(Consumer<T> settingUpdater, T val) { + if (FeatureFlags.isEnabled(DOC_ID_FUZZY_SET_SETTING)) { + settingUpdater.accept(val); + } else { + throw new 
IllegalArgumentException( + "Fuzzy set for optimizing doc id lookup " + + "cannot be enabled with feature flag [" + + FeatureFlags.DOC_ID_FUZZY_SET + + "] set to false" + ); + } + } } diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index dc28ad2d6dc07..1ad17f121560c 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -39,10 +39,16 @@ import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import java.util.Map; + /** * {@link PerFieldMappingPostingFormatCodec This postings format} is the default * {@link PostingsFormat} for OpenSearch. It utilizes the @@ -57,6 +63,8 @@ public class PerFieldMappingPostingFormatCodec extends Lucene99Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); + private final FuzzySetFactory fuzzySetFactory; + private PostingsFormat docIdPostingsFormat; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) @@ -67,6 +75,12 @@ public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService map super(compressionMode); this.mapperService = mapperService; this.logger = logger; + fuzzySetFactory = new FuzzySetFactory( + Map.of( + IdFieldMapper.NAME, + new FuzzySetParameters(() -> mapperService.getIndexSettings().getDocIdFuzzySetFalsePositiveProbability()) + ) + ); } @Override @@ -76,6 +90,11 @@ public PostingsFormat getPostingsFormatForField(String field) { logger.warn("no index mapper found for field: [{}] returning default postings format", field); } else if (fieldType instanceof CompletionFieldMapper.CompletionFieldType) { return CompletionFieldMapper.CompletionFieldType.postingsFormat(); + } else if (IdFieldMapper.NAME.equals(field) && mapperService.getIndexSettings().isEnableFuzzySetForDocId()) { + if (docIdPostingsFormat == null) { + docIdPostingsFormat = new FuzzyFilterPostingsFormat(super.getPostingsFormatForField(field), fuzzySetFactory); + } + return docIdPostingsFormat; } return super.getPostingsFormatForField(field); } diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java new file mode 100644 index 0000000000000..09976297361fa --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
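Before moving on to the fuzzy set classes, a small sketch of what the codec wiring above means for a caller; mapperService and logger are assumed to be available, and Mode refers to Lucene99Codec.Mode.

    PerFieldMappingPostingFormatCodec codec =
        new PerFieldMappingPostingFormatCodec(Lucene99Codec.Mode.BEST_SPEED, mapperService, logger);
    PostingsFormat idFormat = codec.getPostingsFormatForField(IdFieldMapper.NAME);
    // With index.optimize_doc_id_lookup.fuzzy_set.enabled=true this is a FuzzyFilterPostingsFormat wrapping the
    // default postings format; otherwise the default format is returned unchanged. The wrapped instance is cached
    // in docIdPostingsFormat, so every segment written for the index reuses the same delegate.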
+ */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.hash.T1ha1; + +import java.io.IOException; +import java.util.Iterator; + +/** + * Encapsulates common behaviour implementation for a fuzzy set. + */ +public abstract class AbstractFuzzySet implements FuzzySet { + + /** + * Add an item to this fuzzy set. + * @param value The value to be added + */ + protected abstract void add(BytesRef value); + + /** + * Add all items to the underlying set. + * Implementations can choose to perform this using an optimized strategy based on the type of set. + * @param valuesIteratorProvider Supplier for an iterator over All values which should be added to the set. + */ + protected void addAll(CheckedSupplier<Iterator<BytesRef>, IOException> valuesIteratorProvider) throws IOException { + Iterator<BytesRef> values = valuesIteratorProvider.get(); + while (values.hasNext()) { + add(values.next()); + } + } + + public Result contains(BytesRef val) { + return containsHash(generateKey(val)); + } + + protected abstract Result containsHash(long hash); + + protected long generateKey(BytesRef value) { + return T1ha1.hash(value.bytes, value.offset, value.length, 0L); + } + + protected void assertAllElementsExist(CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) throws IOException { + Iterator<BytesRef> iter = iteratorProvider.get(); + int cnt = 0; + while (iter.hasNext()) { + BytesRef item = iter.next(); + assert contains(item) == Result.MAYBE + : "Expected Filter to return positive response for elements added to it. Elements matched: " + cnt; + cnt++; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java new file mode 100644 index 0000000000000..b8a8352183ca8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.Assertions; + +import java.io.IOException; +import java.util.Iterator; + +/** + * The code is based on Lucene's implementation of Bloom Filter. + * It represents a subset of the Lucene implementation needed for OpenSearch use cases. + * Since the Lucene implementation is marked experimental, + * this aims to ensure we can provide a bwc implementation during upgrades. + */ +public class BloomFilter extends AbstractFuzzySet { + + private static final Logger logger = LogManager.getLogger(BloomFilter.class); + + // The sizes of BitSet used are all numbers that, when expressed in binary form, + // are all ones. 
This is to enable fast downsizing from one bitset to another + // by simply ANDing each set index in one bitset with the size of the target bitset + // - this provides a fast modulo of the number. Values previously accumulated in + // a large bitset and then mapped to a smaller set can be looked up using a single + // AND operation of the query term's hash rather than needing to perform a 2-step + // translation of the query term that mirrors the stored content's reprojections. + static final int[] usableBitSetSizes; + + static { + usableBitSetSizes = new int[26]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + usableBitSetSizes[i] = (1 << (i + 6)) - 1; + } + } + + private final LongArrayBackedBitSet bitset; + private final int setSize; + private final int hashCount; + + BloomFilter(long maxDocs, double maxFpp, CheckedSupplier<Iterator<BytesRef>, IOException> fieldIteratorProvider) throws IOException { + int setSize = (int) Math.ceil((maxDocs * Math.log(maxFpp)) / Math.log(1 / Math.pow(2, Math.log(2)))); + setSize = getNearestSetSize(setSize < Integer.MAX_VALUE / 2 ? 2 * setSize : Integer.MAX_VALUE); + int optimalK = (int) Math.round(((double) setSize / maxDocs) * Math.log(2)); + this.bitset = new LongArrayBackedBitSet(setSize); + this.setSize = setSize; + this.hashCount = optimalK; + addAll(fieldIteratorProvider); + if (Assertions.ENABLED) { + assertAllElementsExist(fieldIteratorProvider); + } + logger.debug("Bloom filter created with fpp: {}, setSize: {}, hashCount: {}", maxFpp, setSize, hashCount); + } + + BloomFilter(IndexInput in) throws IOException { + hashCount = in.readInt(); + setSize = in.readInt(); + this.bitset = new LongArrayBackedBitSet(in); + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeInt(hashCount); + out.writeInt(setSize); + bitset.writeTo(out); + } + + private static int getNearestSetSize(int maxNumberOfBits) { + assert maxNumberOfBits > 0 : "Provided size estimate for bloom filter is illegal (<=0) : " + maxNumberOfBits; + int result = usableBitSetSizes[0]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + if (usableBitSetSizes[i] <= maxNumberOfBits) { + result = usableBitSetSizes[i]; + } + } + return result; + } + + @Override + public SetType setType() { + return SetType.BLOOM_FILTER_V1; + } + + @Override + public Result containsHash(long hash) { + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + int bloomPos = (lsb + i * msb); + if (!mayContainValue(bloomPos)) { + return Result.NO; + } + } + return Result.MAYBE; + } + + protected void add(BytesRef value) { + long hash = generateKey(value); + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + // Bitmasking using bloomSize is effectively a modulo operation since set sizes are always power of 2 + int bloomPos = (lsb + i * msb) & setSize; + bitset.set(bloomPos); + } + } + + @Override + public boolean isSaturated() { + long numBitsSet = bitset.cardinality(); + // Don't bother saving bitsets if >90% of bits are set - we don't want to + // throw any more memory at this problem. 
+ return (float) numBitsSet / (float) setSize > 0.9f; + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.sizeOf(bitset.ramBytesUsed()); + } + + private boolean mayContainValue(int aHash) { + // Bloom sizes are always base 2 and so can be ANDed for a fast modulo + int pos = aHash & setSize; + return bitset.get(pos); + } + + @Override + public void close() throws IOException { + IOUtils.close(bitset); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java new file mode 100644 index 0000000000000..01f8054fc91be --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java @@ -0,0 +1,492 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * Based on Lucene's BloomFilterPostingsFormat. + * Discussion with Lucene community based on which the decision to have this in OpenSearch code was taken + * is captured here: https://github.com/apache/lucene/issues/12986 + * + * The class deals with persisting the bloom filter through the postings format, + * and reading the field via a bloom filter fronted terms enum (to reduce disk seeks in case of absence of requested values) + * The class should be handled during lucene upgrades. There are bwc tests present to verify the format continues to work after upgrade. + */ + +public final class FuzzyFilterPostingsFormat extends PostingsFormat { + + private static final Logger logger = LogManager.getLogger(FuzzyFilterPostingsFormat.class); + + /** + * This name is stored in headers. If changing the implementation for the format, this name/version should be updated + * so that reads can work as expected. 
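Stepping back to the BloomFilter constructor above, the sizing is the standard Bloom filter formula m = -n * ln(p) / (ln 2)^2 with k = (m / n) * ln 2 hash probes, followed by doubling and rounding down to the nearest "all ones" size. A worked example with illustrative numbers:

    // n = 1_000_000 docs, p = 0.10 target false-positive probability
    // raw bits     = (1_000_000 * ln(0.10)) / ln(1 / 2^ln 2)  ~= 4_792_500
    // doubled      ~= 9_585_000                      (raw estimate < Integer.MAX_VALUE / 2, so it is doubled)
    // usable size  = 2^23 - 1 = 8_388_607            (largest all-ones size not exceeding the doubled estimate)
    // hash count k = round((8_388_607 / 1_000_000) * ln 2) = 6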
+ */ + public static final String FUZZY_FILTER_CODEC_NAME = "FuzzyFilterCodec99"; + + public static final int VERSION_START = 0; + public static final int VERSION_CURRENT = VERSION_START; + + /** Extension of Fuzzy Filters file */ + public static final String FUZZY_FILTER_FILE_EXTENSION = "fzd"; + + private final PostingsFormat delegatePostingsFormat; + private final FuzzySetFactory fuzzySetFactory; + + public FuzzyFilterPostingsFormat(PostingsFormat delegatePostingsFormat, FuzzySetFactory fuzzySetFactory) { + super(FUZZY_FILTER_CODEC_NAME); + this.delegatePostingsFormat = delegatePostingsFormat; + this.fuzzySetFactory = fuzzySetFactory; + } + + // Needed for SPI + public FuzzyFilterPostingsFormat() { + this(null, null); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + if (delegatePostingsFormat == null) { + throw new UnsupportedOperationException( + "Error - " + getClass().getName() + " has been constructed without a choice of PostingsFormat" + ); + } + FieldsConsumer fieldsConsumer = delegatePostingsFormat.fieldsConsumer(state); + return new FuzzyFilteredFieldsConsumer(fieldsConsumer, state); + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new FuzzyFilteredFieldsProducer(state); + } + + static class FuzzyFilteredFieldsProducer extends FieldsProducer { + private FieldsProducer delegateFieldsProducer; + HashMap<String, FuzzySet> fuzzySetsByFieldName = new HashMap<>(); + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsProducer(SegmentReadState state) throws IOException { + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + IndexInput filterIn = null; + boolean success = false; + try { + // Using IndexInput directly instead of ChecksumIndexInput since we want to support RandomAccessInput + filterIn = state.directory.openInput(fuzzyFilterFileName, state.context); + + CodecUtil.checkIndexHeader( + filterIn, + FUZZY_FILTER_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + // Load the delegate postings format + PostingsFormat delegatePostingsFormat = PostingsFormat.forName(filterIn.readString()); + this.delegateFieldsProducer = delegatePostingsFormat.fieldsProducer(state); + int numFilters = filterIn.readInt(); + for (int i = 0; i < numFilters; i++) { + int fieldNum = filterIn.readInt(); + FuzzySet set = FuzzySetFactory.deserializeFuzzySet(filterIn); + closeables.add(set); + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNum); + fuzzySetsByFieldName.put(fieldInfo.name, set); + } + CodecUtil.retrieveChecksum(filterIn); + + // Can we disable it if we foresee performance issues? + CodecUtil.checksumEntireFile(filterIn); + success = true; + closeables.add(filterIn); + } finally { + if (!success) { + IOUtils.closeWhileHandlingException(filterIn, delegateFieldsProducer); + } + } + } + + @Override + public Iterator<String> iterator() { + return delegateFieldsProducer.iterator(); + } + + @Override + public void close() throws IOException { + // Why closing here? 
+ IOUtils.closeWhileHandlingException(closeables); + delegateFieldsProducer.close(); + } + + @Override + public Terms terms(String field) throws IOException { + FuzzySet filter = fuzzySetsByFieldName.get(field); + if (filter == null) { + return delegateFieldsProducer.terms(field); + } else { + Terms result = delegateFieldsProducer.terms(field); + if (result == null) { + return null; + } + return new FuzzyFilteredTerms(result, filter); + } + } + + @Override + public int size() { + return delegateFieldsProducer.size(); + } + + static class FuzzyFilteredTerms extends Terms { + private Terms delegateTerms; + private FuzzySet filter; + + public FuzzyFilteredTerms(Terms terms, FuzzySet filter) { + this.delegateTerms = terms; + this.filter = filter; + } + + @Override + public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException { + return delegateTerms.intersect(compiled, startTerm); + } + + @Override + public TermsEnum iterator() throws IOException { + return new FilterAppliedTermsEnum(delegateTerms, filter); + } + + @Override + public long size() throws IOException { + return delegateTerms.size(); + } + + @Override + public long getSumTotalTermFreq() throws IOException { + return delegateTerms.getSumTotalTermFreq(); + } + + @Override + public long getSumDocFreq() throws IOException { + return delegateTerms.getSumDocFreq(); + } + + @Override + public int getDocCount() throws IOException { + return delegateTerms.getDocCount(); + } + + @Override + public boolean hasFreqs() { + return delegateTerms.hasFreqs(); + } + + @Override + public boolean hasOffsets() { + return delegateTerms.hasOffsets(); + } + + @Override + public boolean hasPositions() { + return delegateTerms.hasPositions(); + } + + @Override + public boolean hasPayloads() { + return delegateTerms.hasPayloads(); + } + + @Override + public BytesRef getMin() throws IOException { + return delegateTerms.getMin(); + } + + @Override + public BytesRef getMax() throws IOException { + return delegateTerms.getMax(); + } + } + + static final class FilterAppliedTermsEnum extends BaseTermsEnum { + + private Terms delegateTerms; + private TermsEnum delegateTermsEnum; + private final FuzzySet filter; + + public FilterAppliedTermsEnum(Terms delegateTerms, FuzzySet filter) throws IOException { + this.delegateTerms = delegateTerms; + this.filter = filter; + } + + void reset(Terms delegateTerms) throws IOException { + this.delegateTerms = delegateTerms; + this.delegateTermsEnum = null; + } + + private TermsEnum delegate() throws IOException { + if (delegateTermsEnum == null) { + /* pull the iterator only if we really need it - + * this can be a relativly heavy operation depending on the + * delegate postings format and the underlying directory + * (clone IndexInput) */ + delegateTermsEnum = delegateTerms.iterator(); + } + return delegateTermsEnum; + } + + @Override + public BytesRef next() throws IOException { + return delegate().next(); + } + + @Override + public boolean seekExact(BytesRef text) throws IOException { + // The magical fail-fast speed up that is the entire point of all of + // this code - save a disk seek if there is a match on an in-memory + // structure + // that may occasionally give a false positive but guaranteed no false + // negatives + if (filter.contains(text) == FuzzySet.Result.NO) { + return false; + } + return delegate().seekExact(text); + } + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + return delegate().seekCeil(text); + } + + @Override + public void 
seekExact(long ord) throws IOException { + delegate().seekExact(ord); + } + + @Override + public BytesRef term() throws IOException { + return delegate().term(); + } + + @Override + public long ord() throws IOException { + return delegate().ord(); + } + + @Override + public int docFreq() throws IOException { + return delegate().docFreq(); + } + + @Override + public long totalTermFreq() throws IOException { + return delegate().totalTermFreq(); + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return delegate().postings(reuse, flags); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return delegate().impacts(flags); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(filter=" + filter.toString() + ")"; + } + } + + @Override + public void checkIntegrity() throws IOException { + delegateFieldsProducer.checkIntegrity(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(fields=" + fuzzySetsByFieldName.size() + ",delegate=" + delegateFieldsProducer + ")"; + } + } + + class FuzzyFilteredFieldsConsumer extends FieldsConsumer { + private FieldsConsumer delegateFieldsConsumer; + private Map<FieldInfo, FuzzySet> fuzzySets = new HashMap<>(); + private SegmentWriteState state; + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsConsumer(FieldsConsumer fieldsConsumer, SegmentWriteState state) { + this.delegateFieldsConsumer = fieldsConsumer; + this.state = state; + } + + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + + // Delegate must write first: it may have opened files + // on creating the class + // (e.g. Lucene41PostingsConsumer), and write() will + // close them; alternatively, if we delayed pulling + // the fields consumer until here, we could do it + // afterwards: + delegateFieldsConsumer.write(fields, norms); + + for (String field : fields) { + Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); + FuzzySet fuzzySet = fuzzySetFactory.createFuzzySet(state.segmentInfo.maxDoc(), fieldInfo.name, () -> iterator(terms)); + if (fuzzySet == null) { + break; + } + assert fuzzySets.containsKey(fieldInfo) == false; + closeables.add(fuzzySet); + fuzzySets.put(fieldInfo, fuzzySet); + } + } + + private Iterator<BytesRef> iterator(Terms terms) throws IOException { + TermsEnum termIterator = terms.iterator(); + return new Iterator<>() { + + private BytesRef currentTerm; + private PostingsEnum postingsEnum; + + @Override + public boolean hasNext() { + try { + do { + currentTerm = termIterator.next(); + if (currentTerm == null) { + return false; + } + postingsEnum = termIterator.postings(postingsEnum, 0); + if (postingsEnum.nextDoc() != PostingsEnum.NO_MORE_DOCS) { + return true; + } + } while (true); + } catch (IOException ex) { + throw new IllegalStateException("Cannot read terms: " + termIterator.attributes()); + } + } + + @Override + public BytesRef next() { + return currentTerm; + } + }; + } + + private boolean closed; + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + delegateFieldsConsumer.close(); + + // Now we are done accumulating values for these fields + List<Map.Entry<FieldInfo, FuzzySet>> nonSaturatedSets = new ArrayList<>(); + + for (Map.Entry<FieldInfo, FuzzySet> entry : fuzzySets.entrySet()) { + FuzzySet fuzzySet = 
entry.getValue(); + if (!fuzzySet.isSaturated()) { + nonSaturatedSets.add(entry); + } + } + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + try (IndexOutput fuzzyFilterFileOutput = state.directory.createOutput(fuzzyFilterFileName, state.context)) { + logger.trace( + "Writing fuzzy filter postings with version: {} for segment: {}", + VERSION_CURRENT, + state.segmentInfo.toString() + ); + CodecUtil.writeIndexHeader( + fuzzyFilterFileOutput, + FUZZY_FILTER_CODEC_NAME, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + + // remember the name of the postings format we will delegate to + fuzzyFilterFileOutput.writeString(delegatePostingsFormat.getName()); + + // First field in the output file is the number of fields+sets saved + fuzzyFilterFileOutput.writeInt(nonSaturatedSets.size()); + for (Map.Entry<FieldInfo, FuzzySet> entry : nonSaturatedSets) { + FieldInfo fieldInfo = entry.getKey(); + FuzzySet fuzzySet = entry.getValue(); + saveAppropriatelySizedFuzzySet(fuzzyFilterFileOutput, fuzzySet, fieldInfo); + } + CodecUtil.writeFooter(fuzzyFilterFileOutput); + } + // We are done with large bitsets so no need to keep them hanging around + fuzzySets.clear(); + IOUtils.closeWhileHandlingException(closeables); + } + + private void saveAppropriatelySizedFuzzySet(IndexOutput fileOutput, FuzzySet fuzzySet, FieldInfo fieldInfo) throws IOException { + fileOutput.writeInt(fieldInfo.number); + fileOutput.writeString(fuzzySet.setType().getSetName()); + fuzzySet.writeTo(fileOutput); + } + } + + @Override + public String toString() { + return "FuzzyFilterPostingsFormat(" + delegatePostingsFormat + ")"; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java new file mode 100644 index 0000000000000..df443ffbca33d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedFunction; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * Fuzzy Filter interface + */ +public interface FuzzySet extends Accountable, Closeable { + + /** + * Name used for a codec to be aware of what fuzzy set has been used. + */ + SetType setType(); + + /** + * @param value the item whose membership needs to be checked. + */ + Result contains(BytesRef value); + + boolean isSaturated(); + + void writeTo(DataOutput out) throws IOException; + + /** + * Enum to represent result of membership check on a fuzzy set. + */ + enum Result { + /** + * A definite no for the set membership of an item. + */ + NO, + + /** + * Fuzzy sets cannot guarantee that a given item is present in the set or not due to the data being stored in + * a lossy format (e.g. fingerprint, hash). + * Hence, we return a response denoting that the item may be present. + */ + MAYBE + } + + /** + * Enum to declare supported properties and mappings for a fuzzy set implementation. 
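To make the NO/MAYBE contract of FuzzySet.Result concrete: a consumer may only skip work on a definite NO, while MAYBE still has to be verified against the real data structure, exactly as seekExact does earlier in this format. A minimal sketch, assuming it sits in the same package as FuzzySet; verifyOnDisk is a hypothetical stand-in for the delegate lookup:

    import org.apache.lucene.util.BytesRef;
    import java.io.IOException;

    final class FuzzySetConsumerSketch {
        // Returns false only when the fuzzy set gives a definite NO; a MAYBE still
        // has to be confirmed because it can be a false positive.
        static boolean mightContain(FuzzySet set, BytesRef term) throws IOException {
            if (set.contains(term) == FuzzySet.Result.NO) {
                return false; // guaranteed miss: safe to skip the expensive lookup
            }
            return verifyOnDisk(term); // MAYBE: verify against the real postings
        }

        // Hypothetical expensive lookup (e.g. the delegate terms dictionary).
        static boolean verifyOnDisk(BytesRef term) throws IOException {
            return false;
        }
    }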
+ */ + enum SetType { + BLOOM_FILTER_V1("bloom_filter_v1", BloomFilter::new, List.of("bloom_filter")); + + /** + * Name persisted in postings file. This will be used when reading to determine the bloom filter implementation. + */ + private final String setName; + + /** + * Interface for reading the actual fuzzy set implementation into java object. + */ + private final CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer; + + SetType(String setName, CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer, List<String> aliases) { + if (aliases.size() < 1) { + throw new IllegalArgumentException("Alias list is empty. Could not create Set Type: " + setName); + } + this.setName = setName; + this.deserializer = deserializer; + } + + public String getSetName() { + return setName; + } + + public CheckedFunction<IndexInput, ? extends FuzzySet, IOException> getDeserializer() { + return deserializer; + } + + public static SetType from(String name) { + for (SetType type : SetType.values()) { + if (type.setName.equals(name)) { + return type; + } + } + throw new IllegalArgumentException("There is no implementation for fuzzy set: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java new file mode 100644 index 0000000000000..5d1fd03f099d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; + +/** + * Factory class to create fuzzy set. + * Supports bloom filters for now. More sets can be added as required. 
+ */ +public class FuzzySetFactory { + + private final Map<String, FuzzySetParameters> setTypeForField; + + public FuzzySetFactory(Map<String, FuzzySetParameters> setTypeForField) { + this.setTypeForField = setTypeForField; + } + + public FuzzySet createFuzzySet(int maxDocs, String fieldName, CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) + throws IOException { + FuzzySetParameters params = setTypeForField.get(fieldName); + if (params == null) { + throw new IllegalArgumentException("No fuzzy set defined for field: " + fieldName); + } + switch (params.getSetType()) { + case BLOOM_FILTER_V1: + return new BloomFilter(maxDocs, params.getFalsePositiveProbability(), iteratorProvider); + default: + throw new IllegalArgumentException("No Implementation for set type: " + params.getSetType()); + } + } + + public static FuzzySet deserializeFuzzySet(IndexInput in) throws IOException { + FuzzySet.SetType setType = FuzzySet.SetType.from(in.readString()); + return setType.getDeserializer().apply(in); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java new file mode 100644 index 0000000000000..7bb96e7c34f0b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import java.util.function.Supplier; + +/** + * Wrapper for params to create a fuzzy set. + */ +public class FuzzySetParameters { + private final Supplier<Double> falsePositiveProbabilityProvider; + private final FuzzySet.SetType setType; + + public static final double DEFAULT_FALSE_POSITIVE_PROBABILITY = 0.2047d; + + public FuzzySetParameters(Supplier<Double> falsePositiveProbabilityProvider) { + this.falsePositiveProbabilityProvider = falsePositiveProbabilityProvider; + this.setType = FuzzySet.SetType.BLOOM_FILTER_V1; + } + + public double getFalsePositiveProbability() { + return falsePositiveProbabilityProvider.get(); + } + + public FuzzySet.SetType getSetType() { + return setType; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java new file mode 100644 index 0000000000000..08d6059c1e82e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.OpenSearchException; +import org.opensearch.common.util.LongArray; + +import java.io.IOException; + +/** + * A Long array backed by RandomAccessInput. + * This implementation supports read operations only. 
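A usage sketch of FuzzySetFactory and FuzzySetParameters from above; the field name, document count, terms, and printed results are illustrative only, and the classes are assumed to be available from this change:

    import org.apache.lucene.util.BytesRef;
    import org.opensearch.common.CheckedSupplier;
    import org.opensearch.index.codec.fuzzy.FuzzySet;
    import org.opensearch.index.codec.fuzzy.FuzzySetFactory;
    import org.opensearch.index.codec.fuzzy.FuzzySetParameters;

    import java.io.IOException;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    public class FuzzySetFactoryUsage {
        public static void main(String[] args) throws IOException {
            // One parameter set per field; the default false positive probability is used here.
            FuzzySetFactory factory = new FuzzySetFactory(
                Map.of("_id", new FuzzySetParameters(() -> FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY))
            );
            List<BytesRef> terms = List.of(new BytesRef("doc-1"), new BytesRef("doc-2"));
            CheckedSupplier<Iterator<BytesRef>, IOException> termsSupplier = terms::iterator;
            try (FuzzySet set = factory.createFuzzySet(2, "_id", termsSupplier)) {
                System.out.println(set.contains(new BytesRef("doc-1")));   // MAYBE (never a false negative)
                System.out.println(set.contains(new BytesRef("missing"))); // NO, barring an occasional false positive
            }
        }
    }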
+ */ +class IndexInputImmutableLongArray implements LongArray { + + private final RandomAccessInput input; + private final long size; + + IndexInputImmutableLongArray(long size, RandomAccessInput input) { + this.size = size; + this.input = input; + } + + @Override + public void close() {} + + @Override + public long size() { + return size; + } + + @Override + public synchronized long get(long index) { + try { + // Multiplying by 8 since each long is 8 bytes, and we need to get the long value at (index * 8) in the + // RandomAccessInput being accessed. + return input.readLong(index << 3); + } catch (IOException ex) { + throw new OpenSearchException(ex); + } + } + + @Override + public long set(long index, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long increment(long index, long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void fill(long fromIndex, long toIndex, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.shallowSizeOfInstance(IndexInputImmutableLongArray.class); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java new file mode 100644 index 0000000000000..bd4936aeec366 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.LongArray; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A bitset backed by a long-indexed array. + */ +class LongArrayBackedBitSet implements Accountable, Closeable { + + private long underlyingArrayLength = 0L; + private LongArray longArray; + + /** + * Constructor which uses an on heap array. This should be used during construction of the bitset. + * @param capacity The maximum capacity to provision for the bitset. + */ + LongArrayBackedBitSet(long capacity) { + // Since the bitset is backed by a long array, we only need 1 element for every 64 bits in the underlying array. + underlyingArrayLength = (capacity >> 6) + 1L; + this.longArray = BigArrays.NON_RECYCLING_INSTANCE.withCircuitBreaking().newLongArray(underlyingArrayLength); + } + + /** + * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. + * @param in IndexInput containing the serialized bitset. + * @throws IOException + */ + LongArrayBackedBitSet(IndexInput in) throws IOException { + underlyingArrayLength = in.readLong(); + // Multiplying by 8 since the length above is of the long array, so we will have + // 8 times the number of bytes in our stream. 
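A quick arithmetic check of the layout used by IndexInputImmutableLongArray and LongArrayBackedBitSet: a long holds 64 bits, so a bitset of capacity bits needs (capacity >> 6) + 1 words, a bit index maps to word index >> 6 with mask 1L << index (Java masks the shift count to the low 6 bits), and each word sits at byte offset wordIndex << 3 in the serialized stream after the length header. A standalone sketch of that arithmetic:

    public class BitsetLayoutSketch {
        public static void main(String[] args) {
            long capacity = 200;              // number of bits we want to address
            long words = (capacity >> 6) + 1; // 200 / 64 = 3, plus 1 -> 4 longs
            long payloadBytes = words << 3;   // 4 longs * 8 bytes = 32 bytes after the length header

            long index = 130;
            long wordIndex = index >> 6;      // 130 / 64 = 2 -> third long word
            long mask = 1L << index;          // shift count is taken modulo 64, so this is bit 2 of that word
            long byteOffset = wordIndex << 3; // where IndexInputImmutableLongArray reads that word from

            System.out.println(words + " words, " + payloadBytes + " bytes");
            System.out.println("bit " + index + " -> word " + wordIndex + ", bit " + Long.numberOfTrailingZeros(mask) + ", byte offset " + byteOffset);
        }
    }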
+ long streamLength = underlyingArrayLength << 3; + this.longArray = new IndexInputImmutableLongArray(underlyingArrayLength, in.randomAccessSlice(in.getFilePointer(), streamLength)); + in.skipBytes(streamLength); + } + + public void writeTo(DataOutput out) throws IOException { + out.writeLong(underlyingArrayLength); + for (int idx = 0; idx < underlyingArrayLength; idx++) { + out.writeLong(longArray.get(idx)); + } + } + + /** + * This is an O(n) operation, and will iterate over all the elements in the underlying long array + * to determine cardinality of the set. + * @return number of set bits in the bitset. + */ + public long cardinality() { + long tot = 0; + for (int i = 0; i < underlyingArrayLength; ++i) { + tot += Long.bitCount(longArray.get(i)); + } + return tot; + } + + /** + * Retrieves whether the bit is set or not at the given index. + * @param index the index to look up for the bit + * @return true if bit is set, false otherwise + */ + public boolean get(long index) { + long i = index >> 6; // div 64 + long val = longArray.get(i); + long bitmask = 1L << index; + return (val & bitmask) != 0; + } + + /** + * Sets the bit at the given index. + * @param index the index to set the bit at. + */ + public void set(long index) { + long wordNum = index >> 6; // div 64 + long bitmask = 1L << index; + long val = longArray.get(wordNum); + longArray.set(wordNum, val | bitmask); + } + + @Override + public long ramBytesUsed() { + return 128L + longArray.ramBytesUsed(); + } + + @Override + public void close() throws IOException { + IOUtils.close(longArray); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java new file mode 100644 index 0000000000000..7aeac68cd192a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** classes responsible for handling all fuzzy codecs and operations */ +package org.opensearch.index.codec.fuzzy; diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java index 19454967f9ee3..a9cc24abe3c01 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -17,7 +17,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.function.BiConsumer; +import java.util.function.Consumer; /** * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of @@ -31,10 +31,10 @@ final class ReplicaFileTracker { public static final Logger logger = LogManager.getLogger(ReplicaFileTracker.class); private final Map<String, Integer> refCounts = new HashMap<>(); - private final BiConsumer<String, String> fileDeleter; + private final Consumer<String> fileDeleter; private final Set<String> EXCLUDE_FILES = Set.of("write.lock"); - public ReplicaFileTracker(BiConsumer<String, String> fileDeleter) { + public ReplicaFileTracker(Consumer<String> fileDeleter) { this.fileDeleter = fileDeleter; } @@ -82,7 +82,7 @@ private synchronized void delete(Collection<String> toDelete) { private synchronized void delete(String fileName) { assert canDelete(fileName); - fileDeleter.accept("delete unreferenced", fileName); + fileDeleter.accept(fileName); } private synchronized boolean canDelete(String fileName) { diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java index d4a97f0267222..34aecfc62b8b2 100644 --- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java @@ -41,6 +41,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.ReplicationStats; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; import org.opensearch.index.remote.RemoteSegmentStats; import java.io.IOException; @@ -95,7 +96,8 @@ public class SegmentsStats implements Writeable, ToXContentFragment { Map.entry("tvx", "Term Vector Index"), Map.entry("tvd", "Term Vector Documents"), Map.entry("tvf", "Term Vector Fields"), - Map.entry("liv", "Live Documents") + Map.entry("liv", "Live Documents"), + Map.entry(FuzzyFilterPostingsFormat.FUZZY_FILTER_FILE_EXTENSION, "Fuzzy Filter") ); public SegmentsStats() { diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index c1f69d1ef3638..dea389bb6a0ff 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -90,6 +90,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo( @@ -108,6 +109,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo( @@ -126,6 +128,7 @@ public final class TranslogLeafReader extends LeafReader { 0, 
VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); public static Set<String> ALL_FIELD_NAMES = Sets.newHashSet(FAKE_SOURCE_FIELD.name, FAKE_ROUTING_FIELD.name, FAKE_ID_FIELD.name); diff --git a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java index a2a70e280187a..3a2504ce92158 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java @@ -43,6 +43,9 @@ * aggregations, which only use {@link #advanceExact(int)} and * {@link #longValue()}. * + * In case when optimizations based on point values are used, the {@link #advance(int)} + * and, optionally, {@link #cost()} have to be implemented as well. + * * @opensearch.internal */ public abstract class AbstractNumericDocValues extends NumericDocValues { diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java index e09de53dc05f7..6db6bbccacae5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Numbers; import org.opensearch.common.geo.GeoPoint; @@ -76,6 +77,10 @@ public double doubleValue() throws IOException { throw new UnsupportedOperationException(); } + @Override + public int advance(int target) throws IOException { + return DocIdSetIterator.NO_MORE_DOCS; + } }; } @@ -561,6 +566,10 @@ public boolean advanceExact(int doc) throws IOException { return values.advanceExact(doc); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -591,6 +600,10 @@ public int docValueCount() { return values.docValueCount(); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -622,6 +635,12 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + docID = values.advance(target); + return docID; + } } /** @@ -683,6 +702,11 @@ public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -715,6 +739,11 @@ public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value.longValue(); } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -742,6 +771,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java index b0f3400acfb3d..f69cfacaf35d4 100644 --- 
a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java @@ -71,6 +71,11 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } @@ -95,6 +100,23 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } + + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java index 4ee494ffb30aa..816445bb319f1 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java @@ -69,4 +69,8 @@ public double nextValue() throws IOException { return in.doubleValue(); } + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java index 39aca38c331ea..e2739e462dea5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java @@ -74,4 +74,9 @@ public NumericDoubleValues getDoubleValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } + } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java index 150e114d342de..98a44c246f654 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java @@ -67,4 +67,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java index 1bae845c9b0d2..279a78ac51adf 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java +++ 
b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java @@ -72,4 +72,8 @@ public SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java index dce1aff9cc94f..be9064751b5f0 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java @@ -70,4 +70,15 @@ protected SortedNumericDoubleValues() {} */ public abstract int docValueCount(); + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java index 8d17146760d9e..d9e9dd6a293fd 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java @@ -42,4 +42,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java index 90b49e19a8954..63c7e6162cc55 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java @@ -47,4 +47,8 @@ public SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java index b70752df9e826..0019a41e67c02 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java @@ -336,6 +336,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -364,6 +369,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public 
int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -434,6 +444,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -462,6 +477,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index 831bb8aff3be3..d4eeb8aae8e24 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -331,6 +331,7 @@ private GetResult innerGetLoadFromStoredFields( 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); StoredFieldVisitor.Status status = fieldVisitor.needsField(fieldInfo); diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index 2a677d8bc1352..db8da8a949d6f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -36,7 +36,9 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -222,25 +224,48 @@ protected Object parseSourceValue(Object value) { @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); + Query query; if (value instanceof InetAddress) { - return InetAddressPoint.newExactQuery(name(), (InetAddress) value); + query = InetAddressPoint.newExactQuery(name(), (InetAddress) value); } else { if (value instanceof BytesRef) { value = ((BytesRef) value).utf8ToString(); } + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + query = InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } else { + InetAddress address = InetAddresses.forString(term); + query = InetAddressPoint.newExactQuery(name(), address); + } + } + if (isSearchable() && hasDocValues()) { + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ); + } + if (hasDocValues()) { String term = value.toString(); if (term.contains("/")) { final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); } - InetAddress address = InetAddresses.forString(term); - return InetAddressPoint.newExactQuery(name(), address); + return 
SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())); } + return query; } @Override public Query termsQuery(List<?> values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); InetAddress[] addresses = new InetAddress[values.size()]; int i = 0; for (Object value : values) { @@ -265,14 +290,32 @@ public Query termsQuery(List<?> values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - return rangeQuery( - lowerTerm, - upperTerm, - includeLower, - includeUpper, - (lower, upper) -> InetAddressPoint.newRangeQuery(name(), lower, upper) - ); + failIfNotIndexedAndNoDocValues(); + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (lower, upper) -> { + Query query = InetAddressPoint.newRangeQuery(name(), lower, upper); + if (isSearchable() && hasDocValues()) { + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ); + } + if (hasDocValues()) { + return SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ); + } + return query; + }); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 9b8fa7eec37b9..fc8654216e187 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -237,6 +237,7 @@ public MapperService( ScriptService scriptService ) { super(indexSettings); + this.indexVersionCreated = indexSettings.getIndexVersionCreated(); this.indexAnalyzers = indexAnalyzers; this.documentParser = new DocumentMapperParser( @@ -261,7 +262,12 @@ public MapperService( this.idFieldDataEnabled = idFieldDataEnabled; if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { - throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); + deprecationLogger.deprecate( + index().getName() + INDEX_MAPPER_DYNAMIC_SETTING.getKey(), + "Index [{}] has setting [{}] that is not supported in OpenSearch, its value will be ignored.", + index().getName(), + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + ); } } diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index ac4fde7f06b16..3f97b3918a126 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -322,10 +322,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { try { context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); - innerQuery = this.query.toQuery(context); + try { + innerQuery = this.query.toQuery(context); + } finally { + context.nestedScope().previousLevel(); + } } finally { context.setParentFilter(previousParentFilter); - context.nestedScope().previousLevel(); } // ToParentBlockJoinQuery requires that the inner 
query only matches documents diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index eba3ed076e82b..f3b392559d33e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -256,7 +256,7 @@ private QueryShardContext( this.bitsetFilterCache = bitsetFilterCache; this.indexFieldDataService = indexFieldDataLookup; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); this.scriptService = scriptService; this.indexSettings = indexSettings; this.searcher = searcher; @@ -270,7 +270,7 @@ private void reset() { allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.lookup = null; this.namedQueries.clear(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); } public IndexAnalyzers getIndexAnalyzers() { @@ -423,7 +423,8 @@ public SearchLookup lookup() { if (this.lookup == null) { this.lookup = new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } return this.lookup; @@ -439,7 +440,8 @@ public SearchLookup newFetchLookup() { */ return new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java index cfc44d4434d3b..1c693f9761240 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java @@ -560,6 +560,11 @@ public boolean needsScores() { protected NumericDoubleValues distance(LeafReaderContext context) { final SortedNumericDoubleValues doubleValues = fieldData.load(context).getDoubleValues(); return FieldData.replaceMissing(mode.select(new SortingNumericDoubleValues() { + @Override + public int advance(int target) throws IOException { + return doubleValues.advance(target); + } + @Override public boolean advanceExact(int docId) throws IOException { if (doubleValues.advanceExact(docId)) { diff --git a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java index 51abe389ad686..488768c32d17f 100644 --- a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java +++ b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java @@ -33,6 +33,7 @@ package org.opensearch.index.query.support; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ObjectMapper; import java.util.Deque; @@ -47,6 +48,11 @@ public final class NestedScope { private final Deque<ObjectMapper> levelStack = new LinkedList<>(); + private final IndexSettings indexSettings; + + public 
NestedScope(IndexSettings indexSettings) { + this.indexSettings = indexSettings; + } /** * @return For the current nested level returns the object mapper that belongs to that @@ -60,7 +66,21 @@ public ObjectMapper getObjectMapper() { */ public ObjectMapper nextLevel(ObjectMapper level) { ObjectMapper previous = levelStack.peek(); - levelStack.push(level); + if (levelStack.size() < indexSettings.getMaxNestedQueryDepth()) { + levelStack.push(level); + } else { + throw new IllegalArgumentException( + "The depth of Nested Query is [" + + (levelStack.size() + 1) + + "] has exceeded " + + "the allowed maximum of [" + + indexSettings.getMaxNestedQueryDepth() + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING.getKey() + + "] index level setting." + ); + } return previous; } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index fe9440813b94f..92436a09a4e7e 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -301,12 +301,17 @@ public Map<String, Long> getLatestLocalFileNameLengthMap() { } /** - * Updates the latestLocalFileNameLengthMap by adding file name and it's size to the map. The method is given a function as an argument which is used for determining the file size (length in bytes). This method is also provided the collection of segment files which are the latest refresh local segment files. This method also removes the stale segment files from the map that are not part of the input segment files. + * Updates the latestLocalFileNameLengthMap by adding file name and it's size to the map. + * The method is given a function as an argument which is used for determining the file size (length in bytes). + * This method is also provided the collection of segment files which are the latest refresh local segment files. + * This method also removes the stale segment files from the map that are not part of the input segment files. 
* * @param segmentFiles list of local refreshed segment files * @param fileSizeFunction function is used to determine the file size in bytes + * + * @return updated map of local segment files and filesize */ - public void updateLatestLocalFileNameLengthMap( + public Map<String, Long> updateLatestLocalFileNameLengthMap( Collection<String> segmentFiles, CheckedFunction<String, Long, IOException> fileSizeFunction ) { @@ -332,6 +337,7 @@ public void updateLatestLocalFileNameLengthMap( // Remove keys from the fileSizeMap that do not exist in the latest segment files latestLocalFileNameLengthMap.entrySet().removeIf(entry -> fileSet.contains(entry.getKey()) == false); computeBytesLag(); + return Collections.unmodifiableMap(latestLocalFileNameLengthMap); } public void addToLatestUploadedFiles(String file) { diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index b13f1eb04a941..576e00f8f30d1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -38,7 +38,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -466,12 +465,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.QUERY_TIME_IN_MILLIS, Fields.QUERY_TIME, getQueryTime()); builder.field(Fields.QUERY_CURRENT, queryCurrent); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); - builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); - builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); - builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); - } + builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); + builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); + builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); + builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); builder.field(Fields.FETCH_TOTAL, fetchCount); builder.humanReadableField(Fields.FETCH_TIME_IN_MILLIS, Fields.FETCH_TIME, getFetchTime()); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index cbb246219546b..977155a1cbb72 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1697,7 +1697,8 @@ ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) th } final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() - && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()) { + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration() + && latestReplicationCheckpoint.getPrimaryTerm() == getOperationPrimaryTerm()) { 
return latestReplicationCheckpoint; } final Map<String, StoreFileMetadata> metadataMap = store.getSegmentMetadataMap(segmentInfos); @@ -2014,7 +2015,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO /* ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003 */ - private RemoteSegmentStoreDirectory getRemoteDirectory() { + public RemoteSegmentStoreDirectory getRemoteDirectory() { assert indexSettings.isRemoteStoreEnabled(); assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); @@ -2024,23 +2025,35 @@ private RemoteSegmentStoreDirectory getRemoteDirectory() { } /** - Returns true iff it is able to verify that remote segment store - is in sync with local + * Returns true iff it is able to verify that remote segment store + * is in sync with local */ boolean isRemoteSegmentStoreInSync() { assert indexSettings.isRemoteStoreEnabled(); try { RemoteSegmentStoreDirectory directory = getRemoteDirectory(); if (directory.readLatestMetadataFile() != null) { - // verifying that all files except EXCLUDE_FILES are uploaded to the remote Collection<String> uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet(); - SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); - Collection<String> localFiles = segmentInfos.files(true); - if (uploadFiles.containsAll(localFiles)) { - return true; + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = getSegmentInfosSnapshot()) { + Collection<String> localSegmentInfosFiles = segmentInfosGatedCloseable.get().files(true); + Set<String> localFiles = new HashSet<>(localSegmentInfosFiles); + // verifying that all files except EXCLUDE_FILES are uploaded to the remote + localFiles.removeAll(RemoteStoreRefreshListener.EXCLUDE_FILES); + if (uploadFiles.containsAll(localFiles)) { + return true; + } + logger.debug( + () -> new ParameterizedMessage( + "RemoteSegmentStoreSyncStatus localSize={} remoteSize={}", + localFiles.size(), + uploadFiles.size() + ) + ); } } - } catch (IOException e) { + } catch (AlreadyClosedException e) { + throw e; + } catch (Throwable e) { logger.error("Exception while reading latest metadata", e); } return false; diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index d96a7e7c95ecf..7bb80b736693f 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -41,6 +41,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -171,13 +172,33 @@ private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { // When the shouldSync is called the first time, then 1st condition on primary term is true. But after that // we update the primary term and the same condition would not evaluate to true again in syncSegments. // Below check ensures that if there is commit, then that gets picked up by both 1st and 2nd shouldSync call. 
- || isRefreshAfterCommitSafe(); + || isRefreshAfterCommitSafe() + || isRemoteSegmentStoreInSync() == false; if (shouldSync || skipPrimaryTermCheck) { return shouldSync; } return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } + /** + * Checks if all files present in local store are uploaded to remote store or part of excluded files. + * + * Different from IndexShard#isRemoteSegmentStoreInSync as + * it uses files uploaded cache in RemoteDirector and it doesn't make a remote store call. + * Doesn't throw an exception on store getting closed as store will be open + * + * + * @return true iff all the local files are uploaded to remote store. + */ + boolean isRemoteSegmentStoreInSync() { + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + return segmentInfosGatedCloseable.get().files(true).stream().allMatch(this::skipUpload); + } catch (Throwable throwable) { + logger.error("Throwable thrown during isRemoteSegmentStoreInSync", throwable); + } + return false; + } + /* @return false if retry is needed */ @@ -209,13 +230,25 @@ private boolean syncSegments() { try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); + if (checkpoint.getPrimaryTerm() != indexShard.getOperationPrimaryTerm()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "primaryTerm mismatch during segments upload to remote store [%s] != [%s]", + checkpoint.getPrimaryTerm(), + indexShard.getOperationPrimaryTerm() + ) + ); + } // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. 
long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); Collection<String> localSegmentsPostRefresh = segmentInfos.files(true); // Create a map of file name to size and update the refresh segment tracker - updateLocalSizeMapAndTracker(localSegmentsPostRefresh); + Map<String, Long> localSegmentsSizeMap = updateLocalSizeMapAndTracker(localSegmentsPostRefresh).entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); CountDownLatch latch = new CountDownLatch(1); ActionListener<Void> segmentUploadsCompletedListener = new LatchedActionListener<>(new ActionListener<>() { @Override @@ -231,6 +264,7 @@ public void onResponse(Void unused) { refreshClockTimeMs, refreshSeqNo, lastRefreshedCheckpoint, + localSegmentsSizeMap, checkpoint ); // At this point since we have uploaded new segments, segment infos and segment metadata file, @@ -251,7 +285,7 @@ public void onFailure(Exception e) { }, latch); // Start the segments files upload - uploadNewSegments(localSegmentsPostRefresh, segmentUploadsCompletedListener); + uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); latch.await(); } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); @@ -295,10 +329,11 @@ private void onSuccessfulSegmentsSync( long refreshClockTimeMs, long refreshSeqNo, long lastRefreshedCheckpoint, + Map<String, Long> localFileSizeMap, ReplicationCheckpoint checkpoint ) { // Update latest uploaded segment files name in segment tracker - segmentTracker.setLatestUploadedFiles(segmentTracker.getLatestLocalFileNameLengthMap().keySet()); + segmentTracker.setLatestUploadedFiles(localFileSizeMap.keySet()); // Update the remote refresh time and refresh seq no updateRemoteRefreshTimeAndSeqNo(refreshTimeMs, refreshClockTimeMs, refreshSeqNo); // Reset the backoffDelayIterator for the future failures @@ -371,7 +406,11 @@ void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos se } } - private void uploadNewSegments(Collection<String> localSegmentsPostRefresh, ActionListener<Void> listener) { + private void uploadNewSegments( + Collection<String> localSegmentsPostRefresh, + Map<String, Long> localSegmentsSizeMap, + ActionListener<Void> listener + ) { Collection<String> filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { logger.debug("No new segments to upload in uploadNewSegments"); @@ -385,7 +424,7 @@ private void uploadNewSegments(Collection<String> localSegmentsPostRefresh, Acti for (String src : filteredFiles) { // Initializing listener here to ensure that the stats increment operations are thread-safe - UploadListener statsListener = createUploadListener(); + UploadListener statsListener = createUploadListener(localSegmentsSizeMap); ActionListener<Void> aggregatedListener = ActionListener.wrap(resp -> { statsListener.onSuccess(src); batchUploadListener.onResponse(resp); @@ -444,9 +483,11 @@ private void updateRemoteRefreshTimeAndSeqNo(long refreshTimeMs, long refreshClo * Updates map of file name to size of the input segment files in the segment tracker. Uses {@code storeDirectory.fileLength(file)} to get the size. * * @param segmentFiles list of segment files that are part of the most recent local refresh. 
+ * + * @return updated map of local segment files and filesize */ - private void updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { - segmentTracker.updateLatestLocalFileNameLengthMap(segmentFiles, storeDirectory::fileLength); + private Map<String, Long> updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { + return segmentTracker.updateLatestLocalFileNameLengthMap(segmentFiles, storeDirectory::fileLength); } private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesBeforeUpload, long startTimeInNS) { @@ -521,22 +562,24 @@ private boolean isLocalOrSnapshotRecovery() { /** * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events + * + * @param fileSizeMap updated map of current snapshot of local segments to their sizes */ - private UploadListener createUploadListener() { + private UploadListener createUploadListener(Map<String, Long> fileSizeMap) { return new UploadListener() { private long uploadStartTime = 0; @Override public void beforeUpload(String file) { // Start tracking the upload bytes started - segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesStarted(fileSizeMap.get(file)); uploadStartTime = System.currentTimeMillis(); } @Override public void onSuccess(String file) { // Track upload success - segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesSucceeded(fileSizeMap.get(file)); segmentTracker.addToLatestUploadedFiles(file); segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); } @@ -544,7 +587,7 @@ public void onSuccess(String file) { @Override public void onFailure(String file) { // Track upload failure - segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesFailed(fileSizeMap.get(file)); segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); } }; diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 5b1940bb1d9a5..3faef2da05320 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -38,11 +38,13 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -191,7 +193,8 @@ void recoverFromLocalShards( // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. 
indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { throw new IndexShardRecoveryException( indexShard.shardId(), @@ -432,7 +435,8 @@ void recoverFromSnapshotAndRemoteStore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -717,7 +721,8 @@ private void restore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -791,4 +796,31 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO ); store.associateIndexWithNewTranslog(translogUUID); } + + /* + Blocks the calling thread, waiting for the remote store to get synced till internal Remote Upload Timeout + */ + private void waitForRemoteStoreSync(IndexShard indexShard) { + if (indexShard.shardRouting.primary() == false) { + return; + } + long startNanos = System.nanoTime(); + + while (System.nanoTime() - startNanos < indexShard.getRecoverySettings().internalRemoteUploadTimeout().nanos()) { + try { + if (indexShard.isRemoteSegmentStoreInSync()) { + break; + } else { + try { + Thread.sleep(TimeValue.timeValueMinutes(1).seconds()); + } catch (InterruptedException ie) { + throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie); + } + } + } catch (AlreadyClosedException e) { + // There is no point in waiting as shard is now closed . 
+ return; + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 9c1e902606cab..bfab9f8c18aa2 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -45,6 +45,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -164,7 +165,7 @@ public RemoteSegmentMetadata init() throws IOException { */ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration, String acquirerId) throws IOException { String metadataFilePrefix = MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, commitGeneration); - String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLock(metadataFilePrefix, acquirerId); + String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFile(metadataFilePrefix, acquirerId); RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); @@ -745,30 +746,25 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List<String> metadataFilesEligibleToDelete = sortedMetadataFileList.subList( - lastNMetadataFilesToKeep, - sortedMetadataFileList.size() + List<String> metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) ); - List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream().filter(metadataFile -> { - try { - return !isLockAcquired(metadataFile); - } catch (IOException e) { - logger.error( - "skipping metadata file (" - + metadataFile - + ") deletion for this run," - + " as checking lock for metadata is failing with error: " - + e - ); - return false; - } - }).collect(Collectors.toList()); + Set<String> allLockFiles; + try { + allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + } catch (Exception e) { + logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); + return; + } + List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() + .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) + .collect(Collectors.toList()); sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); logger.debug( "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", metadataFilesEligibleToDelete, - metadataFilesEligibleToDelete + metadataFilesToBeDeleted ); Map<String, UploadedSegmentMetadata> activeSegmentFilesMetadataMap = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index 5ebd00f59ef49..9c29e03c225e4 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -21,6 +21,7 @@ import 
java.util.Collection; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; /** @@ -75,7 +76,7 @@ public void release(LockInfo lockInfo) throws IOException { } } - public String fetchLock(String filenamePrefix, String acquirerId) throws IOException { + public String fetchLockedMetadataFile(String filenamePrefix, String acquirerId) throws IOException { Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); List<String> lockFilesForAcquirer = lockFiles.stream() .filter(lockFile -> acquirerId.equals(FileLockInfo.LockFileUtils.getAcquirerIdFromLock(lockFile))) @@ -88,6 +89,11 @@ public String fetchLock(String filenamePrefix, String acquirerId) throws IOExcep return lockFilesForAcquirer.get(0); } + public Set<String> fetchLockedMetadataFiles(String filenamePrefix) throws IOException { + Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + return lockFiles.stream().map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock).collect(Collectors.toSet()); + } + /** * Checks whether a given file have any lock on it or not. * @param lockInfo File Lock Info instance for which we need to check if lock is acquired. diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java index 7166e9aa482e3..8097fd08da50a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java @@ -15,6 +15,8 @@ import org.opensearch.index.store.remote.utils.TransferManager; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; /** * This is an implementation of {@link OnDemandBlockIndexInput} where this class provides the main IndexInput using shard snapshot files. @@ -136,25 +138,45 @@ protected IndexInput fetchBlock(int blockId) throws IOException { final long blockStart = getBlockStart(blockId); final long blockEnd = blockStart + getActualBlockSize(blockId); - // If the snapshot file is chunked, we must account for this by - // choosing the appropriate file part and updating the position - // accordingly. - final int part = (int) (blockStart / partSize); - final long partStart = part * partSize; - - final long position = blockStart - partStart; - final long length = blockEnd - blockStart; - + // Block may be present on multiple chunks of a file, so we need + // to fetch each chunk/blob part separately to fetch an entire block. BlobFetchRequest blobFetchRequest = BlobFetchRequest.builder() - .position(position) - .length(length) - .blobName(fileInfo.partName(part)) + .blobParts(getBlobParts(blockStart, blockEnd)) .directory(directory) .fileName(blockFileName) .build(); return transferManager.fetchBlob(blobFetchRequest); } + /** + * Returns list of blob parts/chunks in a file for a given block. + */ + protected List<BlobFetchRequest.BlobPart> getBlobParts(long blockStart, long blockEnd) { + // If the snapshot file is chunked, we must account for this by + // choosing the appropriate file part and updating the position + // accordingly. 
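+ // Each loop iteration below computes the offset within the current part and the number of the block's
+ // remaining bytes that fall in that part, records a BlobPart for it, and then advances until the whole
+ // [blockStart, blockEnd) range is covered.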
+ int partNum = (int) (blockStart / partSize); + long pos = blockStart; + long diff = (blockEnd - blockStart); + + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + while (diff > 0) { + long partStart = pos % partSize; + long partEnd; + if ((partStart + diff) > partSize) { + partEnd = partSize; + } else { + partEnd = (partStart + diff); + } + long fetchBytes = partEnd - partStart; + blobParts.add(new BlobFetchRequest.BlobPart(fileInfo.partName(partNum), partStart, fetchBytes)); + partNum++; + pos = pos + fetchBytes; + diff = (blockEnd - pos); + } + return blobParts; + } + @Override public OnDemandBlockSnapshotIndexInput clone() { OnDemandBlockSnapshotIndexInput clone = buildSlice("clone", 0L, this.length); diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index fb89e651e7616..0261ab24dfa7a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -11,16 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.settings.Settings; +import org.opensearch.common.inject.Provider; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -30,79 +27,90 @@ import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; /** - * IndexEventListener to clean up file cache when the index is deleted. The cached entries will be eligible + * IndexStoreListener to clean up file cache when the index is deleted. The cached entries will be eligible * for eviction when the shard is deleted, but this listener deterministically removes entries from memory and * from disk at the time of shard deletion as opposed to waiting for the cache to need to perform eviction. * * @opensearch.internal */ -public class FileCacheCleaner implements IndexEventListener { - private static final Logger log = LogManager.getLogger(FileCacheCleaner.class); +public class FileCacheCleaner implements NodeEnvironment.IndexStoreListener { + private static final Logger logger = LogManager.getLogger(FileCacheCleaner.class); - private final NodeEnvironment nodeEnvironment; - private final FileCache fileCache; + private final Provider<FileCache> fileCacheProvider; - public FileCacheCleaner(NodeEnvironment nodeEnvironment, FileCache fileCache) { - this.nodeEnvironment = nodeEnvironment; - this.fileCache = fileCache; + public FileCacheCleaner(Provider<FileCache> fileCacheProvider) { + this.fileCacheProvider = fileCacheProvider; } /** - * before shard deleted and after shard closed, cleans up the corresponding index file path entries from FC. 
- * @param shardId The shard id - * @param settings the shards index settings + * Before the shard path is deleted, cleans up the corresponding index file path entries from the FileCache and deletes the + * corresponding shard file cache path. + * + * @param shardId the shard id + * @param indexSettings the index settings + * @param nodeEnvironment the node environment */ @Override - public void beforeIndexShardDeleted(ShardId shardId, Settings settings) { + public void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { + final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); + cleanupShardFileCache(shardPath); + deleteShardFileCacheDirectory(shardPath); + } + } + + /** + * Cleans up the corresponding index file path entries from FileCache + * + * @param shardPath the shard path + */ + private void cleanupShardFileCache(ShardPath shardPath) { try { - if (isRemoteSnapshot(settings)) { - final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); - final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); - try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { - for (Path subPath : ds) { - fileCache.remove(subPath.toRealPath()); - } + final FileCache fc = fileCacheProvider.get(); + assert fc != null; + final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); + try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { + for (Path subPath : ds) { + fc.remove(subPath.toRealPath()); } } } catch (IOException ioe) { - log.error(() -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardId), ioe); + logger.error( + () -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardPath.getShardId()), + ioe + ); } } - @Override - public void afterIndexShardDeleted(ShardId shardId, Settings settings) { - if (isRemoteSnapshot(settings)) { - final Path path = ShardPath.loadFileCachePath(nodeEnvironment, shardId).getDataPath(); - try { - if (Files.exists(path)) { - IOUtils.rm(path); - } - } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardId), e); + private void deleteShardFileCacheDirectory(ShardPath shardPath) { + final Path path = shardPath.getDataPath(); + try { + if (Files.exists(path)) { + IOUtils.rm(path); } + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardPath.getShardId()), e); } } + /** + * Before the index path is deleted, deletes the corresponding index file cache path.
+ * + * @param index the index + * @param indexSettings the index settings + * @param nodeEnvironment the node environment + */ @Override - public void afterIndexRemoved( - Index index, - IndexSettings indexSettings, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason - ) { - if (isRemoteSnapshot(indexSettings.getSettings()) - && reason == IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED) { + public void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { final Path indexCachePath = nodeEnvironment.fileCacheNodePath().fileCachePath.resolve(index.getUUID()); if (Files.exists(indexCachePath)) { try { IOUtils.rm(indexCachePath); } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); } } } } - - private static boolean isRemoteSnapshot(Settings settings) { - return IndexModule.Type.REMOTE_SNAPSHOT.match(settings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())); - } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java index d0508e9c6f4c7..f7e6545b5010e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java @@ -12,6 +12,7 @@ import org.apache.lucene.store.FSDirectory; import java.nio.file.Path; +import java.util.List; /** * The specification to fetch specific block from blob store @@ -20,37 +21,22 @@ */ public class BlobFetchRequest { - private final long position; - - private final long length; - - private final String blobName; - private final Path filePath; private final Directory directory; private final String fileName; + private final List<BlobPart> blobParts; + + private final long blobLength; + private BlobFetchRequest(Builder builder) { - this.position = builder.position; - this.length = builder.length; - this.blobName = builder.blobName; this.fileName = builder.fileName; this.filePath = builder.directory.getDirectory().resolve(fileName); this.directory = builder.directory; - } - - public long getPosition() { - return position; - } - - public long getLength() { - return length; - } - - public String getBlobName() { - return blobName; + this.blobParts = builder.blobParts; + this.blobLength = builder.blobParts.stream().mapToLong(o -> o.getLength()).sum(); } public Path getFilePath() { @@ -65,6 +51,14 @@ public String getFileName() { return fileName; } + public List<BlobPart> blobParts() { + return blobParts; + } + + public long getBlobLength() { + return blobLength; + } + public static Builder builder() { return new Builder(); } @@ -72,12 +66,8 @@ public static Builder builder() { @Override public String toString() { return "BlobFetchRequest{" - + "position=" - + position - + ", length=" - + length - + ", blobName='" - + blobName + + "blobParts=" + + blobParts + '\'' + ", filePath=" + filePath @@ -90,35 +80,45 @@ public String toString() { } /** - * Builder for BlobFetchRequest + * BlobPart represents a single chunk of a file */ - public static final class Builder { + public static class BlobPart { + private String blobName; private long position; private long length; - private String blobName; - private FSDirectory directory; - 
private String fileName; - - private Builder() {} - public Builder position(long position) { - this.position = position; - return this; - } - - public Builder length(long length) { + public BlobPart(String blobName, long position, long length) { + this.blobName = blobName; if (length <= 0) { - throw new IllegalArgumentException("Length for blob fetch request needs to be non-negative"); + throw new IllegalArgumentException("Length for blob part fetch request needs to be positive"); } this.length = length; - return this; + this.position = position; } - public Builder blobName(String blobName) { - this.blobName = blobName; - return this; + public String getBlobName() { + return blobName; + } + + public long getPosition() { + return position; } + public long getLength() { + return length; + } + } + + /** + * Builder for BlobFetchRequest + */ + public static final class Builder { + private List<BlobPart> blobParts; + private FSDirectory directory; + private String fileName; + + private Builder() {} + public Builder directory(FSDirectory directory) { this.directory = directory; return this; } @@ -129,6 +129,11 @@ public Builder fileName(String fileName) { return this; } + public Builder blobParts(List<BlobPart> blobParts) { + this.blobParts = blobParts; + return this; + } + public BlobFetchRequest build() { return new BlobFetchRequest(this); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index c9469283ee921..98cad7bfadb09 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -48,11 +48,12 @@ public TransferManager(final BlobContainer blobContainer, final FileCache fileCa } /** - * Given a blobFetchRequest, return it's corresponding IndexInput. + * Given a blobFetchRequest, return its corresponding IndexInput.
* @param blobFetchRequest to fetch * @return future of IndexInput augmented with internal caching maintenance tasks */ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOException { + final Path key = blobFetchRequest.getFilePath(); final CachedIndexInput cacheEntry = fileCache.compute(key, (path, cachedIndexInput) -> { @@ -75,6 +76,7 @@ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOExceptio } } + @SuppressWarnings("removal") private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobContainer blobContainer, BlobFetchRequest request) { // We need to do a privileged action here in order to fetch from remote // and write to the local file cache in case this is invoked as a side @@ -84,15 +86,20 @@ private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobCo try { if (Files.exists(request.getFilePath()) == false) { try ( - InputStream snapshotFileInputStream = blobContainer.readBlob( - request.getBlobName(), - request.getPosition(), - request.getLength() - ); OutputStream fileOutputStream = Files.newOutputStream(request.getFilePath()); OutputStream localFileOutputStream = new BufferedOutputStream(fileOutputStream) ) { - snapshotFileInputStream.transferTo(localFileOutputStream); + for (BlobFetchRequest.BlobPart blobPart : request.blobParts()) { + try ( + InputStream snapshotFileInputStream = blobContainer.readBlob( + blobPart.getBlobName(), + blobPart.getPosition(), + blobPart.getLength() + ); + ) { + snapshotFileInputStream.transferTo(localFileOutputStream); + } + } } } final IndexInput luceneIndexInput = request.getDirectory().openInput(request.getFileName(), IOContext.READ); @@ -152,7 +159,7 @@ public IndexInput getIndexInput() throws IOException { @Override public long length() { - return request.getLength(); + return request.getBlobLength(); } @Override diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index db5b93f073b03..630ce068ba782 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,6 +62,7 @@ import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.cache.CachePolicyInfoWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; @@ -135,7 +136,6 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; @@ -362,7 +362,6 @@ public class IndicesService extends AbstractLifecycleComponent private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final FileCacheCleaner fileCacheCleaner; private final SearchRequestStats searchRequestStats; @@ -395,7 +394,6 @@ public IndicesService( Map<String, IndexStorePlugin.RecoveryStateFactory> 
recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier<RepositoriesService> repositoriesServiceSupplier, - FileCacheCleaner fileCacheCleaner, SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, RecoverySettings recoverySettings @@ -450,7 +448,6 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; - this.fileCacheCleaner = fileCacheCleaner; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -766,7 +763,6 @@ public void onStoreClosed(ShardId shardId) { }; finalListeners.add(onStoreClose); finalListeners.add(oldShardsStats); - finalListeners.add(fileCacheCleaner); final IndexService indexService = createIndexService( CREATE_INDEX, indexMetadata, @@ -1702,7 +1698,12 @@ public void loadIntoContext(ShardSearchRequest request, SearchContext context, Q boolean[] loadedFromCache = new boolean[] { true }; BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> { + long beforeQueryPhase = System.nanoTime(); queryPhase.execute(context); + CachePolicyInfoWrapper policyInfo = new CachePolicyInfoWrapper(System.nanoTime() - beforeQueryPhase); + policyInfo.writeTo(out); + // Write relevant info for cache tier policies before the whole QuerySearchResult, so we don't have to read + // the whole QSR into memory when we decide whether to allow it into a particular cache tier based on took time/other info context.queryResult().writeToNoId(out); loadedFromCache[0] = false; }); @@ -1711,6 +1712,7 @@ public void loadIntoContext(ShardSearchRequest request, SearchContext context, Q // restore the cached query result into the context final QuerySearchResult result = context.queryResult(); StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry); + CachePolicyInfoWrapper policyInfo = new CachePolicyInfoWrapper(in); // This wrapper is not needed outside the cache result.readFromWithId(context.id(), in); result.setSearchShardTarget(context.shardTarget()); } else if (context.queryResult().searchTimedOut()) { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 5351ae7fe08dd..2b41eb125d808 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -46,6 +46,8 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; +import java.util.concurrent.TimeUnit; + /** * Settings for the recovery mechanism * @@ -176,6 +178,13 @@ public class RecoverySettings { Property.Dynamic ); + public static final Setting<TimeValue> INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( + "indices.recovery.internal_remote_upload_timeout", + new TimeValue(1, TimeUnit.HOURS), + Property.Dynamic, + Property.NodeScope + ); + // choose 512KB-16B to ensure that the resulting byte[] is not a humongous allocation in G1. 
public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512 * 1024 - 16, ByteSizeUnit.BYTES); @@ -193,6 +202,7 @@ public class RecoverySettings { private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + private volatile TimeValue internalRemoteUploadTimeout; public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); @@ -216,6 +226,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { } logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec); + this.internalRemoteUploadTimeout = INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); @@ -237,6 +248,8 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, this::setMinRemoteSegmentMetadataFiles ); + clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); + } public RateLimiter rateLimiter() { @@ -267,6 +280,10 @@ public TimeValue internalActionLongTimeout() { return internalActionLongTimeout; } + public TimeValue internalRemoteUploadTimeout() { + return internalRemoteUploadTimeout; + } + public ByteSizeValue getChunkSize() { return chunkSize; } @@ -298,6 +315,10 @@ public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { this.internalActionLongTimeout = internalActionLongTimeout; } + public void setInternalRemoteUploadTimeout(TimeValue internalRemoteUploadTimeout) { + this.internalRemoteUploadTimeout = internalRemoteUploadTimeout; + } + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { this.maxBytesPerSec = maxBytesPerSec; if (maxBytesPerSec.getBytes() <= 0) { diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 02fc8feefd698..a17779810239a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -8,16 +8,14 @@ package org.opensearch.indices.replication; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.action.ActionListener; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; @@ -35,9 +33,7 @@ */ public class PrimaryShardReplicationSource implements SegmentReplicationSource { - private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); - - private final 
RetryableTransportClient transportClient; + private final TransportService transportService; private final DiscoveryNode sourceNode; private final DiscoveryNode targetNode; @@ -52,12 +48,7 @@ public PrimaryShardReplicationSource( DiscoveryNode sourceNode ) { this.targetAllocationId = targetAllocationId; - this.transportClient = new RetryableTransportClient( - transportService, - sourceNode, - recoverySettings.internalActionRetryTimeout(), - logger - ); + this.transportService = transportService; this.sourceNode = sourceNode; this.targetNode = targetNode; this.recoverySettings = recoverySettings; @@ -69,10 +60,14 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - final Writeable.Reader<CheckpointInfoResponse> reader = CheckpointInfoResponse::new; - final ActionListener<CheckpointInfoResponse> responseListener = ActionListener.map(listener, r -> r); final CheckpointInfoRequest request = new CheckpointInfoRequest(replicationId, targetAllocationId, targetNode, checkpoint); - transportClient.executeRetryableAction(GET_CHECKPOINT_INFO, request, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_CHECKPOINT_INFO, + request, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionRetryTimeout()).build(), + new ActionListenerResponseHandler<>(listener, CheckpointInfoResponse::new, ThreadPool.Names.GENERIC) + ); } @Override @@ -88,8 +83,6 @@ public void getSegmentFiles( // MultiFileWriter takes care of progress tracking for downloads in this scenario // TODO: Move state management and tracking into replication methods and use chunking and data // copy mechanisms only from MultiFileWriter - final Writeable.Reader<GetSegmentFilesResponse> reader = GetSegmentFilesResponse::new; - final ActionListener<GetSegmentFilesResponse> responseListener = ActionListener.map(listener, r -> r); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( replicationId, targetAllocationId, @@ -97,20 +90,17 @@ public void getSegmentFiles( filesToFetch, checkpoint ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); - transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_SEGMENT_FILES, + request, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), + new ActionListenerResponseHandler<>(listener, GetSegmentFilesResponse::new, ThreadPool.Names.GENERIC) + ); } @Override public String getDescription() { return sourceNode.getName(); } - - @Override - public void cancel() { - transportClient.cancel(); - } - } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index cc71ef816e525..af764556b7549 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -83,6 +83,16 @@ protected void closeInternal() { } } + @Override + protected void onCancel(String reason) { + try { + notifyListener(new ReplicationFailedException(reason), false); + } finally { + source.cancel(); + cancellableThreads.cancel(reason); + } + } + @Override protected String getPrefix() { return REPLICATION_PREFIX + 
UUIDs.randomBase64UUID() + "."; @@ -320,16 +330,4 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) } } } - - /** - * Trigger a cancellation, this method will not close the target a subsequent call to #fail is required from target service. - */ - @Override - public void cancel(String reason) { - if (finished.get() == false) { - logger.trace(new ParameterizedMessage("Cancelling replication for target {}", description())); - cancellableThreads.cancel(reason); - source.cancel(); - } - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index d6db154a4e0e3..f28f829545d59 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -84,10 +84,6 @@ public class SegmentReplicationTargetService extends AbstractLifecycleComponent private final ClusterService clusterService; private final TransportService transportService; - public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { - return onGoingReplications.get(replicationId); - } - /** * The internal actions * @@ -158,6 +154,7 @@ protected void doStart() { @Override protected void doStop() { if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + assert onGoingReplications.size() == 0 : "Replication collection should be empty on shutdown"; clusterService.removeListener(this); } } @@ -201,7 +198,7 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { - onGoingReplications.requestCancel(indexShard.shardId(), "Shard closing"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard closing"); latestReceivedCheckpoint.remove(shardId); } } @@ -223,7 +220,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { if (oldRouting != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { - onGoingReplications.requestCancel(indexShard.shardId(), "Shard has been promoted to primary"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard has been promoted to primary"); latestReceivedCheckpoint.remove(indexShard.shardId()); } } @@ -255,6 +252,14 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { .orElseGet(() -> getlatestCompletedEventSegmentReplicationState(shardId)); } + public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { + return onGoingReplications.get(replicationId); + } + + public SegmentReplicationTarget get(ShardId shardId) { + return onGoingReplications.getOngoingReplicationTarget(shardId); + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. 
@@ -454,7 +459,13 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa latestPublishedCheckpoint ) ); - Runnable runnable = () -> onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + Runnable runnable = () -> { + // if we retry ensure the shard is not in the process of being closed. + // it will be removed from indexService's collection before the shard is actually marked as closed. + if (indicesService.getShardOrNull(replicaShard.shardId()) != null) { + onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + } + }; // Checks if we are using same thread and forks if necessary. if (thread == Thread.currentThread()) { threadPool.generic().execute(runnable); @@ -548,9 +559,6 @@ public ReplicationRunner(long replicationId) { @Override public void onFailure(Exception e) { - try (final ReplicationRef<SegmentReplicationTarget> ref = onGoingReplications.get(replicationId)) { - logger.error(() -> new ParameterizedMessage("Error during segment replication, {}", ref.get().description()), e); - } onGoingReplications.fail(replicationId, new ReplicationFailedException("Unexpected Error during replication", e), false); } diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java index 5185b740d90cb..a2c2137130587 100644 --- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java @@ -387,6 +387,7 @@ private static <T> Map<String, T> readMap(String processorType, String processor /** * Returns and removes the specified property as an {@link Object} from the specified configuration map. + * If the property is missing an {@link OpenSearchParseException} is thrown */ public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -396,6 +397,13 @@ public static Object readObject(String processorType, String processorTag, Map<S return value; } + /** + * Returns and removes the specified property as an {@link Object} from the specified configuration map. 
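+ * If the property is missing, {@code null} is returned.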
+ */ + public static Object readOptionalObject(Map<String, Object> configuration, String propertyName) { + return configuration.remove(propertyName); + } + public static OpenSearchException newConfigurationException( String processorType, String processorTag, diff --git a/server/src/main/java/org/opensearch/ingest/IngestDocument.java b/server/src/main/java/org/opensearch/ingest/IngestDocument.java index 10e9e64db561e..d975b0014de1f 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/opensearch/ingest/IngestDocument.java @@ -757,7 +757,7 @@ public static <K, V> Map<K, V> deepCopyMap(Map<K, V> source) { return (Map<K, V>) deepCopy(source); } - private static Object deepCopy(Object value) { + public static Object deepCopy(Object value) { if (value instanceof Map) { Map<?, ?> mapValue = (Map<?, ?>) value; Map<Object, Object> copy = new HashMap<>(mapValue.size()); diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index dc27ab0fb91c2..e3290bfec6905 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -216,6 +216,7 @@ private static boolean usingBundledJdkOrJre() { } } + @SuppressWarnings("removal") public static JvmInfo jvmInfo() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8510122c39fcb..1b8e4de3abe47 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -44,6 +44,7 @@ import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; +import org.opensearch.action.admin.indices.view.ViewService; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory; @@ -83,6 +84,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.StopWatch; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.inject.Injector; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Module; @@ -178,6 +181,7 @@ import org.opensearch.persistent.PersistentTasksService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.AnalysisPlugin; +import org.opensearch.plugins.CachePlugin; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.ClusterPlugin; import org.opensearch.plugins.CryptoKeyProviderPlugin; @@ -527,7 +531,11 @@ protected Node( */ this.environment = new Environment(settings, initialEnvironment.configDir(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); Environment.assertEquivalent(initialEnvironment, this.environment); - nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + if (DiscoveryNode.isSearchNode(settings) == false) { + nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + } else { + nodeEnvironment = new NodeEnvironment(settings, environment, new FileCacheCleaner(this::fileCache)); + } logger.info( "node name [{}], node ID [{}], 
cluster name [{}], roles {}", NODE_NAME_SETTING.get(tmpSettings), @@ -678,7 +686,6 @@ protected Node( ); // File cache will be initialized by the node once circuit breakers are in place. initializeFileCache(settings, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST)); - final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnvironment, fileCache); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, fileCache); pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { @@ -789,6 +796,8 @@ protected Node( final SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); + CacheModule cacheModule = new CacheModule(pluginsService.filterPlugins(CachePlugin.class), settings); + CacheService cacheService = cacheModule.getCacheService(); final IndicesService indicesService = new IndicesService( settings, pluginsService, @@ -812,7 +821,6 @@ protected Node( recoveryStateFactories, remoteDirectoryFactory, repositoriesServiceReference::get, - fileCacheCleaner, searchRequestStats, remoteStoreStatsTrackerFactory, recoverySettings @@ -862,6 +870,8 @@ protected Node( metadataCreateIndexService ); + final ViewService viewService = new ViewService(clusterService, client, null); + Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class) .stream() .flatMap( @@ -1230,6 +1240,7 @@ protected Node( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(AwarenessReplicaBalance.class).toInstance(awarenessReplicaBalance); b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); + b.bind(ViewService.class).toInstance(viewService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class) diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index ca2413a057a6b..33b182dd3cc97 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -15,18 +15,22 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Setting; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Supplier; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; + /** * Contains all the method needed for a remote store backed node lifecycle. 
*/ @@ -39,6 +43,33 @@ public class RemoteStoreNodeService { "remote_store.compatibility_mode", CompatibilityMode.STRICT.name(), CompatibilityMode::parseString, + value -> { + if (value == CompatibilityMode.MIXED + && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + " mixed mode is under an experimental feature and can be activated only by enabling " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options " + ); + } + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Direction> MIGRATION_DIRECTION_SETTING = new Setting<>( + "migration.direction", + Direction.NONE.name(), + Direction::parseString, + value -> { + if (value != Direction.NONE && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + " migration.direction is under an experimental feature and can be activated only by enabling " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options " + ); + } + }, Setting.Property.Dynamic, Setting.Property.NodeScope ); @@ -49,7 +80,8 @@ public class RemoteStoreNodeService { * @opensearch.internal */ public enum CompatibilityMode { - STRICT("strict"); + STRICT("strict"), + MIXED("mixed"); public final String mode; @@ -66,13 +98,38 @@ public static CompatibilityMode parseString(String compatibilityMode) { + compatibilityMode + "] compatibility mode is not supported. " + "supported modes are [" - + CompatibilityMode.values().toString() + + Arrays.toString(CompatibilityMode.values()) + "]" ); } } } + /** + * Migration Direction intended for docrep to remote store migration and vice versa + * + * @opensearch.internal + */ + public enum Direction { + REMOTE_STORE("remote_store"), + NONE("none"), + DOCREP("docrep"); + + public final String direction; + + Direction(String d) { + this.direction = d; + } + + public static Direction parseString(String direction) { + try { + return Direction.valueOf(direction.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("[" + direction + "] migration.direction is not supported."); + } + } + } + public RemoteStoreNodeService(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) { this.repositoriesService = repositoriesService; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/plugins/CachePlugin.java b/server/src/main/java/org/opensearch/plugins/CachePlugin.java new file mode 100644 index 0000000000000..d962ed1db14bf --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CachePlugin.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; + +import java.util.Map; + +/** + * Plugin to extend cache related classes + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface CachePlugin { + + /** + * Returns a map of cacheStoreType and a factory via which objects can be created on demand. + * For example: + * If there are two implementations of this plugin, lets say A and B, each may return below which can be + * aggregated by fetching all plugins. 
+ * + * A: Map.of(DISK, new ADiskCache.Factor(), + * ON_HEAP, new AOnHeapCache.Factor()) + * + * B: Map.of(ON_HEAP, new ADiskCache.Factor()) + * + * @return Map of cacheStoreType and an associated factory. + */ + Map<String, ICache.Factory> getCacheFactoryMap(); + + String getName(); +} diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index dc8fd6e604d72..79e57b3e8a0e8 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -32,20 +32,28 @@ package org.opensearch.plugins; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.json.JsonReadFeature; + import org.opensearch.Version; import org.opensearch.bootstrap.JarHell; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.json.JsonXContentParser; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.SemverRange; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -65,11 +73,15 @@ public class PluginInfo implements Writeable, ToXContentObject { public static final String OPENSEARCH_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; public static final String OPENSEARCH_PLUGIN_POLICY = "plugin-security.policy"; + private static final JsonFactory jsonFactory = new JsonFactory().configure( + JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES.mappedFeature(), + true + ); private final String name; private final String description; private final String version; - private final Version opensearchVersion; + private final List<SemverRange> opensearchVersionRanges; private final String javaVersion; private final String classname; private final String customFolderName; @@ -99,11 +111,41 @@ public PluginInfo( String customFolderName, List<String> extendedPlugins, boolean hasNativeController + ) { + this( + name, + description, + version, + List.of(SemverRange.fromString(opensearchVersion.toString())), + javaVersion, + classname, + customFolderName, + extendedPlugins, + hasNativeController + ); + } + + public PluginInfo( + String name, + String description, + String version, + List<SemverRange> opensearchVersionRanges, + String javaVersion, + String classname, + String customFolderName, + List<String> extendedPlugins, + boolean hasNativeController ) { this.name = name; this.description = description; this.version = version; - this.opensearchVersion = opensearchVersion; + // Ensure only one range is specified (for now) + if (opensearchVersionRanges.size() != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + this.opensearchVersionRanges = opensearchVersionRanges; this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; @@ -152,11 +194,16 @@ public PluginInfo( * @param in the stream * @throws IOException if an I/O 
exception occurred reading the plugin info from the stream */ + @SuppressWarnings("unchecked") public PluginInfo(final StreamInput in) throws IOException { this.name = in.readString(); this.description = in.readString(); this.version = in.readString(); - this.opensearchVersion = in.readVersion(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + this.opensearchVersionRanges = (List<SemverRange>) in.readGenericValue(); + } else { + this.opensearchVersionRanges = List.of(new SemverRange(in.readVersion(), SemverRange.RangeOperator.DEFAULT)); + } this.javaVersion = in.readString(); this.classname = in.readString(); this.customFolderName = in.readString(); @@ -169,7 +216,15 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(description); out.writeString(version); - out.writeVersion(opensearchVersion); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeGenericValue(opensearchVersionRanges); + } else { + /* + This works for the currently supported range notations (=,~). + As more notations are added, a suitable version must be picked. + */ + out.writeVersion(opensearchVersionRanges.get(0).getRangeVersion()); + } out.writeString(javaVersion); out.writeString(classname); if (customFolderName != null) { @@ -214,10 +269,49 @@ public static PluginInfo readFromProperties(final Path path) throws IOException } final String opensearchVersionString = propsMap.remove("opensearch.version"); - if (opensearchVersionString == null) { - throw new IllegalArgumentException("property [opensearch.version] is missing for plugin [" + name + "]"); + final String dependenciesValue = propsMap.remove("dependencies"); + if (opensearchVersionString == null && dependenciesValue == null) { + throw new IllegalArgumentException( + "Either [opensearch.version] or [dependencies] property must be specified for the plugin [" + name + "]" + ); + } + if (opensearchVersionString != null && dependenciesValue != null) { + throw new IllegalArgumentException( + "Only one of [opensearch.version] or [dependencies] property can be specified for the plugin [" + name + "]" + ); + } + + final List<SemverRange> opensearchVersionRanges = new ArrayList<>(); + if (opensearchVersionString != null) { + opensearchVersionRanges.add(SemverRange.fromString(opensearchVersionString)); + } else { + Map<String, String> dependenciesMap; + try ( + final JsonXContentParser parser = new JsonXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + jsonFactory.createParser(dependenciesValue) + ) + ) { + dependenciesMap = parser.mapStrings(); + } + if (dependenciesMap.size() != 1) { + throw new IllegalArgumentException( + "Exactly one dependency is allowed to be specified in plugin descriptor properties: " + dependenciesMap + ); + } + if (dependenciesMap.keySet().stream().noneMatch(s -> s.equals("opensearch"))) { + throw new IllegalArgumentException("Only opensearch is allowed to be specified as a plugin dependency: " + dependenciesMap); + } + String[] ranges = dependenciesMap.get("opensearch").split(","); + if (ranges.length != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + opensearchVersionRanges.add(SemverRange.fromString(ranges[0].trim())); } - final Version opensearchVersion = Version.fromString(opensearchVersionString); + final String javaVersionString = propsMap.remove("java.version"); if (javaVersionString == null) { throw new
IllegalArgumentException("property [java.version] is missing for plugin [" + name + "]"); @@ -273,7 +367,7 @@ public static PluginInfo readFromProperties(final Path path) throws IOException name, description, version, - opensearchVersion, + opensearchVersionRanges, javaVersionString, classname, customFolderName, @@ -337,12 +431,26 @@ public String getVersion() { } /** - * The version of OpenSearch the plugin was built for. + * The list of OpenSearch version ranges the plugin is compatible with. * - * @return an OpenSearch version + * @return a list of OpenSearch version ranges */ - public Version getOpenSearchVersion() { - return opensearchVersion; + public List<SemverRange> getOpenSearchVersionRanges() { + return opensearchVersionRanges; + } + + /** + * Pretty print the semver ranges and return the string. + * @return semver ranges string + */ + public String getOpenSearchVersionRangesString() { + if (opensearchVersionRanges == null || opensearchVersionRanges.isEmpty()) { + return ""; + } + if (opensearchVersionRanges.size() == 1) { + return opensearchVersionRanges.get(0).toString(); + } + return opensearchVersionRanges.stream().map(Object::toString).collect(Collectors.joining(",", "[", "]")); } /** @@ -378,7 +486,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field("name", name); builder.field("version", version); - builder.field("opensearch_version", opensearchVersion); + builder.field("opensearch_version", opensearchVersionRanges); builder.field("java_version", javaVersion); builder.field("description", description); builder.field("classname", classname); @@ -432,7 +540,7 @@ public String toString(String prefix) { .append("\n") .append(prefix) .append("OpenSearch Version: ") - .append(opensearchVersion) + .append(getOpenSearchVersionRangesString()) .append("\n") .append(prefix) .append("Java Version: ") diff --git a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java index e7d92016d4082..1bf8642d1112f 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java +++ b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java @@ -135,6 +135,7 @@ static String formatPermission(Permission permission) { /** * Parses plugin policy into a set of permissions. Each permission is formatted for output to users. */ + @SuppressWarnings("removal") public static Set<String> parsePermissions(Path file, Path tmpDir) throws IOException { // create a zero byte file for "comparison" // this is necessary because the default policy impl automatically grants two permissions: diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index cc9cc5b5b5fbf..a6eefd2f4fd17 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -52,6 +52,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.service.ReportingService; import org.opensearch.index.IndexModule; +import org.opensearch.semver.SemverRange; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.transport.TransportSettings; @@ -387,12 +388,12 @@ public static List<Path> findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current OpenSearch installation. 
*/ static void verifyCompatibility(PluginInfo info) { - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!isPluginVersionCompatible(info, Version.CURRENT)) { throw new IllegalArgumentException( "Plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getOpenSearchVersion() + + info.getOpenSearchVersionRangesString() + " but version " + Version.CURRENT + " is running" @@ -401,6 +402,16 @@ static void verifyCompatibility(PluginInfo info) { JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + public static boolean isPluginVersionCompatible(final PluginInfo pluginInfo, final Version coreVersion) { + // Core version must satisfy the semver range in plugin info + for (SemverRange range : pluginInfo.getOpenSearchVersionRanges()) { + if (!range.isSatisfiedBy(coreVersion)) { + return false; + } + } + return true; + } + static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { /* * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the @@ -682,6 +693,7 @@ static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Se } } + @SuppressWarnings("removal") private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) { String name = bundle.plugin.getName(); diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java index cc4d3c006d84c..afb6e530b0eec 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java @@ -39,6 +39,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -71,6 +72,11 @@ public RepositoriesModule( metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) ); + factories.put( + ReloadableFsRepository.TYPE, + metadata -> new ReloadableFsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + for (RepositoryPlugin repoPlugin : repoPlugins) { Map<String, Repository.Factory> newRepoTypes = repoPlugin.getRepositories( env, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 8a2260e1f6d90..18f4ab70024f4 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -1116,10 +1116,7 @@ private void executeStaleShardDelete( String indexId = fileToDeletePath[1]; String shardId = fileToDeletePath[2]; String shallowSnapBlob = fileToDeletePath[3]; - String snapshotUUID = shallowSnapBlob.substring( - SHALLOW_SNAPSHOT_PREFIX.length(), - shallowSnapBlob.length() - ".dat".length() - ); + String snapshotUUID = extractShallowSnapshotUUID(shallowSnapBlob).orElseThrow(); BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( @@ -1586,44 +1583,43 @@ 
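verifyCompatibility now delegates to the new PluginsService.isPluginVersionCompatible, which requires the running core version to satisfy every declared range and reports the pretty-printed range string on failure. Below is a self-contained sketch of that all-ranges-must-match rule, using a predicate-based stand-in rather than the real SemverRange class; with a single "=" or "~" range it reduces to the old exact-version check.

```java
import java.util.List;
import java.util.function.Predicate;

// Sketch of the compatibility rule in PluginsService.isPluginVersionCompatible:
// the running core version must satisfy every declared range (logical AND).
public final class CompatibilitySketch {

    record VersionRange(String label, Predicate<String> satisfiedBy) {}

    static boolean isCompatible(List<VersionRange> ranges, String coreVersion) {
        for (VersionRange range : ranges) {
            if (!range.satisfiedBy().test(coreVersion)) {
                return false; // one unsatisfied range fails the whole check
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // "~2.13.0" stands in for a SemverRange that accepts any 2.13.x patch release.
        VersionRange tilde213 = new VersionRange("~2.13.0", v -> v.startsWith("2.13."));
        System.out.println(isCompatible(List.of(tilde213), "2.13.1")); // true
        System.out.println(isCompatible(List.of(tilde213), "2.14.0")); // false
    }
}
```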
private void executeOneStaleIndexDelete( try { logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); if (remoteStoreLockManagerFactory != null) { - Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); - if (!shardBlobs.isEmpty()) { - for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { - Map<String, BlobMetadata> shardLevelBlobs = shardBlob.getValue().listBlobs(); - for (Map.Entry<String, BlobMetadata> shardLevelBlob : shardLevelBlobs.entrySet()) { - String blob = shardLevelBlob.getKey(); - String snapshotUUID = blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()); - if (blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) && blob.endsWith(".dat")) { - RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = - REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( - shardBlob.getValue(), - snapshotUUID, - namedXContentRegistry - ); - String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); - String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); - // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure - // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file - // and that would be used during next delete operation for releasing this stale lock file - RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()); - remoteStoreMetadataLockManager.release( - FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() + final Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); + for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { + for (String blob : shardBlob.getValue().listBlobs().keySet()) { + final Optional<String> snapshotUUID = extractShallowSnapshotUUID(blob); + if (snapshotUUID.isPresent()) { + RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = + REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( + shardBlob.getValue(), + snapshotUUID.get(), + namedXContentRegistry ); - if (!isIndexPresent(clusterService, indexUUID)) { - // this is a temporary solution where snapshot deletion triggers remote store side - // cleanup if index is already deleted. We will add a poller in future to take - // care of remote store side cleanup. 
- // see https://github.com/opensearch-project/OpenSearch/issues/8469 - new RemoteSegmentStoreDirectoryFactory( - remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool - ).newDirectory( - remoteStoreRepoForIndex, - indexUUID, - new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardBlob.getKey())) - ).close(); - } + String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); + String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); + // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure + // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file + // and that would be used during next delete operation for releasing this stale lock file + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepoForIndex, + indexUUID, + shardBlob.getKey() + ); + remoteStoreMetadataLockManager.release( + FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID.get()).build() + ); + if (!isIndexPresent(clusterService, indexUUID)) { + // this is a temporary solution where snapshot deletion triggers remote store side + // cleanup if index is already deleted. We will add a poller in future to take + // care of remote store side cleanup. + // see https://github.com/opensearch-project/OpenSearch/issues/8469 + new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardBlob.getKey())) + ).close(); } } } @@ -3362,12 +3358,7 @@ private static List<String> unusedBlobs( blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) ) == false) || (remoteStoreLockManagerFactory != null - ? (blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) - && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) - ) == false) - : false) + && extractShallowSnapshotUUID(blob).map(survivingSnapshotUUIDs::contains).orElse(false)) || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) || FsBlobContainer.isTempBlobName(blob) ) @@ -3509,6 +3500,13 @@ private static void failStoreIfCorrupted(Store store, Exception e) { } } + private static Optional<String> extractShallowSnapshotUUID(String blobName) { + if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX)) { + return Optional.of(blobName.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blobName.length() - ".dat".length())); + } + return Optional.empty(); + } + /** * The result of removing a snapshot from a shard folder in the repository. 
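The repeated prefix/suffix slicing of shallow snapshot blob names is consolidated into extractShallowSnapshotUUID, which returns an Optional so callers can filter non-matching blobs instead of re-checking prefixes inline. A small standalone sketch of the same pattern follows; the prefix and suffix literals are illustrative placeholders, and this version also checks the suffix explicitly, which the helper in the hunk assumes.

```java
import java.util.List;
import java.util.Optional;

// Sketch of the Optional-returning extraction pattern used by extractShallowSnapshotUUID.
// Prefix and suffix values are illustrative placeholders, not the exact repository constants.
public final class BlobNameSketch {

    static final String SHALLOW_SNAPSHOT_PREFIX = "shallow-snap-";
    static final String DATA_SUFFIX = ".dat";

    static Optional<String> extractShallowSnapshotUUID(String blobName) {
        if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX) && blobName.endsWith(DATA_SUFFIX)) {
            return Optional.of(blobName.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blobName.length() - DATA_SUFFIX.length()));
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        List<String> blobs = List.of("shallow-snap-abc123.dat", "snap-def456.dat", "index-0");
        // Only the first blob yields a UUID; the others map to Optional.empty().
        blobs.forEach(blob -> System.out.println(blob + " -> " + extractShallowSnapshotUUID(blob)));
    }
}
```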
*/ diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java index c06c805a39396..e8020a432a58a 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java @@ -8,18 +8,52 @@ package org.opensearch.repositories.fs; +import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Random; + /** - * Extension of {@link FsRepository} that can be reloaded inplace + * Extension of {@link FsRepository} that can be reloaded inplace , supports failing operation and slowing it down * * @opensearch.internal */ public class ReloadableFsRepository extends FsRepository { + public static final String TYPE = "reloadable-fs"; + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public static final Setting<Integer> REPOSITORIES_FAILRATE_SETTING = Setting.intSetting( + "repositories.fail.rate", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + + public static final Setting<Integer> REPOSITORIES_SLOWDOWN_SETTING = Setting.intSetting( + "repositories.slowdown", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + /** * Constructs a shared file system repository that is reloadable in-place. */ @@ -31,6 +65,11 @@ public ReloadableFsRepository( RecoverySettings recoverySettings ) { super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + fail = new FailSwitch(); + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown = new SlowDownWriteSwitch(); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + readRepositoryMetadata(); } @Override @@ -40,12 +79,124 @@ public boolean isReloadable() { @Override public void reload(RepositoryMetadata repositoryMetadata) { - if (isReloadable() == false) { - return; - } - super.reload(repositoryMetadata); + readRepositoryMetadata(); validateLocation(); readMetadata(); } + + private void readRepositoryMetadata() { + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + } + + protected BlobStore createBlobStore() throws Exception { + final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); + final Path locationFile = environment.resolveRepoFile(location); + return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail, slowDown); + } + + // A random integer from min-max (inclusive). 
+ public static int randomIntBetween(int min, int max) { + Random random = Randomness.get(); + return random.nextInt(max - min + 1) + min; + } + + static class FailSwitch { + private volatile int failRate; + private volatile boolean onceFailedFailAlways = false; + + public boolean fail() { + final int rnd = randomIntBetween(1, 100); + boolean fail = rnd <= failRate; + if (fail && onceFailedFailAlways) { + failAlways(); + } + return fail; + } + + public void failAlways() { + failRate = 100; + } + + public void failRate(int rate) { + failRate = rate; + } + + public void onceFailedFailAlways() { + onceFailedFailAlways = true; + } + } + + static class SlowDownWriteSwitch { + private volatile int sleepSeconds; + + public void setSleepSeconds(int sleepSeconds) { + this.sleepSeconds = sleepSeconds; + } + + public int getSleepSeconds() { + return sleepSeconds; + } + } + + private static class ThrowingBlobStore extends FsBlobStore { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, FailSwitch fail, SlowDownWriteSwitch slowDown) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail, slowDown); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + } + + private static class ThrowingBlobContainer extends FsBlobContainer { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, FailSwitch fail, SlowDownWriteSwitch slowDown) { + super(blobStore, blobPath, path); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) + throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); + } + + private void checkFailRateAndSleep(String blobName) throws IOException { + if (fail.fail() && blobName.contains(".dat") == false) { + throw new IOException("blob container throwing error"); + } + if (slowDown.getSleepSeconds() > 0) { + try { + Thread.sleep(slowDown.getSleepSeconds() * 1000L); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + } } diff --git a/server/src/main/java/org/opensearch/rest/MethodHandlers.java b/server/src/main/java/org/opensearch/rest/MethodHandlers.java index 8c29bf2e66036..30221705e1aba 100644 --- a/server/src/main/java/org/opensearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/opensearch/rest/MethodHandlers.java @@ -6,82 +6,24 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
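ReloadableFsRepository is now registered as its own "reloadable-fs" repository type (see the RepositoriesModule hunk above) and wraps its blob store so writes can be failed at a configurable rate (repositories.fail.rate) or slowed down (repositories.slowdown), with both values re-read on reload. A compact, runnable sketch of the pre-write check performed by the throwing blob container follows; the sleep is shortened to milliseconds and all names are illustrative.

```java
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

// Sketch of the FailSwitch / SlowDownWriteSwitch behaviour used by ReloadableFsRepository's
// ThrowingBlobContainer: before a write, maybe throw, maybe sleep. Values are illustrative.
public final class FailSwitchSketch {

    private volatile int failRatePercent;  // 0..100, like repositories.fail.rate
    private volatile long slowDownMillis;  // the real switch sleeps whole seconds

    void configure(int failRatePercent, long slowDownMillis) {
        this.failRatePercent = failRatePercent;
        this.slowDownMillis = slowDownMillis;
    }

    /** Mirrors checkFailRateAndSleep: ".dat" metadata-style blobs are never failed. */
    void beforeWrite(String blobName) throws IOException {
        int roll = ThreadLocalRandom.current().nextInt(1, 101); // 1..100 inclusive
        if (roll <= failRatePercent && !blobName.contains(".dat")) {
            throw new IOException("blob container throwing error for " + blobName);
        }
        if (slowDownMillis > 0) {
            try {
                Thread.sleep(slowDownMillis);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException("interrupted while slowing down write", e);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        FailSwitchSketch sketch = new FailSwitchSketch();
        sketch.configure(0, 10);           // no failures, 10 ms delay per write
        sketch.beforeWrite("__segment_0"); // sleeps briefly, then returns
        sketch.configure(100, 0);          // always fail data blobs
        try {
            sketch.beforeWrite("__segment_1");
        } catch (IOException expected) {
            System.out.println("write rejected as configured: " + expected.getMessage());
        }
    }
}
```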
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.rest; -import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; -import java.util.HashMap; -import java.util.Map; import java.util.Set; /** - * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. - * - * @opensearch.api + * A collection of REST method handlers. */ -final class MethodHandlers { - - private final String path; - private final Map<RestRequest.Method, RestHandler> methodHandlers; - - MethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { - this.path = path; - this.methodHandlers = new HashMap<>(methods.length); - for (RestRequest.Method method : methods) { - methodHandlers.put(method, handler); - } - } - - /** - * Add a handler for an additional array of methods. Note that {@code MethodHandlers} - * does not allow replacing the handler for an already existing method. - */ - MethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { - for (RestRequest.Method method : methods) { - RestHandler existing = methodHandlers.putIfAbsent(method, handler); - if (existing != null) { - throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); - } - } - return this; - } - +@PublicApi(since = "2.12.0") +public interface MethodHandlers { /** - * Returns the handler for the given method or {@code null} if none exists. + * Return a set of all valid HTTP methods for the particular path. */ - @Nullable - RestHandler getHandler(RestRequest.Method method) { - return methodHandlers.get(method); - } + Set<RestRequest.Method> getValidMethods(); /** - * Return a set of all valid HTTP methods for the particular path + * Returns the relative HTTP path of the set of method handlers. 
*/ - Set<RestRequest.Method> getValidMethods() { - return methodHandlers.keySet(); - } + String getPath(); } diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index cc48b59699a17..95abb9b3daeca 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -65,6 +65,7 @@ import java.io.IOException; import java.io.InputStream; import java.net.URI; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -107,7 +108,7 @@ public class RestController implements HttpServerTransport.Dispatcher { } } - private final PathTrie<MethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); + private final PathTrie<RestMethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); private final UnaryOperator<RestHandler> handlerWrapper; @@ -144,6 +145,16 @@ public RestController( ); } + /** + * Returns an iterator over registered REST method handlers. + * @return {@link Iterator} of {@link MethodHandlers} + */ + public Iterator<MethodHandlers> getAllHandlers() { + List<MethodHandlers> methodHandlers = new ArrayList<>(); + handlers.retrieveAll().forEachRemaining(methodHandlers::add); + return methodHandlers.iterator(); + } + /** * Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request. * @@ -221,7 +232,7 @@ protected void registerHandler(RestRequest.Method method, String path, RestHandl private void registerHandlerNoWrap(RestRequest.Method method, String path, RestHandler maybeWrappedHandler) { handlers.insertOrUpdate( path, - new MethodHandlers(path, maybeWrappedHandler, method), + new RestMethodHandlers(path, maybeWrappedHandler, method), (mHandlers, newMHandler) -> mHandlers.addMethods(maybeWrappedHandler, method) ); } @@ -392,10 +403,10 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel // Resolves the HTTP method and fails if the method is invalid requestMethod = request.method(); // Loop through all possible handlers, attempting to dispatch the request - Iterator<MethodHandlers> allHandlers = getAllHandlers(request.params(), rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(request.params(), rawPath); while (allHandlers.hasNext()) { final RestHandler handler; - final MethodHandlers handlers = allHandlers.next(); + final RestMethodHandlers handlers = allHandlers.next(); if (handlers == null) { handler = null; } else { @@ -423,7 +434,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel handleBadRequest(uri, requestMethod, channel); } - Iterator<MethodHandlers> getAllHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { + Iterator<RestMethodHandlers> getAllRestMethodHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { final Supplier<Map<String, String>> paramsSupplier; if (requestParamsRef == null) { paramsSupplier = () -> null; @@ -561,7 +572,7 @@ private boolean handleAuthenticateUser(final RestRequest request, final RestChan */ private Set<RestRequest.Method> getValidHandlerMethodSet(String rawPath) { Set<RestRequest.Method> validMethods = new HashSet<>(); - Iterator<MethodHandlers> allHandlers = getAllHandlers(null, rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(null, rawPath); while (allHandlers.hasNext()) { final MethodHandlers methodHandlers = 
allHandlers.next(); if (methodHandlers != null) { diff --git a/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java new file mode 100644 index 0000000000000..a430d8ace447c --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.rest; + +import org.opensearch.common.Nullable; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. + */ +final class RestMethodHandlers implements MethodHandlers { + + private final String path; + private final Map<RestRequest.Method, RestHandler> methodHandlers; + + RestMethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { + this.path = path; + this.methodHandlers = new HashMap<>(methods.length); + for (RestRequest.Method method : methods) { + methodHandlers.put(method, handler); + } + } + + /** + * Add a handler for an additional array of methods. Note that {@code MethodHandlers} + * does not allow replacing the handler for an already existing method. + */ + public RestMethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { + for (RestRequest.Method method : methods) { + RestHandler existing = methodHandlers.putIfAbsent(method, handler); + if (existing != null) { + throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); + } + } + return this; + } + + /** + * Returns the handler for the given method or {@code null} if none exists. + */ + @Nullable + public RestHandler getHandler(RestRequest.Method method) { + return methodHandlers.get(method); + } + + /** + * Return a set of all valid HTTP methods for the particular path. + */ + public Set<RestRequest.Method> getValidMethods() { + return methodHandlers.keySet(); + } + + /** + * Returns the relative HTTP path of the set of method handlers. 
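MethodHandlers becomes a public interface exposing only getValidMethods() and getPath(), the mutable per-path implementation moves to the package-private RestMethodHandlers, and RestController gains getAllHandlers() for walking every registered path. Below is a hedged sketch of how a consumer might summarize paths and their valid methods from such an iterator; the interface and data are simplified stand-ins, not the live controller.

```java
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Sketch of consuming the new public MethodHandlers contract: each entry exposes only
// its path and the set of valid HTTP methods. Types here are simplified stand-ins.
public final class MethodHandlersSketch {

    interface MethodHandlers {
        Set<String> getValidMethods(); // e.g. "GET", "POST"
        String getPath();              // e.g. "/{index}/_mapping"
    }

    record SimpleHandlers(String path, Set<String> methods) implements MethodHandlers {
        public Set<String> getValidMethods() { return methods; }
        public String getPath() { return path; }
    }

    /** Summarize every registered path, in the spirit of RestController#getAllHandlers. */
    static Map<String, Set<String>> summarize(Iterator<? extends MethodHandlers> handlers) {
        Map<String, Set<String>> byPath = new LinkedHashMap<>();
        handlers.forEachRemaining(h -> byPath.put(h.getPath(), h.getValidMethods()));
        return byPath;
    }

    public static void main(String[] args) {
        Iterator<SimpleHandlers> registered = List.of(
            new SimpleHandlers("/_remote/info", Set.of("GET")),
            new SimpleHandlers("/{index}/_mapping", Set.of("PUT", "POST"))
        ).iterator();
        summarize(registered).forEach((path, methods) -> System.out.println(path + " -> " + methods));
    }
}
```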
+ */ + public String getPath() { + return path; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java index 28edba4db387d..138f9fdf5c813 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -53,7 +53,7 @@ public final class RestRemoteClusterInfoAction extends BaseRestHandler { @Override public List<Route> routes() { - return singletonList(new Route(GET, "_remote/info")); + return singletonList(new Route(GET, "/_remote/info")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java index 8fdf000139d89..0d805f5f3bfb8 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java @@ -66,10 +66,10 @@ public class RestPutMappingAction extends BaseRestHandler { public List<Route> routes() { return unmodifiableList( asList( - new Route(POST, "/{index}/_mapping/"), - new Route(PUT, "/{index}/_mapping/"), - new Route(POST, "/{index}/_mappings/"), - new Route(PUT, "/{index}/_mappings/") + new Route(POST, "/{index}/_mapping"), + new Route(PUT, "/{index}/_mapping"), + new Route(POST, "/{index}/_mappings"), + new Route(PUT, "/{index}/_mappings") ) ); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java new file mode 100644 index 0000000000000..47be439a97fc4 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java @@ -0,0 +1,240 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
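The two route fixes above align handler paths with the convention the router expects: a leading slash and no trailing slash ("_remote/info" becomes "/_remote/info", and the trailing slash is dropped from the _mapping routes). The helper below is purely illustrative of that rule; the handlers themselves simply declare the corrected literals.

```java
// Illustrative normalization of REST route paths to the convention used above:
// a leading slash is required, a trailing slash (other than the root) is not.
public final class RoutePathSketch {

    static String normalize(String path) {
        String normalized = path.startsWith("/") ? path : "/" + path;
        while (normalized.length() > 1 && normalized.endsWith("/")) {
            normalized = normalized.substring(0, normalized.length() - 1);
        }
        return normalized;
    }

    public static void main(String[] args) {
        System.out.println(normalize("_remote/info"));       // /_remote/info
        System.out.println(normalize("/{index}/_mapping/")); // /{index}/_mapping
    }
}
```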
+ */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.rest.action.RestStatusToXContentListener; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.rest.action.search.RestSearchAction; + +import java.io.IOException; +import java.util.List; +import java.util.function.IntConsumer; + +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.POST; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** All rest handlers for view actions */ +@ExperimentalApi +public class RestViewAction { + + public static final String VIEW_NAME = "view_name"; + public static final String VIEW_NAME_PARAMETER = "{" + VIEW_NAME + "}"; + + /** Handler for create view */ + @ExperimentalApi + public static class CreateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views").method(POST).uniqueName(CreateViewAction.NAME).build()); + } + + @Override + public String getName() { + return CreateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request createViewAction = CreateViewAction.Request.fromXContent(parser); + + final ValidationException validationResult = createViewAction.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().createView(createViewAction, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for delete view */ + @ExperimentalApi + public static class DeleteViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(DELETE).uniqueName(DeleteViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return DeleteViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final DeleteViewAction.Request deleteRequest = new DeleteViewAction.Request(viewId); + + final ValidationException validationResult = deleteRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().deleteView(deleteRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for update view */ + 
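The new experimental RestViewAction registers NamedRoutes for the view CRUD and search endpoints; the create and delete handlers appear above, and the update, get, list, and search handlers follow below. The sketch here just summarizes the registered routes for orientation; the Route record is an illustrative stand-in, while the method/path pairs mirror the handlers in this class.

```java
import java.util.List;

// Summary sketch of the experimental view endpoints registered by RestViewAction.
// Route literals mirror the handlers in this class; the Route record is a stand-in
// for OpenSearch's NamedRoute, not the real class.
public final class ViewRoutesSketch {

    record Route(String method, String path, String purpose) {}

    public static void main(String[] args) {
        List<Route> viewRoutes = List.of(
            new Route("POST",   "/views",                     "create a view"),
            new Route("GET",    "/views/",                    "list view names"),
            new Route("GET",    "/views/{view_name}",         "get a view"),
            new Route("PUT",    "/views/{view_name}",         "update a view"),
            new Route("DELETE", "/views/{view_name}",         "delete a view"),
            new Route("GET",    "/views/{view_name}/_search", "search a view"),
            new Route("POST",   "/views/{view_name}/_search", "search a view")
        );
        viewRoutes.forEach(r -> System.out.printf("%-6s %-28s %s%n", r.method(), r.path(), r.purpose()));
    }
}
```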
@ExperimentalApi + public static class UpdateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(PUT).uniqueName(UpdateViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return UpdateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request updateRequest = UpdateViewAction.Request.fromXContent(parser, viewId); + + final ValidationException validationResult = updateRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().updateView(updateRequest, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for get view */ + @ExperimentalApi + public static class GetViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(GET).uniqueName(GetViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return GetViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final GetViewAction.Request getRequest = new GetViewAction.Request(viewId); + + final ValidationException validationResult = getRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().getView(getRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for get view */ + @ExperimentalApi + public static class ListViewNamesHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views/").method(GET).uniqueName(ListViewNamesAction.NAME).build()); + } + + @Override + public String getName() { + return ListViewNamesAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.listViewNames(new ListViewNamesAction.Request(), new RestToXContentListener<>(channel)); + } + } + + /** Handler for search view */ + @ExperimentalApi + public static class SearchViewHandler extends BaseRestHandler { + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(GET) + .uniqueName(SearchViewAction.NAME) + .build(), + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(POST) + .uniqueName(SearchViewAction.NAME) + .build() + ); + } + + @Override + public String getName() { + return SearchViewAction.NAME; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final SearchViewAction.Request viewSearchRequest = new SearchViewAction.Request(viewId, new SearchRequest()); + final IntConsumer setSize = size -> viewSearchRequest.source().size(size); + + request.withContentOrSourceParamParserOrNull( + parser -> RestSearchAction.parseSearchRequest( + viewSearchRequest, + request, + 
parser, + client.getNamedWriteableRegistry(), + setSize + ) + ); + + final ValidationException validationResult = viewSearchRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> { + final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(SearchViewAction.INSTANCE, viewSearchRequest, new RestStatusToXContentListener<>(channel)); + }; + } + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 23cc1cb507072..9dc711f804144 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -54,7 +54,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -582,31 +581,29 @@ protected Table getTableWithHeader(final RestRequest request) { "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops" ); table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); + table.addCell( + "search.concurrent_query_current", + "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); - table.addCell( - "search.concurrent_query_time", - "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); + table.addCell( + "search.concurrent_query_time", + "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); - table.addCell( - "search.concurrent_query_total", - "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total query phase ops" - ); - table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total query phase ops"); + table.addCell( + "search.concurrent_query_total", + "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total query phase ops" + ); + table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total query phase ops"); - table.addCell( - "search.concurrent_avg_slice_count", - "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query 
concurrency" - ); - table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); - } + table.addCell( + "search.concurrent_avg_slice_count", + "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); table.addCell( "search.scroll_current", @@ -916,19 +913,17 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getQueryCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCount()); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); - } + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCurrent()); table.addCell(primaryStats.getSearch() == null ? 
null : primaryStats.getSearch().getTotal().getScrollCurrent()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index dd3e0ba836557..e11012a23fce7 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -47,7 +47,6 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; @@ -304,24 +303,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell( - "search.concurrent_query_time", - "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell( - "search.concurrent_query_total", - "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" - ); - table.addCell( - "search.concurrent_avg_slice_count", - "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" - ); - } + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( "search.scroll_time", @@ -548,12 +545,10 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); - table.addCell(searchStats == null ? 
null : searchStats.getTotal().getConcurrentQueryCount()); - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); - } + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index d0d00e4c4596a..4cd10c6874e0a 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -44,7 +44,6 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.CommitStats; @@ -220,24 +219,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell( - "search.concurrent_query_time", - "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell( - "search.concurrent_query_total", - "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" - ); - table.addCell( - "search.concurrent_avg_slice_count", - "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" - ); - } + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll 
contexts"); table.addCell( "search.scroll_time", @@ -419,13 +416,11 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCount())); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); - } table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 080366e536da1..80dc34c4d5d68 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -86,10 +86,13 @@ public class RestSearchAction extends BaseRestHandler { */ public static final String TOTAL_HITS_AS_INT_PARAM = "rest_total_hits_as_int"; public static final String TYPED_KEYS_PARAM = "typed_keys"; + public static final String INCLUDE_NAMED_QUERIES_SCORE_PARAM = "include_named_queries_score"; private static final Set<String> RESPONSE_PARAMS; static { - final Set<String> responseParams = new HashSet<>(Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM)); + final Set<String> responseParams = new HashSet<>( + Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM, INCLUDE_NAMED_QUERIES_SCORE_PARAM) + ); RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); } @@ -209,6 +212,7 @@ public static void parseSearchRequest( searchRequest.pipeline(request.param("search_pipeline")); checkRestTotalHits(request, searchRequest); + request.paramAsBoolean(INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); if (searchRequest.pointInTimeBuilder() != null) { preparePointInTime(searchRequest, request, namedWriteableRegistry); @@ -286,6 +290,10 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); } + if (request.hasParam("include_named_queries_score")) { + searchSourceBuilder.includeNamedQueriesScores(request.paramAsBoolean("include_named_queries_score", false)); + } + if (request.hasParam("track_total_hits")) { if 
(Booleans.isBoolean(request.param("track_total_hits"))) { searchSourceBuilder.trackTotalHits(request.paramAsBoolean("track_total_hits", true)); diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 960b46d68977b..061aa2f6e5896 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -50,7 +50,6 @@ import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -149,6 +148,8 @@ final class DefaultSearchContext extends SearchContext { private SortAndFormats sort; private Float minimumScore; private boolean trackScores = false; // when sorting, track scores as well... + + private boolean includeNamedQueriesScore = false; private int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; private FieldDoc searchAfter; private CollapseContext collapse; @@ -636,6 +637,17 @@ public boolean trackScores() { return this.trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; @@ -950,9 +962,7 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * false: otherwise */ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - && (clusterService != null) - && (concurrentSearchExecutor != null)) { + if ((clusterService != null) && (concurrentSearchExecutor != null)) { return indexService.getIndexSettings() .getSettings() .getAsBoolean( diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index ca088203733c6..a99da674836f2 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -685,6 +685,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return this.value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } @@ -745,6 +750,11 @@ public boolean advanceExact(int parentDoc) throws IOException { public double doubleValue() throws IOException { return lastEmittedValue; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 10e65fca3afb5..6391353cfe5b1 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -64,19 +64,21 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceFieldMapper; import 
org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.fetch.subphase.highlight.HighlightField; import org.opensearch.search.lookup.SourceLookup; import org.opensearch.transport.RemoteClusterAware; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -120,7 +122,7 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do private SearchSortValues sortValues = SearchSortValues.EMPTY; - private String[] matchedQueries = Strings.EMPTY_ARRAY; + private Map<String, Float> matchedQueries = new HashMap<>(); private Explanation explanation; @@ -203,10 +205,20 @@ public SearchHit(StreamInput in) throws IOException { sortValues = new SearchSortValues(in); size = in.readVInt(); - if (size > 0) { - matchedQueries = new String[size]; + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + if (size > 0) { + Map<String, Float> tempMap = in.readMap(StreamInput::readString, StreamInput::readFloat); + matchedQueries = tempMap.entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .collect( + Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (oldValue, newValue) -> oldValue, LinkedHashMap::new) + ); + } + } else { + matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = in.readString(); + matchedQueries.put(in.readString(), Float.NaN); } } // we call the setter here because that also sets the local index parameter @@ -224,36 +236,6 @@ public SearchHit(StreamInput in) throws IOException { } } - private Map<String, DocumentField> readFields(StreamInput in) throws IOException { - Map<String, DocumentField> fields; - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = new DocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - fields = new HashMap<>(size); - for (int i = 0; i < size; i++) { - DocumentField field = new DocumentField(in); - fields.put(field.getName(), field); - } - fields = unmodifiableMap(fields); - } - return fields; - } - - private void writeFields(StreamOutput out, Map<String, DocumentField> fields) throws IOException { - if (fields == null) { - out.writeVInt(0); - } else { - out.writeVInt(fields.size()); - for (DocumentField field : fields.values()) { - field.writeTo(out); - } - } - } - private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); @Override @@ -286,11 +268,13 @@ public void writeTo(StreamOutput out) throws IOException { } sortValues.writeTo(out); - if (matchedQueries.length == 0) { - out.writeVInt(0); + out.writeVInt(matchedQueries.size()); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + if (!matchedQueries.isEmpty()) { + out.writeMap(matchedQueries, StreamOutput::writeString, StreamOutput::writeFloat); + } } else { - out.writeVInt(matchedQueries.length); - for (String matchedFilter : matchedQueries) { + for (String matchedFilter : matchedQueries.keySet()) { out.writeString(matchedFilter); } } @@ -458,11 +442,11 @@ public DocumentField field(String fieldName) { } /* - * Adds a new DocumentField to the map in case both parameters are not 
null. - * */ + * Adds a new DocumentField to the map in case both parameters are not null. + * */ public void setDocumentField(String fieldName, DocumentField field) { if (fieldName == null || field == null) return; - if (documentFields.size() == 0) this.documentFields = new HashMap<>(); + if (documentFields.isEmpty()) this.documentFields = new HashMap<>(); this.documentFields.put(fieldName, field); } @@ -475,7 +459,7 @@ public DocumentField removeDocumentField(String fieldName) { * were required to be loaded. */ public Map<String, DocumentField> getFields() { - if (metaFields.size() > 0 || documentFields.size() > 0) { + if (!metaFields.isEmpty() || !documentFields.isEmpty()) { final Map<String, DocumentField> fields = new HashMap<>(); fields.putAll(metaFields); fields.putAll(documentFields); @@ -560,14 +544,45 @@ public String getClusterAlias() { } public void matchedQueries(String[] matchedQueries) { - this.matchedQueries = matchedQueries; + if (matchedQueries != null) { + for (String query : matchedQueries) { + this.matchedQueries.put(query, Float.NaN); + } + } + } + + public void matchedQueriesWithScores(Map<String, Float> matchedQueries) { + if (matchedQueries != null) { + this.matchedQueries = matchedQueries; + } } /** * The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries. */ public String[] getMatchedQueries() { - return this.matchedQueries; + return matchedQueries == null ? new String[0] : matchedQueries.keySet().toArray(new String[0]); + } + + /** + * Returns the score of the provided named query if it matches. + * <p> + * If the 'include_named_queries_score' is not set, this method will return {@link Float#NaN} + * for each named query instead of a numerical score. + * </p> + * + * @param name The name of the query to retrieve the score for. + * @return The score of the named query, or {@link Float#NaN} if 'include_named_queries_score' is not set. + */ + public Float getMatchedQueryScore(String name) { + return getMatchedQueriesAndScores().get(name); + } + + /** + * @return The map of the named queries that matched and their associated score. + */ + public Map<String, Float> getMatchedQueriesAndScores() { + return matchedQueries == null ? 
Collections.emptyMap() : matchedQueries; } /** @@ -654,7 +669,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t for (DocumentField field : metaFields.values()) { // ignore empty metadata fields - if (field.getValues().size() == 0) { + if (field.getValues().isEmpty()) { continue; } // _ignored is the only multi-valued meta field @@ -670,10 +685,10 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } if (documentFields.isEmpty() == false && // ignore fields all together if they are all empty - documentFields.values().stream().anyMatch(df -> df.getValues().size() > 0)) { + documentFields.values().stream().anyMatch(df -> !df.getValues().isEmpty())) { builder.startObject(Fields.FIELDS); for (DocumentField field : documentFields.values()) { - if (field.getValues().size() > 0) { + if (!field.getValues().isEmpty()) { field.toXContent(builder, params); } } @@ -687,12 +702,21 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t builder.endObject(); } sortValues.toXContent(builder, params); - if (matchedQueries.length > 0) { - builder.startArray(Fields.MATCHED_QUERIES); - for (String matchedFilter : matchedQueries) { - builder.value(matchedFilter); + if (!matchedQueries.isEmpty()) { + boolean includeMatchedQueriesScore = params.paramAsBoolean(RestSearchAction.INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); + if (includeMatchedQueriesScore) { + builder.startObject(Fields.MATCHED_QUERIES); + for (Map.Entry<String, Float> entry : matchedQueries.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + } else { + builder.startArray(Fields.MATCHED_QUERIES); + for (String matchedFilter : matchedQueries.keySet()) { + builder.value(matchedFilter); + } + builder.endArray(); } - builder.endArray(); } if (getExplanation() != null) { builder.field(Fields._EXPLANATION); @@ -797,7 +821,27 @@ public static void declareInnerHitsParseFields(ObjectParser<Map<String, Object>, (p, c) -> parseInnerHits(p), new ParseField(Fields.INNER_HITS) ); - parser.declareStringArray((map, list) -> map.put(Fields.MATCHED_QUERIES, list), new ParseField(Fields.MATCHED_QUERIES)); + parser.declareField((p, map, context) -> { + XContentParser.Token token = p.currentToken(); + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + if (token == XContentParser.Token.START_OBJECT) { + String fieldName = null; + while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = p.currentName(); + } else if (token.isValue()) { + matchedQueries.put(fieldName, p.floatValue()); + } + } + } else if (token == XContentParser.Token.START_ARRAY) { + while (p.nextToken() != XContentParser.Token.END_ARRAY) { + matchedQueries.put(p.text(), Float.NaN); + } + } else { + throw new IllegalStateException("expected object or array but got [" + token + "]"); + } + map.put(Fields.MATCHED_QUERIES, matchedQueries); + }, new ParseField(Fields.MATCHED_QUERIES), ObjectParser.ValueType.OBJECT_ARRAY); parser.declareField( (map, list) -> map.put(Fields.SORT, list), SearchSortValues::fromXContent, @@ -828,7 +872,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { assert shardId.getIndexName().equals(index); searchHit.shard(new SearchShardTarget(nodeId, shardId, clusterAlias, OriginalIndices.NONE)); } else { - // these fields get set anyways when setting the shard target, + // these fields get set anyway when setting the shard target, // but we set them 
explicitly when we don't have enough info to rebuild the shard target searchHit.index = index; searchHit.clusterAlias = clusterAlias; @@ -842,10 +886,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null)); searchHit.explanation(get(Fields._EXPLANATION, values, null)); searchHit.setInnerHits(get(Fields.INNER_HITS, values, null)); - List<String> matchedQueries = get(Fields.MATCHED_QUERIES, values, null); - if (matchedQueries != null) { - searchHit.matchedQueries(matchedQueries.toArray(new String[0])); - } + searchHit.matchedQueriesWithScores(get(Fields.MATCHED_QUERIES, values, null)); return searchHit; } @@ -965,7 +1006,7 @@ public boolean equals(Object obj) { && Objects.equals(documentFields, other.documentFields) && Objects.equals(metaFields, other.metaFields) && Objects.equals(getHighlightFields(), other.getHighlightFields()) - && Arrays.equals(matchedQueries, other.matchedQueries) + && Objects.equals(getMatchedQueriesAndScores(), other.getMatchedQueriesAndScores()) && Objects.equals(explanation, other.explanation) && Objects.equals(shard, other.shard) && Objects.equals(innerHits, other.innerHits) @@ -985,7 +1026,7 @@ public int hashCode() { documentFields, metaFields, getHighlightFields(), - Arrays.hashCode(matchedQueries), + getMatchedQueriesAndScores(), explanation, shard, innerHits, diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 62d397de58187..88218896dceae 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -39,7 +39,6 @@ import org.opensearch.common.geo.ShapesAvailability; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ParseFieldRegistry; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -1279,7 +1278,7 @@ private SearchPlugin.ExecutorServiceProvider registerIndexSearcherExecutorProvid } } - if (provider == null && FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + if (provider == null) { provider = (ThreadPool threadPool) -> threadPool.executor(INDEX_SEARCHER); } return provider; diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 2c85fcbb25f35..62eb597e387e6 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -250,7 +250,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final Setting<Boolean> CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( "search.concurrent_segment_search.enabled", - true, + false, Property.Dynamic, Property.NodeScope ); @@ -1274,6 +1274,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } context.trackScores(source.trackScores()); + context.includeNamedQueriesScore(source.includeNamedQueriesScore()); if (source.trackTotalHitsUpTo() != null && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE && context.scrollContext() != null) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java new file mode 100644 index 0000000000000..e587b7f169e5f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java @@ -0,0 +1,497 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.FieldExistsQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Rounding; +import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.query.DateRangeIncludingNowQuery; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; +import org.opensearch.search.aggregations.bucket.composite.RoundingValuesSource; +import org.opensearch.search.aggregations.bucket.histogram.LongBounds; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.OptionalLong; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * Utility class to help rewrite aggregations into filters. + * Instead of the aggregation collecting documents one by one, the filters can count all matching documents in one pass. + * <p> + * Currently supported rewrite: + * <ul> + * <li> date histogram : date range filter. 
+ * Applied: DateHistogramAggregator, AutoDateHistogramAggregator, CompositeAggregator </li> + * </ul> + * + * @opensearch.internal + */ +public final class FastFilterRewriteHelper { + + private FastFilterRewriteHelper() {} + + private static final Logger logger = LogManager.getLogger(FastFilterRewriteHelper.class); + + private static final int MAX_NUM_FILTER_BUCKETS = 1024; + private static final Map<Class<?>, Function<Query, Query>> queryWrappers; + + // Initialize the wrapper map for unwrapping the query + static { + queryWrappers = new HashMap<>(); + queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery()); + queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); + queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); + queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); + } + + /** + * Recursively unwraps query into the concrete form + * for applying the optimization + */ + private static Query unwrapIntoConcreteQuery(Query query) { + while (queryWrappers.containsKey(query.getClass())) { + query = queryWrappers.get(query.getClass()).apply(query); + } + + return query; + } + + /** + * Finds the global min and max bounds of the field for the shard across all segments + * + * @return null if the field is empty or not indexed + */ + private static long[] getShardBounds(final SearchContext context, final String fieldName) throws IOException { + final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves(); + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + for (LeafReaderContext leaf : leaves) { + final PointValues values = leaf.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } + return new long[] { min, max }; + } + + /** + * Finds the min and max bounds of the field for the segment + * + * @return null if the field is empty or not indexed + */ + private static long[] getSegmentBounds(final LeafReaderContext context, final String fieldName) throws IOException { + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + final PointValues values = context.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } + return new long[] { min, max }; + } + + /** + * This method also acts as a pre-condition check for the optimization + * + * @return null if the processed query not as expected + */ + public static long[] getDateHistoAggBounds(final SearchContext context, final String fieldName) throws IOException { + final Query cq = unwrapIntoConcreteQuery(context.query()); + if (cq instanceof PointRangeQuery) { + final PointRangeQuery prq = (PointRangeQuery) cq; + final long[] indexBounds = getShardBounds(context, fieldName); + if (indexBounds == null) return null; + return getBoundsWithRangeQuery(prq, fieldName, indexBounds); + } else if (cq instanceof MatchAllDocsQuery) { + return getShardBounds(context, fieldName); + } else if (cq instanceof FieldExistsQuery) { + // when a range query covers all 
values of a shard, it will be rewritten to a field exists query + if (((FieldExistsQuery) cq).getField().equals(fieldName)) { + return getShardBounds(context, fieldName); + } + } + + return null; + } + + private static long[] getBoundsWithRangeQuery(PointRangeQuery prq, String fieldName, long[] indexBounds) { + // Ensure that the query and aggregation are on the same field + if (prq.getField().equals(fieldName)) { + // Minimum bound for aggregation is the max between query and global + long lower = Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]); + // Maximum bound for aggregation is the min between query and global + long upper = Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]); + if (lower > upper) { + return null; + } + return new long[] { lower, upper }; + } + + return null; + } + + /** + * Creates the date range filters for aggregations using the interval, min/max + * bounds and prepared rounding + */ + private static Weight[] createFilterForAggregations( + final SearchContext context, + final DateFieldMapper.DateFieldType fieldType, + final long interval, + final Rounding.Prepared preparedRounding, + long low, + final long high + ) throws IOException { + // Calculate the number of buckets using range and interval + long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + long prevRounded = roundedLow; + int bucketCount = 0; + while (roundedLow <= fieldType.convertNanosToMillis(high)) { + bucketCount++; + if (bucketCount > MAX_NUM_FILTER_BUCKETS) { + logger.debug("Max number of filters reached [{}], skip the fast filter optimization", MAX_NUM_FILTER_BUCKETS); + return null; + } + // Below rounding is needed as the interval could result in + // non-rounded values for something like calendar month + roundedLow = preparedRounding.round(roundedLow + interval); + if (prevRounded == roundedLow) break; // prevents getting into an infinite loop + prevRounded = roundedLow; + } + + Weight[] filters = null; + if (bucketCount > 0) { + filters = new Weight[bucketCount]; + roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + + int i = 0; + while (i < bucketCount) { + // Calculate the lower bucket bound + final byte[] lower = new byte[8]; + NumericUtils.longToSortableBytes(i == 0 ? low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0); + + // Calculate the upper bucket bound + roundedLow = preparedRounding.round(roundedLow + interval); + final byte[] upper = new byte[8]; + NumericUtils.longToSortableBytes(i + 1 == bucketCount ? 
high : + // Subtract -1 if the minimum is roundedLow as roundedLow itself + // is included in the next bucket + fieldType.convertRoundedMillisToNanos(roundedLow) - 1, upper, 0); + + filters[i++] = context.searcher().createWeight(new PointRangeQuery(fieldType.name(), lower, upper, 1) { + @Override + protected String toString(int dimension, byte[] value) { + return Long.toString(LongPoint.decodeDimension(value, 0)); + } + }, ScoreMode.COMPLETE_NO_SCORES, 1); + } + } + + return filters; + } + + /** + * Context object for fast filter optimization + * <p> + * Usage: first set aggregation type, then check isRewriteable, then buildFastFilter + */ + public static class FastFilterContext { + private boolean rewriteable = false; + private Weight[] filters = null; + private boolean filtersBuiltAtShardLevel = false; + + private AggregationType aggregationType; + private final SearchContext context; + + public FastFilterContext(SearchContext context) { + this.context = context; + } + + public AggregationType getAggregationType() { + return aggregationType; + } + + public void setAggregationType(AggregationType aggregationType) { + this.aggregationType = aggregationType; + } + + public boolean isRewriteable(final Object parent, final int subAggLength) { + boolean rewriteable = aggregationType.isRewriteable(parent, subAggLength); + logger.debug("Fast filter rewriteable: {} for shard {}", rewriteable, context.indexShard().shardId()); + this.rewriteable = rewriteable; + return rewriteable; + } + + public void buildFastFilter() throws IOException { + assert filters == null : "Filters should only be built once, but they are already built"; + this.filters = this.aggregationType.buildFastFilter(context); + if (filters != null) { + logger.debug("Fast filter built for shard {}", context.indexShard().shardId()); + filtersBuiltAtShardLevel = true; + } + } + + /** + * Built filters for a segment + */ + public Weight[] buildFastFilter(LeafReaderContext leaf) throws IOException { + Weight[] filters = this.aggregationType.buildFastFilter(leaf, context); + if (filters != null) { + logger.debug("Fast filter built for shard {} segment {}", context.indexShard().shardId(), leaf.ord); + } + return filters; + } + } + + /** + * Different types have different pre-conditions, filter building logic, etc. 
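+ * <p>
+ * Typical wiring, mirroring the date histogram aggregators changed in this PR (a sketch only; constructor
+ * arguments are abbreviated here):
+ * <pre>{@code
+ * fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context);
+ * fastFilterContext.setAggregationType(new DateHistogramAggregationType(fieldType, missing, hasScript, hardBounds));
+ * if (fastFilterContext.isRewriteable(parent, subAggregators.length)) {
+ *     fastFilterContext.buildFastFilter();
+ * }
+ * }</pre>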
+ */ + interface AggregationType { + + boolean isRewriteable(Object parent, int subAggLength); + + Weight[] buildFastFilter(SearchContext ctx) throws IOException; + + Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext ctx) throws IOException; + + default int getSize() { + return Integer.MAX_VALUE; + } + } + + /** + * For date histogram aggregation + */ + public static abstract class AbstractDateHistogramAggregationType implements AggregationType { + private final MappedFieldType fieldType; + private final boolean missing; + private final boolean hasScript; + private LongBounds hardBounds; + + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + this.fieldType = fieldType; + this.missing = missing; + this.hasScript = hasScript; + } + + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + this(fieldType, missing, hasScript); + this.hardBounds = hardBounds; + } + + @Override + public boolean isRewriteable(Object parent, int subAggLength) { + if (parent == null && subAggLength == 0 && !missing && !hasScript) { + if (fieldType != null && fieldType instanceof DateFieldMapper.DateFieldType) { + return fieldType.isSearchable(); + } + } + return false; + } + + @Override + public Weight[] buildFastFilter(SearchContext context) throws IOException { + long[] bounds = getDateHistoAggBounds(context, fieldType.name()); + logger.debug("Bounds are {} for shard {}", bounds, context.indexShard().shardId()); + return buildFastFilter(context, bounds); + } + + @Override + public Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext context) throws IOException { + long[] bounds = getSegmentBounds(leaf, fieldType.name()); + logger.debug("Bounds are {} for shard {} segment {}", bounds, context.indexShard().shardId(), leaf.ord); + return buildFastFilter(context, bounds); + } + + private Weight[] buildFastFilter(SearchContext context, long[] bounds) throws IOException { + bounds = processHardBounds(bounds); + if (bounds == null) { + return null; + } + assert bounds[0] <= bounds[1] : "Low bound should be less than high bound"; + + final Rounding rounding = getRounding(bounds[0], bounds[1]); + final OptionalLong intervalOpt = Rounding.getInterval(rounding); + if (intervalOpt.isEmpty()) { + return null; + } + final long interval = intervalOpt.getAsLong(); + + // process the after key of composite agg + processAfterKey(bounds, interval); + + return FastFilterRewriteHelper.createFilterForAggregations( + context, + (DateFieldMapper.DateFieldType) fieldType, + interval, + getRoundingPrepared(), + bounds[0], + bounds[1] + ); + } + + protected abstract Rounding getRounding(final long low, final long high); + + protected abstract Rounding.Prepared getRoundingPrepared(); + + protected void processAfterKey(long[] bound, long interval) {} + + protected long[] processHardBounds(long[] bounds) { + if (bounds != null) { + // Update min/max limit if user specified any hard bounds + if (hardBounds != null) { + if (hardBounds.getMin() > bounds[0]) { + bounds[0] = hardBounds.getMin(); + } + if (hardBounds.getMax() - 1 < bounds[1]) { + bounds[1] = hardBounds.getMax() - 1; // hard bounds max is exclusive + } + if (bounds[0] > bounds[1]) { + return null; + } + } + } + return bounds; + } + + public DateFieldMapper.DateFieldType getFieldType() { + assert fieldType instanceof DateFieldMapper.DateFieldType; + return (DateFieldMapper.DateFieldType) fieldType; + } + } + + public static 
boolean isCompositeAggRewriteable(CompositeValuesSourceConfig[] sourceConfigs) { + return sourceConfigs.length == 1 && sourceConfigs[0].valuesSource() instanceof RoundingValuesSource; + } + + public static long getBucketOrd(long bucketOrd) { + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + } + + return bucketOrd; + } + + /** + * Try to get the bucket doc counts from the fast filters for the aggregation + * <p> + * Usage: invoked at segment level — in getLeafCollector of aggregator + * + * @param incrementDocCount takes in the bucket key value and the bucket count + */ + public static boolean tryFastFilterAggregation( + final LeafReaderContext ctx, + FastFilterContext fastFilterContext, + final BiConsumer<Long, Integer> incrementDocCount + ) throws IOException { + if (fastFilterContext == null) return false; + if (!fastFilterContext.rewriteable) { + return false; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + logger.debug( + "Shard {} segment {} has at least one document with _doc_count field, skip fast filter optimization", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + return false; + } + + // if no filters built at shard level (see getDateHistoAggBounds method for possible reasons) + // check if the query is functionally match-all at segment level + if (!fastFilterContext.filtersBuiltAtShardLevel && !segmentMatchAll(fastFilterContext.context, ctx)) { + return false; + } + Weight[] filters = fastFilterContext.filters; + if (filters == null) { + logger.debug( + "Shard {} segment {} functionally match all documents. Build the fast filter", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + filters = fastFilterContext.buildFastFilter(ctx); + if (filters == null) { + return false; + } + } + + final int[] counts = new int[filters.length]; + int i; + for (i = 0; i < filters.length; i++) { + counts[i] = filters[i].count(ctx); + if (counts[i] == -1) { + // Cannot use the optimization if any of the counts + // is -1 indicating the segment might have deleted documents + return false; + } + } + + int s = 0; + int size = fastFilterContext.aggregationType.getSize(); + for (i = 0; i < filters.length; i++) { + if (counts[i] > 0) { + long bucketKey = i; // the index of filters is the key for filters aggregation + if (fastFilterContext.aggregationType instanceof AbstractDateHistogramAggregationType) { + final DateFieldMapper.DateFieldType fieldType = + ((AbstractDateHistogramAggregationType) fastFilterContext.aggregationType).getFieldType(); + bucketKey = fieldType.convertNanosToMillis( + NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) + ); + } + incrementDocCount.accept(bucketKey, counts[i]); + s++; + if (s > size) { + break; + } + } + } + + logger.debug("Fast filter optimization applied to shard {} segment {}", fastFilterContext.context.indexShard().shardId(), ctx.ord); + return true; + } + + private static boolean segmentMatchAll(SearchContext ctx, LeafReaderContext leafCtx) throws IOException { + Weight weight = ctx.searcher().createWeight(ctx.query(), ScoreMode.COMPLETE_NO_SCORES, 1f); + return weight != null && weight.count(leafCtx) == leafCtx.reader().numDocs(); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 5e8791441d83a..e57acba5bc0ad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -44,7 +44,9 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.bucket.filter.FilterAggregatorFactory; import org.opensearch.search.aggregations.bucket.nested.NestedAggregatorFactory; +import org.opensearch.search.aggregations.bucket.nested.ReverseNestedAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import java.io.IOException; @@ -240,14 +242,16 @@ public BucketCardinality bucketCardinality() { * this aggregator or the instance of the parent's factory that is incompatible with * the composite aggregation. */ - private AggregatorFactory checkParentIsNullOrNested(AggregatorFactory factory) { + private static AggregatorFactory checkParentIsSafe(AggregatorFactory factory) { if (factory == null) { return null; - } else if (factory instanceof NestedAggregatorFactory) { - return checkParentIsNullOrNested(factory.getParent()); - } else { - return factory; - } + } else if (factory instanceof NestedAggregatorFactory + || factory instanceof FilterAggregatorFactory + || factory instanceof ReverseNestedAggregatorFactory) { + return checkParentIsSafe(factory.getParent()); + } else { + return factory; + } } private static void validateSources(List<CompositeValuesSourceBuilder<?>> sources) { @@ -278,7 +282,7 @@ protected AggregatorFactory doBuild( AggregatorFactory parent, AggregatorFactories.Builder subfactoriesBuilder ) throws IOException { - AggregatorFactory invalid = checkParentIsNullOrNested(parent); + AggregatorFactory invalid = checkParentIsSafe(parent); if (invalid != null) { throw new IllegalArgumentException( "[composite] aggregation cannot be used with a parent aggregation of" diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 2ff79fb623def..4af14ab014db5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -80,6 +80,7 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return false; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 317c2a357bac5..b97c814cdf645 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -56,7 +56,9 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.Bits; +import 
org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.RoaringDocIdSet; +import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; import org.opensearch.index.IndexSortConfig; import org.opensearch.lucene.queries.SearchAfterSortedDocQuery; @@ -71,7 +73,9 @@ import org.opensearch.search.aggregations.MultiBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; +import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.searchafter.SearchAfterBuilder; import org.opensearch.search.sort.SortAndFormats; @@ -80,6 +84,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.LongUnaryOperator; @@ -111,6 +116,10 @@ final class CompositeAggregator extends BucketsAggregator { private boolean earlyTerminated; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + private LongKeyedBucketOrds bucketOrds = null; + private Rounding.Prepared preparedRounding = null; + CompositeAggregator( String name, AggregatorFactories factories, @@ -154,12 +163,64 @@ final class CompositeAggregator extends BucketsAggregator { } this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey); this.rawAfterKey = rawAfterKey; + + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + if (!FastFilterRewriteHelper.isCompositeAggRewriteable(sourceConfigs)) return; + fastFilterContext.setAggregationType(new CompositeAggregationType()); + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + // bucketOrds is used for saving date histogram results + bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), CardinalityUpperBound.ONE); + preparedRounding = ((CompositeAggregationType) fastFilterContext.getAggregationType()).getRoundingPrepared(); + fastFilterContext.buildFastFilter(); + } + } + + /** + * Currently the filter rewrite is only supported for date histograms + */ + private class CompositeAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + private final RoundingValuesSource valuesSource; + private long afterKey = -1L; + + public CompositeAggregationType() { + super(sourceConfigs[0].fieldType(), sourceConfigs[0].missingBucket(), sourceConfigs[0].hasScript()); + this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); + if (rawAfterKey != null) { + assert rawAfterKey.size() == 1 && formats.size() == 1; + this.afterKey = formats.get(0).parseLong(rawAfterKey.get(0).toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); + } + } + + public Rounding getRounding(final long low, final long high) { + return valuesSource.getRounding(); + } + + public Rounding.Prepared getRoundingPrepared() { + return valuesSource.getPreparedRounding(); + } + + @Override + protected void processAfterKey(long[] bound, long interval) { + // afterKey is the last bucket key in previous response, and the bucket key + // is the minimum of all values in the bucket, so need to add the interval + if (afterKey != -1L) { + bound[0] = afterKey + 
interval; + } + } + + @Override + public int getSize() { + return size; + } } @Override protected void doClose() { try { Releasables.close(queue); + Releasables.close(bucketOrds); } finally { Releasables.close(sources); } @@ -187,12 +248,14 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } int num = Math.min(size, queue.size()); - final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + long[] bucketOrdsToCollect = new long[queue.size()]; for (int i = 0; i < queue.size(); i++) { bucketOrdsToCollect[i] = i; } InternalAggregations[] subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect); + while (queue.size() > 0) { int slot = queue.pop(); CompositeKey key = queue.toCompositeKey(slot); @@ -208,6 +271,43 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I aggs ); } + + // Build results from fast filters optimization + if (bucketOrds != null) { + // CompositeKey is the value of bucket key + final Map<CompositeKey, InternalComposite.InternalBucket> bucketMap = new HashMap<>(); + // Some segments may not be optimized, so buckets may contain results from the queue. + for (InternalComposite.InternalBucket internalBucket : buckets) { + bucketMap.put(internalBucket.getRawKey(), internalBucket); + } + // Loop over the buckets in the bucketOrds, and populate the map accordingly + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(0); + while (ordsEnum.next()) { + Long bucketKeyValue = ordsEnum.value(); + CompositeKey key = new CompositeKey(bucketKeyValue); + if (bucketMap.containsKey(key)) { + long docCount = bucketDocCount(ordsEnum.ord()) + bucketMap.get(key).getDocCount(); + bucketMap.get(key).setDocCount(docCount); + } else { + InternalComposite.InternalBucket bucket = new InternalComposite.InternalBucket( + sourceNames, + formats, + key, + reverseMuls, + missingOrders, + bucketDocCount(ordsEnum.ord()), + buildEmptySubAggregations() + ); + bucketMap.put(key, bucket); + } + } + // since a map is not sorted structure, sort it before transform back to buckets + List<InternalComposite.InternalBucket> bucketList = new ArrayList<>(bucketMap.values()); + CollectionUtil.introSort(bucketList, InternalComposite.InternalBucket::compareKey); + buckets = bucketList.subList(0, Math.min(size, bucketList.size())).toArray(InternalComposite.InternalBucket[]::new); + num = buckets.length; + } + CompositeKey lastBucket = num > 0 ? buckets[num - 1].getRawKey() : null; return new InternalAggregation[] { new InternalComposite( @@ -296,7 +396,7 @@ private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException if (indexSortField.getReverse() != (source.reverseMul == -1)) { if (i == 0) { - // the leading index sort matches the leading source field but the order is reversed + // the leading index sort matches the leading source field, but the order is reversed, // so we don't check the other sources. return new Sort(indexSortField); } @@ -304,8 +404,8 @@ private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException } sortFields.add(indexSortField); if (sourceConfig.valuesSource() instanceof RoundingValuesSource) { - // the rounding "squashes" many values together, that breaks the ordering of sub-values - // so we ignore subsequent source even if they match the index sort. 
+ // the rounding "squashes" many values together, that breaks the ordering of sub-values, + // so we ignore the subsequent sources even if they match the index sort. break; } } @@ -448,6 +548,16 @@ private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) t @Override protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + finishLeaf(); boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR; @@ -477,9 +587,10 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucket docIdSetBuilder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc()); } if (rawAfterKey != null && sortPrefixLen > 0) { - // We have an after key and index sort is applicable so we jump directly to the doc - // that is after the index sort prefix using the rawAfterKey and we start collecting - // document from there. + // We have an after key and index sort is applicable, so we jump directly to the doc + // after the index sort prefix using the rawAfterKey and we start collecting + // documents from there. + assert indexSortPrefix != null; processLeafFromQuery(ctx, indexSortPrefix); throw new CollectionTerminatedException(); } else { @@ -507,6 +618,8 @@ public void collect(int doc, long bucket) throws IOException { try { long docCount = docCountProvider.getDocCount(doc); if (queue.addIfCompetitive(indexSortPrefix, docCount)) { + // one doc may contain multiple values, we iterate over and collect one by one + // so the same doc can appear multiple times here if (builder != null && lastDoc != doc) { builder.add(doc); lastDoc = doc; @@ -569,7 +682,7 @@ private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollec @Override public void collect(int doc, long zeroBucket) throws IOException { assert zeroBucket == 0; - Integer slot = queue.compareCurrent(); + Integer slot = queue.getCurrentSlot(); if (slot != null) { // The candidate key is a top bucket. // We can defer the collection of this document/bucket to the sub collector diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java index 5ddeb22d33a6f..338ebdc66eef7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java @@ -44,7 +44,7 @@ * * @opensearch.internal */ -class CompositeKey implements Writeable { +public class CompositeKey implements Writeable { private final Comparable[] values; CompositeKey(Comparable... 
values) { @@ -64,11 +64,11 @@ Comparable[] values() { return values; } - int size() { + public int size() { return values.length; } - Comparable get(int pos) { + public Comparable get(int pos) { assert pos < values.length; return values[pos]; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 6ee1682a7b196..2c4d451322bca 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -47,6 +47,8 @@ /** * A specialized {@link PriorityQueue} implementation for composite buckets. + * Think of this as a max heap that holds the top N smallest bucket slots in order. + * Each slot holds the values of the composite bucket key it represents. * * @opensearch.internal */ @@ -77,7 +79,7 @@ public int hashCode() { private final BigArrays bigArrays; private final int maxSize; - private final Map<Slot, Integer> map; + private final Map<Slot, Integer> map; // to quickly find the slot for a value private final SingleDimensionValuesSource<?>[] arrays; private LongArray docCounts; @@ -108,7 +110,7 @@ public int hashCode() { @Override protected boolean lessThan(Integer a, Integer b) { - return compare(a, b) > 0; + return compare(a, b) > 0; // max heap } /** @@ -119,10 +121,10 @@ boolean isFull() { } /** - * Compares the current candidate with the values in the queue and returns + * Tries to get the slot of the current/candidate values in the queue and returns * the slot if the candidate is already in the queue or null if the candidate is not present. */ - Integer compareCurrent() { + Integer getCurrentSlot() { return map.get(new Slot(CANDIDATE_SLOT)); } @@ -281,32 +283,34 @@ boolean addIfCompetitive(long inc) { */ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { // checks if the candidate key is competitive - Integer topSlot = compareCurrent(); - if (topSlot != null) { + Integer curSlot = getCurrentSlot(); + if (curSlot != null) { // this key is already in the top N, skip it - docCounts.increment(topSlot, inc); + docCounts.increment(curSlot, inc); return true; } + if (afterKeyIsSet) { int cmp = compareCurrentWithAfter(); if (cmp <= 0) { if (indexSortSourcePrefix < 0 && cmp == indexSortSourcePrefix) { - // the leading index sort is in the reverse order of the leading source + // the leading index sort and the leading source order are both reversed, // so we can early terminate when we reach a document that is smaller // than the after key (collected on a previous page). throw new CollectionTerminatedException(); } - // key was collected on a previous page, skip it (>= afterKey). + // the key was collected on a previous page, skip it. return false; } } + + // the heap is full, check if the candidate key is larger than the max heap top if (size() >= maxSize) { - // the tree map is full, check if the candidate key should be kept int cmp = compare(CANDIDATE_SLOT, top()); if (cmp > 0) { if (cmp <= indexSortSourcePrefix) { - // index sort guarantees that there is no key greater or equal than the - // current one in the subsequent documents so we can early terminate. + // index sort guarantees the following documents will have a key larger than the current candidate, + // so we can early terminate. 
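+ // (CollectionTerminatedException is the standard Lucene signal that collection for the current segment can stop early)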
throw new CollectionTerminatedException(); } // the candidate key is not competitive, skip it. @@ -324,7 +328,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { } else { newSlot = size(); } - // move the candidate key to its new slot + // move the candidate key to its new slot by copy its values to the new slot copyCurrent(newSlot, inc); map.put(new Slot(newSlot), newSlot); add(newSlot); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 788a4ddc15374..5289b3a34ab34 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -156,7 +156,7 @@ public MissingOrder missingOrder() { /** * Returns true if the source contains a script that can change the value. */ - protected boolean hasScript() { + public boolean hasScript() { return hasScript; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index fd94ba355238a..3926ce9bbecb7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -298,7 +298,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { // TODO once composite is plugged in to the values source registry or at least understands Date values source types use it // here Rounding.Prepared preparedRounding = rounding.prepareForUnknown(); - RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding); + RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding, rounding); // is specified in the builder. final DocValueFormat docValueFormat = format == null ? 
DocValueFormat.RAW : valuesSourceConfig.format(); final MappedFieldType fieldType = valuesSourceConfig.fieldType(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 9f8a4cff5f3fc..43f1ad32a66f4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -339,7 +339,7 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern KeyComparable<InternalBucket> { private final CompositeKey key; - private final long docCount; + private long docCount; private final InternalAggregations aggregations; private final transient int[] reverseMuls; private final transient MissingOrder[] missingOrders; @@ -436,6 +436,10 @@ public long getDocCount() { return docCount; } + public void setDocCount(long docCount) { + this.docCount = docCount; + } + @Override public Aggregations getAggregations() { return aggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java index 3d6730203b6ae..dc130eb54c0ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -68,6 +68,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade // no value for the field return DocIdSet.EMPTY; } + long lowerBucket = Long.MIN_VALUE; Comparable lowerValue = queue.getLowerValueLeadSource(); if (lowerValue != null) { @@ -76,7 +77,6 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } lowerBucket = (Long) lowerValue; } - long upperBucket = Long.MAX_VALUE; Comparable upperValue = queue.getUpperValueLeadSource(); if (upperValue != null) { @@ -85,6 +85,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } upperBucket = (Long) upperValue; } + DocIdSetBuilder builder = fillDocIdSet ? 
new DocIdSetBuilder(context.reader().maxDoc(), values, field) : null; Visitor visitor = new Visitor(context, queue, builder, values.getBytesPerDimension(), lowerBucket, upperBucket); try { @@ -146,6 +147,7 @@ public void visit(int docID, byte[] packedValue) throws IOException { } long bucket = bucketFunction.applyAsLong(packedValue); + // process previous bucket when new bucket appears if (first == false && bucket != lastBucket) { final DocIdSet docIdSet = bucketDocsBuilder.build(); if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) && @@ -182,13 +184,13 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue return PointValues.Relation.CELL_OUTSIDE_QUERY; } } - if (upperBucket != Long.MAX_VALUE) { long minBucket = bucketFunction.applyAsLong(minPackedValue); if (minBucket > upperBucket) { return PointValues.Relation.CELL_OUTSIDE_QUERY; } } + return PointValues.Relation.CELL_CROSSES_QUERY; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 89315724ff9ed..3f5cf919f1755 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -47,17 +47,19 @@ * * @opensearch.internal */ -class RoundingValuesSource extends ValuesSource.Numeric { +public class RoundingValuesSource extends ValuesSource.Numeric { private final ValuesSource.Numeric vs; - private final Rounding.Prepared rounding; + private final Rounding.Prepared preparedRounding; + private final Rounding rounding; /** - * - * @param vs The original values source - * @param rounding How to round the values + * @param vs The original values source + * @param preparedRounding How to round the values + * @param rounding The rounding strategy */ - RoundingValuesSource(Numeric vs, Rounding.Prepared rounding) { + RoundingValuesSource(Numeric vs, Rounding.Prepared preparedRounding, Rounding rounding) { this.vs = vs; + this.preparedRounding = preparedRounding; this.rounding = rounding; } @@ -71,8 +73,16 @@ public boolean isBigInteger() { return false; } + public Rounding.Prepared getPreparedRounding() { + return preparedRounding; + } + + public Rounding getRounding() { + return rounding; + } + public long round(long value) { - return rounding.round(value); + return preparedRounding.round(value); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index a0a636c121e12..db21b384c77ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -56,7 +56,7 @@ public class FilterAggregatorFactory extends AggregatorFactory { private Weight weight; - private Query filter; + private final Query filter; public FilterAggregatorFactory( String name, @@ -85,7 +85,7 @@ public Weight getWeight() { try { weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { - throw new AggregationInitializationException("Failed to initialse filter", e); + throw new AggregationInitializationException("Failed 
to initialise filter", e); } } return weight; @@ -98,7 +98,7 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map<String, Object> metadata ) throws IOException { - return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, cardinality, metadata); + return new FilterAggregator(name, this::getWeight, factories, searchContext, parent, cardinality, metadata); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index a71c15d551927..12aefc540e75c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -33,8 +33,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.Prepared; @@ -42,7 +42,7 @@ import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -53,6 +53,7 @@ import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator; import org.opensearch.search.aggregations.bucket.DeferringBucketCollector; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.MergingBucketsDeferringCollector; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; @@ -127,14 +128,14 @@ static AutoDateHistogramAggregator build( * {@link MergingBucketsDeferringCollector#mergeBuckets(long[])}. 
*/ private MergingBucketsDeferringCollector deferringCollector; - private final Weight[] filters; - private final DateFieldMapper.DateFieldType fieldType; protected final RoundingInfo[] roundingInfos; protected final int targetBuckets; protected int roundingIdx; protected Rounding.Prepared preparedRounding; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + private AutoDateHistogramAggregator( String name, AggregatorFactories factories, @@ -156,45 +157,53 @@ private AutoDateHistogramAggregator( this.roundingPreparer = roundingPreparer; this.preparedRounding = prepareRounding(0); - FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext( - parent(), - subAggregators.length, - context, - b -> getMinimumRounding(b[0], b[1]), - // Passing prepared rounding as supplier to ensure the correct prepared - // rounding is set as it is done during getMinimumRounding - () -> preparedRounding, - valuesSourceConfig, - fc -> FilterRewriteHelper.getAggregationBounds(context, fc.field()) + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + fastFilterContext.setAggregationType( + new AutoHistogramAggregationType( + valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null + ) ); - if (filterContext != null) { - fieldType = filterContext.fieldType; - filters = filterContext.filters; - } else { - fieldType = null; - filters = null; + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter(); } } - private Rounding getMinimumRounding(final long low, final long high) { - // max - min / targetBuckets = bestDuration - // find the right innerInterval this bestDuration belongs to - // since we cannot exceed targetBuckets, bestDuration should go up, - // so the right innerInterval should be an upper bound - long bestDuration = (high - low) / targetBuckets; - while (roundingIdx < roundingInfos.length - 1) { - final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; - final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1]; - // If the interval duration is covered by the maximum inner interval, - // we can start with this outer interval for creating the buckets - if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) { - break; + private class AutoHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public AutoHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + super(fieldType, missing, hasScript); + } + + @Override + protected Rounding getRounding(final long low, final long high) { + // max - min / targetBuckets = bestDuration + // find the right innerInterval this bestDuration belongs to + // since we cannot exceed targetBuckets, bestDuration should go up, + // so the right innerInterval should be an upper bound + long bestDuration = (high - low) / targetBuckets; + // reset so this function is idempotent + roundingIdx = 0; + while (roundingIdx < roundingInfos.length - 1) { + final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; + final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1]; + // If the interval duration is covered by the maximum inner interval, + // we can start with this outer interval for creating the buckets + if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) { + break; + } + roundingIdx++; } - 
roundingIdx++; + + preparedRounding = prepareRounding(roundingIdx); + return roundingInfos[roundingIdx].rounding; } - preparedRounding = prepareRounding(roundingIdx); - return roundingInfos[roundingIdx].rounding; + @Override + protected Prepared getRoundingPrepared() { + return preparedRounding; + } } protected abstract LongKeyedBucketOrds getBucketOrds(); @@ -226,28 +235,21 @@ public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBuc return LeafBucketCollector.NO_OP_COLLECTOR; } + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(getBucketOrds().add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + final SortedNumericDocValues values = valuesSource.longValues(ctx); final LeafBucketCollector iteratingCollector = getLeafCollector(values, sub); - - // Need to be declared as final and array for usage within the - // LeafBucketCollectorBase subclass below - final boolean[] useOpt = new boolean[1]; - useOpt[0] = filters != null; - return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - // Try fast filter aggregation if the filters have been created - // Skip if tried before and gave incorrect/incomplete results - if (useOpt[0]) { - useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> { - incrementBucketDocCount( - FilterRewriteHelper.getBucketOrd(getBucketOrds().add(owningBucketOrd, preparedRounding.round(key))), - count - ); - }); - } - iteratingCollector.collect(doc, owningBucketOrd); } }; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8437e1dce9fe0..0e830106c8284 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,13 +33,13 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; -import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -49,8 +49,8 @@ import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; -import org.opensearch.search.aggregations.support.FieldContext; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import 
org.opensearch.search.internal.SearchContext; @@ -81,9 +81,9 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg private final long minDocCount; private final LongBounds extendedBounds; private final LongBounds hardBounds; - private final Weight[] filters; private final LongKeyedBucketOrds bucketOrds; - private final DateFieldMapper.DateFieldType fieldType; + + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; DateHistogramAggregator( String name, @@ -116,34 +116,35 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); - FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext( - parent, - subAggregators.length, - context, - x -> rounding, - () -> preparedRounding, - valuesSourceConfig, - this::computeBounds + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + fastFilterContext.setAggregationType( + new DateHistogramAggregationType( + valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null, + hardBounds + ) ); - if (filterContext != null) { - fieldType = filterContext.fieldType; - filters = filterContext.filters; - } else { - filters = null; - fieldType = null; + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter(); } } - private long[] computeBounds(final FieldContext fieldContext) throws IOException { - final long[] bounds = FilterRewriteHelper.getAggregationBounds(context, fieldContext.field()); - if (bounds != null) { - // Update min/max limit if user specified any hard bounds - if (hardBounds != null) { - bounds[0] = Math.max(bounds[0], hardBounds.getMin()); - bounds[1] = Math.min(bounds[1], hardBounds.getMax() - 1); // hard bounds max is exclusive - } + private class DateHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public DateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + super(fieldType, missing, hasScript, hardBounds); + } + + @Override + protected Rounding getRounding(long low, long high) { + return rounding; + } + + @Override + protected Rounding.Prepared getRoundingPrepared() { + return preparedRounding; } - return bounds; } @Override @@ -160,26 +161,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol return LeafBucketCollector.NO_OP_COLLECTOR; } - // Need to be declared as final and array for usage within the - // LeafBucketCollectorBase subclass below - final boolean[] useOpt = new boolean[1]; - useOpt[0] = filters != null; + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); SortedNumericDocValues values = valuesSource.longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - // Try fast filter aggregation if the filters have been created - // Skip if tried before and gave incorrect/incomplete results - if (useOpt[0]) { - useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> { - incrementBucketDocCount( - 
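
Both histogram aggregators above now share the same per-segment fast path: if every bucket of the current leaf can be resolved from the precomputed range filters, the doc counts are added directly and CollectionTerminatedException is thrown so Lucene skips document-by-document collection for that leaf. A simplified, hypothetical sketch of that fast path; LeafFastPath and bucketKeys are illustrative names, not the helper's real API:

import java.io.IOException;
import java.util.function.BiConsumer;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Weight;

// Simplified shape of the per-segment fast path: if every bucket filter can be resolved
// with Weight#count, report the counts and let the caller skip doc-by-doc collection.
final class LeafFastPath {
    static boolean tryCountWithFilters(LeafReaderContext ctx, Weight[] bucketFilters, long[] bucketKeys,
                                       BiConsumer<Long, Integer> incrementDocCount) throws IOException {
        int[] counts = new int[bucketFilters.length];
        for (int i = 0; i < bucketFilters.length; i++) {
            counts[i] = bucketFilters[i].count(ctx);
            if (counts[i] == -1) {
                return false; // segment cannot be counted cheaply (e.g. deleted docs); fall back
            }
        }
        for (int i = 0; i < bucketFilters.length; i++) {
            if (counts[i] > 0) {
                incrementDocCount.accept(bucketKeys[i], counts[i]);
            }
        }
        return true; // caller throws CollectionTerminatedException to skip this leaf
    }
}
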
FilterRewriteHelper.getBucketOrd(bucketOrds.add(owningBucketOrd, preparedRounding.round(key))), - count - ); - }); - } - if (values.advanceExact(doc)) { int valuesCount = values.docValueCount(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java deleted file mode 100644 index 29cecd5b382cd..0000000000000 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.aggregations.bucket.histogram; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.PointRangeQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.NumericUtils; -import org.opensearch.common.CheckedFunction; -import org.opensearch.common.Rounding; -import org.opensearch.common.lucene.search.function.FunctionScoreQuery; -import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.index.query.DateRangeIncludingNowQuery; -import org.opensearch.search.aggregations.support.FieldContext; -import org.opensearch.search.aggregations.support.ValuesSourceConfig; -import org.opensearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.OptionalLong; -import java.util.function.BiConsumer; -import java.util.function.Function; -import java.util.function.Supplier; - -/** - * Helpers functions to rewrite and optimize aggregations using - * range filter queries - * - * @opensearch.internal - */ -public class FilterRewriteHelper { - - static class FilterContext { - final DateFieldMapper.DateFieldType fieldType; - final Weight[] filters; - - public FilterContext(DateFieldMapper.DateFieldType fieldType, Weight[] filters) { - this.fieldType = fieldType; - this.filters = filters; - } - } - - private static final int MAX_NUM_FILTER_BUCKETS = 1024; - private static final Map<Class<?>, Function<Query, Query>> queryWrappers; - - // Initialize the wrappers map for unwrapping the query - static { - queryWrappers = new HashMap<>(); - queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery()); - queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); - queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); - queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); - } - - /** - * Recursively unwraps query into the concrete form - * for applying the optimization - */ - private static Query unwrapIntoConcreteQuery(Query query) { - while (queryWrappers.containsKey(query.getClass())) { - query = queryWrappers.get(query.getClass()).apply(query); - } - - return query; - } - - /** - * Finds the min and max bounds for segments 
within the passed search context - */ - private static long[] getIndexBoundsFromLeaves(final SearchContext context, final String fieldName) throws IOException { - final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves(); - long min = Long.MAX_VALUE, max = Long.MIN_VALUE; - // Since the query does not specify bounds for aggregation, we can - // build the global min/max from local min/max within each segment - for (LeafReaderContext leaf : leaves) { - final PointValues values = leaf.reader().getPointValues(fieldName); - if (values != null) { - min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); - max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); - } - } - - if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) return null; - - return new long[] { min, max }; - } - - static long[] getAggregationBounds(final SearchContext context, final String fieldName) throws IOException { - final Query cq = unwrapIntoConcreteQuery(context.query()); - final long[] indexBounds = getIndexBoundsFromLeaves(context, fieldName); - if (cq instanceof PointRangeQuery) { - final PointRangeQuery prq = (PointRangeQuery) cq; - // Ensure that the query and aggregation are on the same field - if (prq.getField().equals(fieldName)) { - return new long[] { - // Minimum bound for aggregation is the max between query and global - Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]), - // Maximum bound for aggregation is the min between query and global - Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]) }; - } - } else if (cq instanceof MatchAllDocsQuery) { - return indexBounds; - } - - return null; - } - - /** - * Creates the range query filters for aggregations using the interval, min/max - * bounds and the rounding values - */ - private static Weight[] createFilterForAggregations( - final SearchContext context, - final Rounding rounding, - final Rounding.Prepared preparedRounding, - final String field, - final DateFieldMapper.DateFieldType fieldType, - final long low, - final long high - ) throws IOException { - final OptionalLong intervalOpt = Rounding.getInterval(rounding); - if (intervalOpt.isEmpty()) { - return null; - } - - final long interval = intervalOpt.getAsLong(); - // Calculate the number of buckets using range and interval - long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); - long prevRounded = roundedLow; - int bucketCount = 0; - while (roundedLow <= fieldType.convertNanosToMillis(high)) { - bucketCount++; - // Below rounding is needed as the interval could return in - // non-rounded values for something like calendar month - roundedLow = preparedRounding.round(roundedLow + interval); - if (prevRounded == roundedLow) break; - prevRounded = roundedLow; - } - - Weight[] filters = null; - if (bucketCount > 0 && bucketCount <= MAX_NUM_FILTER_BUCKETS) { - int i = 0; - filters = new Weight[bucketCount]; - roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); - while (i < bucketCount) { - // Calculate the lower bucket bound - final byte[] lower = new byte[8]; - NumericUtils.longToSortableBytes(i == 0 ? 
low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0); - // Calculate the upper bucket bound - final byte[] upper = new byte[8]; - roundedLow = preparedRounding.round(roundedLow + interval); - // Subtract -1 if the minimum is roundedLow as roundedLow itself - // is included in the next bucket - NumericUtils.longToSortableBytes( - i + 1 == bucketCount ? high : fieldType.convertRoundedMillisToNanos(roundedLow) - 1, - upper, - 0 - ); - filters[i++] = context.searcher().createWeight(new PointRangeQuery(field, lower, upper, 1) { - @Override - protected String toString(int dimension, byte[] value) { - return null; - } - }, ScoreMode.COMPLETE_NO_SCORES, 1); - } - } - - return filters; - } - - static FilterContext buildFastFilterContext( - final Object parent, - final int subAggLength, - SearchContext context, - Function<long[], Rounding> roundingFunction, - Supplier<Rounding.Prepared> preparedRoundingSupplier, - ValuesSourceConfig valuesSourceConfig, - CheckedFunction<FieldContext, long[], IOException> computeBounds - ) throws IOException { - // Create the filters for fast aggregation only if the query is instance - // of point range query and there aren't any parent/sub aggregations - if (parent == null && subAggLength == 0 && valuesSourceConfig.missing() == null && valuesSourceConfig.script() == null) { - final FieldContext fieldContext = valuesSourceConfig.fieldContext(); - if (fieldContext != null) { - final String fieldName = fieldContext.field(); - final long[] bounds = computeBounds.apply(fieldContext); - if (bounds != null) { - assert fieldContext.fieldType() instanceof DateFieldMapper.DateFieldType; - final DateFieldMapper.DateFieldType fieldType = (DateFieldMapper.DateFieldType) fieldContext.fieldType(); - final Rounding rounding = roundingFunction.apply(bounds); - final Weight[] filters = FilterRewriteHelper.createFilterForAggregations( - context, - rounding, - preparedRoundingSupplier.get(), - fieldName, - fieldType, - bounds[0], - bounds[1] - ); - return new FilterContext(fieldType, filters); - } - } - } - return null; - } - - static long getBucketOrd(long bucketOrd) { - if (bucketOrd < 0) { // already seen - bucketOrd = -1 - bucketOrd; - } - - return bucketOrd; - } - - static boolean tryFastFilterAggregation( - final LeafReaderContext ctx, - final Weight[] filters, - final DateFieldMapper.DateFieldType fieldType, - final BiConsumer<Long, Integer> incrementDocCount - ) throws IOException { - final int[] counts = new int[filters.length]; - int i; - for (i = 0; i < filters.length; i++) { - counts[i] = filters[i].count(ctx); - if (counts[i] == -1) { - // Cannot use the optimization if any of the counts - // is -1 indicating the segment might have deleted documents - return false; - } - } - - for (i = 0; i < filters.length; i++) { - if (counts[i] > 0) { - incrementDocCount.accept( - fieldType.convertNanosToMillis( - NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) - ), - counts[i] - ); - } - } - throw new CollectionTerminatedException(); - } -} diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index da1d9961ed81b..d21737a8366b2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -227,6 +227,10 @@ public String toString() { return "anon SortedNumericDoubleValues of [" 
+ super.toString() + "]"; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index 1a76183ac1a2d..3ce1f0447dfcc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -576,6 +576,11 @@ public boolean advanceExact(int target) throws IOException { } return false; } + + @Override + public int advance(int target) throws IOException { + return doubleValues.advance(target); + } } } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 434e630893f25..1a5a9dc6d1f03 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -117,6 +117,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField IGNORE_FAILURE_FIELD = new ParseField("ignore_failure"); public static final ParseField SORT_FIELD = new ParseField("sort"); public static final ParseField TRACK_SCORES_FIELD = new ParseField("track_scores"); + public static final ParseField INCLUDE_NAMED_QUERIES_SCORE = new ParseField("include_named_queries_score"); public static final ParseField TRACK_TOTAL_HITS_FIELD = new ParseField("track_total_hits"); public static final ParseField INDICES_BOOST_FIELD = new ParseField("indices_boost"); public static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations"); @@ -175,6 +176,8 @@ public static HighlightBuilder highlight() { private boolean trackScores = false; + private Boolean includeNamedQueriesScore; + private Integer trackTotalHitsUpTo; private SearchAfterBuilder searchAfterBuilder; @@ -276,6 +279,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { searchPipelineSource = in.readMap(); } } + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + includeNamedQueriesScore = in.readOptionalBoolean(); + } } @Override @@ -341,6 +347,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(searchPipelineSource); } } + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeOptionalBoolean(includeNamedQueriesScore); + } } /** @@ -568,6 +577,22 @@ public SearchSourceBuilder trackScores(boolean trackScores) { return this; } + /** + * Applies when there are named queries, to return the scores along as well + * Defaults to {@code false}. + */ + public SearchSourceBuilder includeNamedQueriesScores(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + /** + * Indicates whether scores will be returned as part of every search matched query.s + */ + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore != null && includeNamedQueriesScore; + } + /** * Indicates whether scores will be tracked for this request. 
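
For context on how the new flag is driven from a request: include_named_queries_score only has an effect when the query contains named clauses, and it switches the per-hit matched queries from bare names to name/score pairs (handled in MatchedQueriesPhase further down in this diff). A small, hypothetical builder-side example; the index fields and query text are made up:

import org.opensearch.index.query.BoolQueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

public class NamedQueryScoresExample {
    // Hypothetical request: two named clauses, with per-query scores requested for matched queries.
    public static SearchSourceBuilder build() {
        BoolQueryBuilder query = QueryBuilders.boolQuery()
            .should(QueryBuilders.matchQuery("title", "opensearch").queryName("title_match"))
            .should(QueryBuilders.matchQuery("body", "aggregation").queryName("body_match"));
        return new SearchSourceBuilder()
            .query(query)
            .includeNamedQueriesScores(true); // the flag introduced by this change
    }
}
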
*/ @@ -1103,6 +1128,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.terminateAfter = terminateAfter; rewrittenBuilder.timeout = timeout; rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.includeNamedQueriesScore = includeNamedQueriesScore; rewrittenBuilder.trackTotalHitsUpTo = trackTotalHitsUpTo; rewrittenBuilder.version = version; rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; @@ -1155,6 +1181,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { trackScores = parser.booleanValue(); + } else if (INCLUDE_NAMED_QUERIES_SCORE.match(currentFieldName, parser.getDeprecationHandler())) { + includeNamedQueriesScore = parser.booleanValue(); } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_BOOLEAN || (token == XContentParser.Token.VALUE_STRING && Booleans.isBoolean(parser.text()))) { @@ -1418,6 +1446,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(TRACK_SCORES_FIELD.getPreferredName(), true); } + if (includeNamedQueriesScore != null) { + builder.field(INCLUDE_NAMED_QUERIES_SCORE.getPreferredName(), includeNamedQueriesScore); + } + if (trackTotalHitsUpTo != null) { builder.field(TRACK_TOTAL_HITS_FIELD.getPreferredName(), trackTotalHitsUpTo); } @@ -1749,6 +1781,7 @@ public int hashCode() { terminateAfter, timeout, trackScores, + includeNamedQueriesScore, version, seqNoAndPrimaryTerm, profile, @@ -1791,6 +1824,7 @@ public boolean equals(Object obj) { && Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) + && Objects.equals(includeNamedQueriesScore, other.includeNamedQueriesScore) && Objects.equals(version, other.version) && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm) && Objects.equals(profile, other.profile) diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java index 7e36ace9e2112..5be3733106655 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java @@ -188,6 +188,10 @@ public boolean fetchScores() { return searchContext.sort() != null && searchContext.trackScores(); } + public boolean includeNamedQueriesScore() { + return searchContext.includeNamedQueriesScore(); + } + /** * Configuration for returning inner hits */ diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index a842c0f1adc6e..1698f41caaf2b 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -91,7 +91,7 @@ /** * Fetch phase of a search request, used to fetch the actual top matching documents to be returned to the client, identified - * after reducing all of the matches returned by the query phase + * after reducing all the matches returned by the query phase * * @opensearch.api */ diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java index 6c589438d6b4c..406d9c8b4bc03 100644 --- 
a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java @@ -28,12 +28,12 @@ * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. */ - package org.opensearch.search.fetch.subphase; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -45,6 +45,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -67,25 +68,69 @@ public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOExcept if (namedQueries.isEmpty()) { return null; } + + Map<String, Weight> weights = prepareWeights(context, namedQueries); + + return context.includeNamedQueriesScore() ? createScoringProcessor(weights) : createNonScoringProcessor(weights); + } + + private Map<String, Weight> prepareWeights(FetchContext context, Map<String, Query> namedQueries) throws IOException { Map<String, Weight> weights = new HashMap<>(); + ScoreMode scoreMode = context.includeNamedQueriesScore() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; for (Map.Entry<String, Query> entry : namedQueries.entrySet()) { - weights.put( - entry.getKey(), - context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), ScoreMode.COMPLETE_NO_SCORES, 1) - ); + weights.put(entry.getKey(), context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), scoreMode, 1)); } + return weights; + } + + private FetchSubPhaseProcessor createScoringProcessor(Map<String, Weight> weights) { return new FetchSubPhaseProcessor() { + final Map<String, Scorer> matchingScorers = new HashMap<>(); + + @Override + public void setNextReader(LeafReaderContext readerContext) throws IOException { + matchingScorers.clear(); + for (Map.Entry<String, Weight> entry : weights.entrySet()) { + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Scorer scorer = scorerSupplier.get(0L); + if (scorer != null) { + matchingScorers.put(entry.getKey(), scorer); + } + } + } + } + + @Override + public void process(HitContext hitContext) throws IOException { + Map<String, Float> matches = new LinkedHashMap<>(); + int docId = hitContext.docId(); + for (Map.Entry<String, Scorer> entry : matchingScorers.entrySet()) { + Scorer scorer = entry.getValue(); + if (scorer.iterator().docID() < docId) { + scorer.iterator().advance(docId); + } + if (scorer.iterator().docID() == docId) { + matches.put(entry.getKey(), scorer.score()); + } + } + hitContext.hit().matchedQueriesWithScores(matches); + } + }; + } - final Map<String, Bits> matchingIterators = new HashMap<>(); + private FetchSubPhaseProcessor createNonScoringProcessor(Map<String, Weight> weights) { + return new FetchSubPhaseProcessor() { + final Map<String, Bits> matchingBits = new HashMap<>(); @Override public void setNextReader(LeafReaderContext readerContext) throws IOException { - matchingIterators.clear(); + matchingBits.clear(); for (Map.Entry<String, Weight> entry : weights.entrySet()) { - ScorerSupplier ss = entry.getValue().scorerSupplier(readerContext); - if (ss != null) { - Bits matchingBits = 
Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), ss); - matchingIterators.put(entry.getKey(), matchingBits); + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Bits bits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), scorerSupplier); + matchingBits.put(entry.getKey(), bits); } } } @@ -93,15 +138,14 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException { @Override public void process(HitContext hitContext) { List<String> matches = new ArrayList<>(); - int doc = hitContext.docId(); - for (Map.Entry<String, Bits> iterator : matchingIterators.entrySet()) { - if (iterator.getValue().get(doc)) { - matches.add(iterator.getKey()); + int docId = hitContext.docId(); + for (Map.Entry<String, Bits> entry : matchingBits.entrySet()) { + if (entry.getValue().get(docId)) { + matches.add(entry.getKey()); } } hitContext.hit().matchedQueries(matches.toArray(new String[0])); } }; } - } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java index eb5f4f3c14eb2..c06a733203434 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -123,13 +123,27 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc List<Object> textsToHighlight; Analyzer analyzer = context.mapperService().documentMapper().mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset(); + final Integer fieldMaxAnalyzedOffset = field.fieldOptions().maxAnalyzerOffset(); + if (fieldMaxAnalyzedOffset != null && fieldMaxAnalyzedOffset > maxAnalyzedOffset) { + throw new IllegalArgumentException( + "max_analyzer_offset has exceeded [" + + maxAnalyzedOffset + + "] - maximum allowed to be analyzed for highlighting. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + + "] index level setting. " + + "For large texts, indexing with offsets or term vectors is recommended!" 
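
A compact restatement of the per-hit step in the scoring processor above: each named query's scorer is advanced to the hit's doc id, and a score is recorded only when the scorer actually lands on that document. Because Lucene iterators only move forward, hits must be visited in doc-id order for this to work; LinkedHashMap keeps the named queries in a stable order.

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;

final class NamedQueryScores {
    // Simplified per-hit step: advance each named query's scorer and keep its score
    // only if the scorer is positioned exactly on the hit's document.
    static Map<String, Float> scoresFor(int docId, Map<String, Scorer> scorersByName) throws IOException {
        Map<String, Float> matches = new LinkedHashMap<>();
        for (Map.Entry<String, Scorer> entry : scorersByName.entrySet()) {
            DocIdSetIterator it = entry.getValue().iterator();
            if (it.docID() < docId) {
                it.advance(docId); // forward-only; earlier hits must already have been processed
            }
            if (it.docID() == docId) {
                matches.put(entry.getKey(), entry.getValue().score());
            }
        }
        return matches;
    }
}
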
+ ); + } textsToHighlight = HighlightUtils.loadFieldValues(fieldType, context.getQueryShardContext(), hitContext, fieldContext.forceSource); for (Object textToHighlight : textsToHighlight) { String text = convertFieldValue(fieldType, textToHighlight); int textLength = text.length(); - if (textLength > maxAnalyzedOffset) { + if (fieldMaxAnalyzedOffset != null && textLength > fieldMaxAnalyzedOffset) { + text = text.substring(0, fieldMaxAnalyzedOffset); + } else if (textLength > maxAnalyzedOffset) { throw new IllegalArgumentException( "The length of [" + fieldContext.fieldName diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 151ef97a2a141..3a3b46366a6d2 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -340,6 +340,14 @@ public FieldDoc searchAfter() { return in.searchAfter(); } + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + return in.includeNamedQueriesScore(includeNamedQueriesScore); + } + + public boolean includeNamedQueriesScore() { + return in.includeNamedQueriesScore(); + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { return in.parsedPostFilter(postFilter); diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 02837da64dafd..cd8f9f8410d50 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -305,6 +305,29 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { public abstract boolean trackScores(); + /** + * Determines whether named queries' scores should be included in the search results. + * By default, this is set to return false, indicating that scores from named queries are not included. + * + * @param includeNamedQueriesScore true to include scores from named queries, false otherwise. + */ + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + // Default implementation does nothing and returns this for chaining. + // Implementations of SearchContext should override this method to actually store the value. + return this; + } + + /** + * Checks if scores from named queries are included in the search results. + * + * @return true if scores from named queries are included, false otherwise. + */ + public boolean includeNamedQueriesScore() { + // Default implementation returns false. + // Implementations of SearchContext should override this method to return the actual value. 
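
The PlainHighlighter change above layers a per-field limit on top of the index-level one: a per-field max_analyzer_offset larger than the index setting is rejected up front, text longer than the per-field limit is truncated before analysis instead of failing, and text longer than the index-level limit still throws as before. The branching, extracted into a standalone sketch with abbreviated messages:

// Returns the text that will actually be analyzed for highlighting, or throws when a limit is hit.
static String textToAnalyze(String text, Integer fieldMaxAnalyzerOffset, int indexMaxAnalyzedOffset) {
    if (fieldMaxAnalyzerOffset != null && fieldMaxAnalyzerOffset > indexMaxAnalyzedOffset) {
        throw new IllegalArgumentException("per-field max_analyzer_offset exceeds the index-level limit");
    }
    if (fieldMaxAnalyzerOffset != null && text.length() > fieldMaxAnalyzerOffset) {
        return text.substring(0, fieldMaxAnalyzerOffset); // analyze and highlight only the allowed prefix
    }
    if (text.length() > indexMaxAnalyzedOffset) {
        throw new IllegalArgumentException("text exceeds index.highlight.max_analyzed_offset");
    }
    return text;
}
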
+ return false; + } + public abstract SearchContext trackTotalHitsUpTo(int trackTotalHits); /** diff --git a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java index 55315013ea8c9..b2c97baf78d91 100644 --- a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java @@ -82,6 +82,8 @@ public class SubSearchContext extends FilteredSearchContext { private boolean explain; private boolean trackScores; + + private boolean includeNamedQueriesScore; private boolean version; private boolean seqNoAndPrimaryTerm; @@ -234,6 +236,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { throw new UnsupportedOperationException("Not supported"); diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java index 3bd1c5118b5fb..70cbd8d7ad6c3 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java @@ -78,6 +78,7 @@ public void setDocument(int docId) { this.docId = docId; } + @SuppressWarnings("removal") @Override public ScriptDocValues<?> get(Object key) { // assume its a string... diff --git a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java index 8813865a657dc..906616eb9ba5f 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java @@ -60,6 +60,12 @@ public class SearchLookup { */ private static final int MAX_FIELD_CHAIN_DEPTH = 5; + /** + * This constant should be used in cases when shard id is unknown. + * Mostly it should be used in tests. + */ + public static final int UNKNOWN_SHARD_ID = -1; + /** * The chain of fields for which this lookup was created, used for detecting * loops caused by runtime fields referring to other runtime fields. The chain is empty @@ -74,14 +80,27 @@ public class SearchLookup { private final SourceLookup sourceLookup; private final FieldsLookup fieldsLookup; private final BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup; + private final int shardId; /** - * Create the top level field lookup for a search request. Provides a way to look up fields from doc_values, - * stored fields, or _source. + * Constructor for backwards compatibility. Use the one with explicit shardId argument. */ + @Deprecated public SearchLookup( MapperService mapperService, BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup + ) { + this(mapperService, fieldDataLookup, UNKNOWN_SHARD_ID); + } + + /** + * Create the top level field lookup for a search request. Provides a way to look up fields from doc_values, + * stored fields, or _source. 
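
The SearchLookup change above threads an explicit shard id through the lookup, keeping -1 (UNKNOWN_SHARD_ID) as the sentinel for legacy callers, while the shardId() accessor added just below fails fast instead of leaking the sentinel. The same pattern, reduced to a self-contained illustration rather than the SearchLookup class itself:

// Callers that never provide a shard id still compile, but asking for it fails loudly.
final class ShardScoped {
    static final int UNKNOWN_SHARD_ID = -1;

    private final int shardId;

    ShardScoped() {
        this(UNKNOWN_SHARD_ID); // legacy path, kept for compatibility
    }

    ShardScoped(int shardId) {
        this.shardId = shardId;
    }

    int shardId() {
        if (shardId == UNKNOWN_SHARD_ID) {
            throw new IllegalStateException("Shard id is unknown for this lookup");
        }
        return shardId;
    }
}
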
+ */ + public SearchLookup( + MapperService mapperService, + BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup, + int shardId ) { this.fieldChain = Collections.emptySet(); docMap = new DocLookup( @@ -91,6 +110,7 @@ public SearchLookup( sourceLookup = new SourceLookup(); fieldsLookup = new FieldsLookup(mapperService); this.fieldDataLookup = fieldDataLookup; + this.shardId = shardId; } /** @@ -109,6 +129,7 @@ private SearchLookup(SearchLookup searchLookup, Set<String> fieldChain) { this.sourceLookup = searchLookup.sourceLookup; this.fieldsLookup = searchLookup.fieldsLookup; this.fieldDataLookup = searchLookup.fieldDataLookup; + this.shardId = searchLookup.shardId; } /** @@ -143,4 +164,11 @@ public DocLookup doc() { public SourceLookup source() { return sourceLookup; } + + public int shardId() { + if (shardId == UNKNOWN_SHARD_ID) { + throw new IllegalStateException("Shard id is unknown for this lookup"); + } + return shardId; + } } diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java index 631ace41090d7..19a59e9f7bebe 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.Query; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.AggregationProcessor; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; @@ -33,9 +32,7 @@ public class QueryPhaseSearcherWrapper implements QueryPhaseSearcher { public QueryPhaseSearcherWrapper() { this.defaultQueryPhaseSearcher = new QueryPhase.DefaultQueryPhaseSearcher(); - this.concurrentQueryPhaseSearcher = FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - ? 
new ConcurrentQueryPhaseSearcher() - : null; + this.concurrentQueryPhaseSearcher = new ConcurrentQueryPhaseSearcher(); } /** @@ -58,10 +55,8 @@ public boolean searchWith( boolean hasTimeout ) throws IOException { if (searchContext.shouldUseConcurrentSearch()) { - LOGGER.debug("Using concurrent search over segments (experimental) for request with context id {}", searchContext.id()); return concurrentQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } else { - LOGGER.debug("Using non-concurrent search over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } } @@ -74,13 +69,8 @@ public boolean searchWith( @Override public AggregationProcessor aggregationProcessor(SearchContext searchContext) { if (searchContext.shouldUseConcurrentSearch()) { - LOGGER.debug( - "Using concurrent aggregation processor over segments (experimental) for request with context id {}", - searchContext.id() - ); return concurrentQueryPhaseSearcher.aggregationProcessor(searchContext); } else { - LOGGER.debug("Using non-concurrent aggregation processor over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.aggregationProcessor(searchContext); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 9d2c7eb882fa1..bf2c7fc74be92 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -121,6 +121,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.common.util.set.Sets.newHashSet; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; @@ -226,6 +227,16 @@ public RestoreService( */ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionListener<RestoreCompletionResponse> listener) { try { + // Setting INDEX_STORE_TYPE_SETTING as REMOTE_SNAPSHOT is intended to be a system-managed index setting that is configured when + // restoring a snapshot and should not be manually set by user. + String storeTypeSetting = request.indexSettings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new SnapshotRestoreException( + request.repository(), + request.snapshot(), + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." 
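
The RestoreService guard above rejects restores that smuggle the system-managed index.store.type=remote_snapshot setting in through the request's index settings; the supported path is the storage_type argument of the restore request itself. A hypothetical client-side sketch of that path; it assumes RestoreSnapshotRequest exposes a storageType(...) setter mirroring the REST-level "storage_type" argument, and the repository, snapshot, and index names are made up:

import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;

public class RemoteSnapshotRestoreExample {
    // Hypothetical: restore an index as a searchable snapshot the supported way,
    // instead of setting "index.store.type": "remote_snapshot" in indexSettings.
    public static RestoreSnapshotRequest build() {
        return new RestoreSnapshotRequest("my-repo", "my-snapshot")
            .indices("my-index")
            .storageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT);
    }
}
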
+ ); + } // Read snapshot info and metadata from the repository final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index 24dcab98c8870..4b8897a318531 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -66,7 +66,6 @@ public class TelemetrySettings { private volatile boolean tracingEnabled; private volatile double samplingProbability; - private final boolean tracingFeatureEnabled; private final boolean metricsFeatureEnabled; @@ -98,6 +97,7 @@ public void setSamplingProbability(double samplingProbability) { /** * Get sampling ratio + * @return double */ public double getSamplingProbability() { return samplingProbability; @@ -110,4 +110,5 @@ public boolean isTracingFeatureEnabled() { public boolean isMetricsFeatureEnabled() { return metricsFeatureEnabled; } + } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index b6b2cf360d1c5..6a97914b04ebc 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -40,6 +40,11 @@ private AttributeNames() { */ public static final String HTTP_URI = "http.uri"; + /** + * Http Request Query Parameters. + */ + public static final String HTTP_REQ_QUERY_PARAMS = "url.query"; + /** * Rest Request ID. */ diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index 1dce422943b7a..70658c5d71bf3 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -11,6 +11,7 @@ import org.opensearch.action.bulk.BulkShardRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; @@ -75,7 +76,9 @@ public static SpanCreationContext from(String spanName, String nodeId, Replicate } private static String createSpanName(HttpRequest httpRequest) { - return httpRequest.method().name() + SEPARATOR + httpRequest.uri(); + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String path = uriParts.v1(); + return httpRequest.method().name() + SEPARATOR + path; } private static Attributes buildSpanAttributes(HttpRequest httpRequest) { @@ -84,9 +87,26 @@ private static Attributes buildSpanAttributes(HttpRequest httpRequest) { .addAttribute(AttributeNames.HTTP_METHOD, httpRequest.method().name()) .addAttribute(AttributeNames.HTTP_PROTOCOL_VERSION, httpRequest.protocolVersion().name()); populateHeader(httpRequest, attributes); + + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + return attributes; } + private static Tuple<String, String> splitUri(String uri) { + int index = uri.indexOf('?'); + if (index >= 0 && index < uri.length() - 1) { 
+ String path = uri.substring(0, index); + String query = uri.substring(index + 1); + return new Tuple<>(path, query); + } + return new Tuple<>(uri, ""); + } + private static void populateHeader(HttpRequest httpRequest, Attributes attributes) { HEADERS_TO_BE_ADDED_AS_ATTRIBUTES.forEach(x -> { if (httpRequest.getHeaders() != null @@ -102,9 +122,8 @@ private static String createSpanName(RestRequest restRequest) { if (restRequest != null) { try { String methodName = restRequest.method().name(); - // path() does the decoding, which may give error - String path = restRequest.path(); - spanName = methodName + SEPARATOR + path; + String rawPath = restRequest.rawPath(); + spanName = methodName + SEPARATOR + rawPath; } catch (Exception e) { // swallow the exception and keep the default name. } @@ -114,9 +133,16 @@ private static String createSpanName(RestRequest restRequest) { private static Attributes buildSpanAttributes(RestRequest restRequest) { if (restRequest != null) { - return Attributes.create() + Attributes attributes = Attributes.create() .addAttribute(AttributeNames.REST_REQ_ID, restRequest.getRequestId()) .addAttribute(AttributeNames.REST_REQ_RAW_PATH, restRequest.rawPath()); + + Tuple<String, String> uriParts = splitUri(restRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + return attributes; } else { return Attributes.EMPTY; } diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 12052598d3671..0b9026b81eb4e 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -42,7 +42,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; @@ -187,9 +186,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); - } + map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -282,12 +279,16 @@ public ThreadPool( TimeValue.timeValueMinutes(5) ) ); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - builders.put( + builders.put( + Names.INDEX_SEARCHER, + new ResizableExecutorBuilder( + settings, Names.INDEX_SEARCHER, - new ResizableExecutorBuilder(settings, Names.INDEX_SEARCHER, allocatedProcessors, 1000, runnableTaskListener) - ); - } + twiceAllocatedProcessors(allocatedProcessors), + 1000, + runnableTaskListener + ) + ); for (final ExecutorBuilder<?> builder : customBuilders) { if (builders.containsKey(builder.name())) { diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index d50266d8c9e4a..652d57f4c5348 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ 
b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -1105,7 +1105,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost "cluster:admin", "cluster:monitor", "cluster:internal", - "internal:" + "internal:", + "views:" ) ) ); diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 2c92f0ecd3f51..80b1d25064885 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -1 +1,2 @@ org.apache.lucene.search.suggest.document.Completion50PostingsFormat +org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 77cd0ab05278e..e1226345ef961 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -187,5 +187,4 @@ grant { permission java.io.FilePermission "/sys/fs/cgroup/cpuacct/-", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory/-", "read"; - }; diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 2f47bfb4df70a..d7026159d9ec0 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -40,6 +40,8 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.TimestampParsingException; +import org.opensearch.action.admin.indices.view.ViewAlreadyExistsException; +import org.opensearch.action.admin.indices.view.ViewNotFoundException; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; @@ -892,6 +894,8 @@ public void testIds() { ids.put(169, NodeWeighedAwayException.class); ids.put(170, SearchPipelineProcessingException.class); ids.put(171, CryptoRegistryException.class); + ids.put(172, ViewNotFoundException.class); + ids.put(173, ViewAlreadyExistsException.class); ids.put(10001, IndexCreateBlockException.class); Map<Class<? 
extends OpenSearchException>, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index d94bb9a7aa88e..6c4337d267c8d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -13,6 +13,8 @@ import org.apache.lucene.util.Constants; import org.opensearch.ExceptionsHelper; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; @@ -563,8 +565,57 @@ public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException assertNotNull(taskInfo.getResourceStats()); assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); - assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total") instanceof TaskResourceUsage); - TaskResourceUsage taskResourceUsage = (TaskResourceUsage) taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); + assertTrue(taskResourceUsage.getMemoryInBytes() > 0); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileGetTask() throws InterruptedException { + setup(true, false); + + final AtomicReference<Throwable> throwableReference = new AtomicReference<>(); + final AtomicReference<NodesResponse> responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + assertFalse(resourceTasks.isEmpty()); + GetTaskResponse getTaskResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportGetTaskAction, + new GetTaskRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), new ArrayList<>(resourceTasks.values()).get(0).getId())) + ); + + TaskInfo taskInfo = getTaskResponse.getTask().getTask(); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + 
assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); assertTrue(taskResourceUsage.getMemoryInBytes() > 0); }; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index a3fa0f9cb16e4..8d87fd5135663 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.FailedNodeException; import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; +import org.opensearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.BaseNodeResponse; @@ -41,6 +42,7 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.client.Client; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; @@ -57,6 +59,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; @@ -85,6 +88,7 @@ import static java.util.Collections.emptySet; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.mockito.Mockito.mock; /** * The test case for unit testing task manager and related transport actions @@ -249,6 +253,17 @@ protected TaskManager createTaskManager( taskResourceTrackingService ); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); + Client mockClient = mock(Client.class); + NamedXContentRegistry namedXContentRegistry = mock(NamedXContentRegistry.class); + transportGetTaskAction = new TransportGetTaskAction( + threadPool, + transportService, + actionFilters, + clusterService, + mockClient, + namedXContentRegistry, + taskResourceTrackingService + ); transportService.acceptIncomingRequests(); } @@ -258,6 +273,7 @@ protected TaskManager createTaskManager( private final SetOnce<DiscoveryNode> discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; + public final TransportGetTaskAction transportGetTaskAction; @Override public void close() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java index 398d9e3338580..89e072d783747 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -50,6 +50,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class CreateIndexRequestTests extends OpenSearchTestCase { @@ -150,6 +151,20 @@ public void testSettingsType() throws IOException { assertThat(e.getMessage(), equalTo("key [settings] must be an object")); } + public void testToString() throws IOException { + CreateIndexRequest request = new CreateIndexRequest("foo"); + String mapping = JsonXContent.contentBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .endObject() + .endObject() + .toString(); + request.mapping(mapping); + + assertThat(request.toString(), containsString("index='foo'")); + assertThat(request.toString(), containsString("mappings='{\"_doc\":{}}'")); + } + public static void assertMappingsEqual(Map<String, String> expected, Map<String, String> actual) throws IOException { assertEquals(expected.keySet(), actual.keySet()); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java new file mode 100644 index 0000000000000..e2211bb120366 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.util.List; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.nullValue; + +public class CreateViewRequestTests extends AbstractWireSerializingTestCase<CreateViewAction.Request> { + + @Override + protected Writeable.Reader<CreateViewAction.Request> instanceReader() { + return CreateViewAction.Request::new; + } + + @Override + protected CreateViewAction.Request createTestInstance() { + return new CreateViewAction.Request( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomList(5, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(8))) + ); + } + + public void testValidateRequest() { + final CreateViewAction.Request request = new CreateViewAction.Request( + "my-view", + "this is a description", + List.of(new CreateViewAction.Request.Target("my-indices-*")) + ); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final CreateViewAction.Request request = new CreateViewAction.Request("", null, null); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null", "targets cannot be empty")); + } + + public void testSizeThresholds() { + final String validName = randomAlphaOfLength(8); + final String validDescription = randomAlphaOfLength(20); + final int validTargetLength = randomIntBetween(1, 5); + final String validIndexPattern = randomAlphaOfLength(8); + + final CreateViewAction.Request requestNameTooBig = new CreateViewAction.Request( + randomAlphaOfLength(65), + validDescription, + randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat( + requestNameTooBig.validate().validationErrors(), + contains("name must be less than 64 characters in length") + ); + + final CreateViewAction.Request requestDescriptionTooBig = new CreateViewAction.Request( + validName, + randomAlphaOfLength(257), + randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat( + requestDescriptionTooBig.validate().validationErrors(), + contains("description must be less than 256 characters in length") + ); + + final CreateViewAction.Request requestTargetsSize = new CreateViewAction.Request( + validName, + validDescription, + randomList(26, 26, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat(requestTargetsSize.validate().validationErrors(), contains("view cannot have more than 25 targets")); + + final CreateViewAction.Request requestTargetsIndexPatternSize = new CreateViewAction.Request( + validName, + validDescription, + randomList(1, 1, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(65))) + ); + MatcherAssert.assertThat( + requestTargetsIndexPatternSize.validate().validationErrors(), + contains("target index pattern must be less than 64 characters in length") + ); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java new file mode 100644 index 0000000000000..29305e3dfb92f --- 
/dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.nullValue; + +public class DeleteViewRequestTests extends AbstractWireSerializingTestCase<DeleteViewAction.Request> { + + @Override + protected Writeable.Reader<DeleteViewAction.Request> instanceReader() { + return DeleteViewAction.Request::new; + } + + @Override + protected DeleteViewAction.Request createTestInstance() { + return new DeleteViewAction.Request(randomAlphaOfLength(8)); + } + + public void testValidateRequest() { + final DeleteViewAction.Request request = new DeleteViewAction.Request("my-view"); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final DeleteViewAction.Request request = new DeleteViewAction.Request(""); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null")); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java new file mode 100644 index 0000000000000..44dfbe5f1d781 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.cluster.metadata.View; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; + +import java.util.TreeSet; + +public class GetViewResponseTests extends AbstractWireSerializingTestCase<GetViewAction.Response> { + + @Override + protected Writeable.Reader<GetViewAction.Response> instanceReader() { + return GetViewAction.Response::new; + } + + @Override + protected GetViewAction.Response createTestInstance() { + return new GetViewAction.Response( + new View( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomLong(), + randomLong(), + new TreeSet<>(randomList(5, () -> new View.Target(randomAlphaOfLength(8)))) + ) + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java new file mode 100644 index 0000000000000..80a2827d158bb --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import static org.hamcrest.Matchers.nullValue; + +public class ListViewNamesRequestTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Request> { + + @Override + protected Writeable.Reader<ListViewNamesAction.Request> instanceReader() { + return ListViewNamesAction.Request::new; + } + + @Override + protected ListViewNamesAction.Request createTestInstance() { + return new ListViewNamesAction.Request(); + } + + public void testValidateRequest() { + final ListViewNamesAction.Request request = new ListViewNamesAction.Request(); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java new file mode 100644 index 0000000000000..ee8409fe3c805 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; + +public class ListViewNamesResponseTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Response> { + + @Override + protected Writeable.Reader<ListViewNamesAction.Response> instanceReader() { + return ListViewNamesAction.Response::new; + } + + @Override + protected ListViewNamesAction.Response createTestInstance() { + return new ListViewNamesAction.Response(randomList(5, () -> randomAlphaOfLength(8))); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java new file mode 100644 index 0000000000000..d49c0c1a8f2bd --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class SearchViewRequestTests extends AbstractWireSerializingTestCase<SearchViewAction.Request> { + + @Override + protected Writeable.Reader<SearchViewAction.Request> instanceReader() { + return SearchViewAction.Request::new; + } + + @Override + protected SearchViewAction.Request createTestInstance() { + try { + return new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest()); + } catch (final Exception e) { + throw new RuntimeException(e); + } + } + + public void testValidateRequest() throws IOException { + final SearchViewAction.Request request = new SearchViewAction.Request("my-view", new SearchRequest()); + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final SearchViewAction.Request request = new SearchViewAction.Request((String) null, new SearchRequest()); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors().size(), equalTo(1)); + MatcherAssert.assertThat(e.validationErrors().get(0), containsString("View is required")); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java new file mode 100644 index 0000000000000..91813e1336cf2 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +public class ViewServiceTest { + + private final View.Target typicalTarget = new View.Target(randomAlphaOfLength(8)); + private final View typicalView = new View( + "view-" + randomAlphaOfLength(8), + "description " + randomAlphaOfLength(20), + -1L, + -1L, + Set.of(typicalTarget) + ); + + private ClusterService clusterService; + private NodeClient nodeClient; + private final AtomicLong currentTime = new AtomicLong(0); + private LongSupplier timeProvider = currentTime::longValue; + private ViewService viewService; + + @Before + public void before() { + clusterService = mock(ClusterService.class); + nodeClient = mock(NodeClient.class); + timeProvider = mock(LongSupplier.class); + doAnswer(invocation -> currentTime.get()).when(timeProvider).getAsLong(); + viewService = spy(new ViewService(clusterService, nodeClient, timeProvider)); + } + + @After + public void after() { + verifyNoMoreInteractions(timeProvider, clusterService, nodeClient); + } + + private CreateViewAction.Request createTypicalViewRequest() { + return new CreateViewAction.Request( + randomAlphaOfLength(8), + randomAlphaOfLength(20), + List.of(new CreateViewAction.Request.Target(randomAlphaOfLength(8))) + ); + } + + public void createView() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.createView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("create_view_task"), any()); + verify(timeProvider).getAsLong(); + } + + public void updateView() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.updateView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("update_view_task"), any()); + verify(timeProvider).getAsLong(); + } + + public void updateView_doesNotExist() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + doThrow(new 
ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final Exception ex = assertThrows(ResourceNotFoundException.class, () -> viewService.updateView(request, listener)); + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void deleteView() { + final var request = new DeleteViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.deleteView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("delete_view_task"), any()); + } + + public void deleteView_doesNotExist() { + final var request = new DeleteViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.deleteView(request, listener)); + + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void getView() { + final var request = new GetViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.getView(request, listener); + + verify(listener).onResponse(any()); + } + + public void getView_doesNotExist() { + final var request = new GetViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.getView(request, listener)); + + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void listViewNames() { + final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).metadata( + new Metadata.Builder().views(Map.of(typicalView.getName(), typicalView)).build() + ).build(); + final var listener = mock(ActionListener.class); + when(clusterService.state()).thenReturn(clusterState); + + viewService.listViewNames(listener); + + verify(clusterService).state(); + verify(listener).onResponse(any()); + } + + public void listViewNames_noViews() { + final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).build(); + final var listener = mock(ActionListener.class); + when(clusterService.state()).thenReturn(clusterState); + + viewService.listViewNames(listener); + + verify(clusterService).state(); + verify(listener).onResponse(any()); + } + + public void searchView() { + final var request = spy(new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest())); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.searchView(request, listener); + + verify(nodeClient).executeLocally(eq(SearchAction.INSTANCE), any(), any(ActionListener.class)); + verify(request).indices(typicalTarget.getIndexPattern()); + } + + private void setGetViewOrThrowExceptionToReturnTypicalView() { + doAnswer(invocation -> typicalView).when(viewService).getViewOrThrowException(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java index f000b7a10a30b..c6e880fbd137e 100644 --- 
a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.get; +import org.opensearch.action.get.MultiGetRequest.Item; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; @@ -141,6 +142,13 @@ public void testXContentSerialization() throws IOException { } } + public void testToString() { + MultiGetRequest req = createTestInstance(); + for (Item items : req.getItems()) { + assertThat(req.toString(), containsString(items.toString())); + } + } + private MultiGetRequest createTestInstance() { int numItems = randomIntBetween(0, 128); MultiGetRequest request = new MultiGetRequest(); diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 76129341fc9a2..601aa9dc1856e 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -85,6 +85,8 @@ import java.util.function.BiFunction; import java.util.stream.IntStream; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -95,6 +97,7 @@ public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List<Tuple<String, String>> resolvedNodes = new ArrayList<>(); private final Set<ShardSearchContextId> releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + private SearchRequestOperationsListener assertingListener; ThreadPool threadPool; @Before @@ -103,6 +106,27 @@ public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); threadPool = new TestThreadPool(getClass().getName()); + assertingListener = new SearchRequestOperationsListener() { + private volatile SearchPhase phase; + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assertThat(phase, is(nullValue())); + phase = context.getCurrentPhase(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + }; } @After @@ -178,7 +202,10 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( results, request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ) ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { @@ -334,7 +361,7 @@ public void testOnPhaseFailureAndVerifyListeners() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new 
SearchRequestStats(clusterSettings); - final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List<SearchRequestOperationsListener> requestOperationListeners = List.of(testListener); SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); action.start(); assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 56dcf66d5607d..30fc50f91dabd 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; @@ -41,37 +42,84 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.stream.IntStream; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; public class CanMatchPreFilterSearchPhaseTests extends OpenSearchTestCase { + private SearchRequestOperationsListener assertingListener; + private Set<SearchPhase> phases; + + @Before + public void setUp() throws Exception { + super.setUp(); + + phases = Collections.newSetFromMap(new IdentityHashMap<>()); + assertingListener = new SearchRequestOperationsListener() { + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assertThat(phases.contains(context.getCurrentPhase()), is(false)); + phases.add(context.getCurrentPhase()); + 
} + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assertThat(phases.contains(context.getCurrentPhase()), is(true)); + phases.remove(context.getCurrentPhase()); + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context) { + assertThat(phases.contains(context.getCurrentPhase()), is(true)); + phases.remove(context.getCurrentPhase()); + } + }; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + assertBusy(() -> assertThat(phases, empty()), 5, TimeUnit.SECONDS); + } public void testFilterShards() throws InterruptedException { @@ -135,11 +183,12 @@ public void sendCanMatch( public void run() throws IOException { result.set(iter); latch.countDown(); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest ) ); @@ -230,11 +279,12 @@ public void sendCanMatch( public void run() throws IOException { result.set(iter); latch.countDown(); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest ) ); @@ -366,6 +416,7 @@ protected void executePhaseOnShard( canMatchPhase.start(); latch.await(); + executor.shutdown(); } @@ -443,17 +494,19 @@ public void sendCanMatch( public void run() { result.set(iter); latch.countDown(); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest ) ); canMatchPhase.start(); latch.await(); + ShardId[] expected = IntStream.range(0, shardIds.size()) .boxed() .sorted(Comparator.comparing(minAndMaxes::get, MinAndMax.getComparator(order)).thenComparing(shardIds::get)) @@ -546,17 +599,19 @@ public void sendCanMatch( public void run() { result.set(iter); latch.countDown(); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest ) ); canMatchPhase.start(); latch.await(); + int shardId = 0; for (SearchShardIterator i : result.get()) { assertThat(i.shardId().id(), equalTo(shardId++)); @@ -565,4 +620,190 @@ public void run() { assertThat(result.get().size(), equalTo(numShards)); } } + + public void testAsyncAction() throws InterruptedException { + + final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + + Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>(); + DiscoveryNode primaryNode = new DiscoveryNode("node_1", 
buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + lookup.put("node_1", new SearchAsyncActionTests.MockConnection(primaryNode)); + lookup.put("node_2", new SearchAsyncActionTests.MockConnection(replicaNode)); + final boolean shard1 = randomBoolean(); + final boolean shard2 = randomBoolean(); + + SearchTransportService searchTransportService = new SearchTransportService(null, null) { + @Override + public void sendCanMatch( + Transport.Connection connection, + ShardSearchRequest request, + SearchTask task, + ActionListener<SearchService.CanMatchResponse> listener + ) { + new Thread( + () -> listener.onResponse(new SearchService.CanMatchResponse(request.shardId().id() == 0 ? shard1 : shard2, null)) + ).start(); + } + }; + + AtomicReference<GroupShardsIterator<SearchShardIterator>> result = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter( + "idx", + new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS), + 2, + randomBoolean(), + primaryNode, + replicaNode + ); + final SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(true); + + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + ExecutorService executor = OpenSearchExecutors.newDirectExecutorService(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + searchRequest + ); + + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + null, + (iter) -> { + AbstractSearchAsyncAction<? 
extends SearchPhaseResult> action = new SearchDfsQueryAsyncAction( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + controller, + executor, + resultConsumer, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + task, + SearchResponse.Clusters.EMPTY, + searchRequestContext + ); + return new WrappingSearchAsyncActionPhase(action) { + @Override + public void run() { + super.run(); + latch.countDown(); + } + }; + }, + SearchResponse.Clusters.EMPTY, + searchRequestContext + ); + + canMatchPhase.start(); + latch.await(); + + assertThat(result.get(), is(nullValue())); + } + + private static final class SearchDfsQueryAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> { + private final SearchRequestOperationsListener listener; + + SearchDfsQueryAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final BiFunction<String, String, Transport.Connection> nodeIdToConnection, + final Map<String, AliasFilter> aliasFilter, + final Map<String, Float> concreteIndexBoosts, + final Map<String, Set<String>> indexRoutings, + final SearchPhaseController searchPhaseController, + final Executor executor, + final QueryPhaseResultConsumer queryPhaseResultConsumer, + final SearchRequest request, + final ActionListener<SearchResponse> listener, + final GroupShardsIterator<SearchShardIterator> shardsIts, + final TransportSearchAction.SearchTimeProvider timeProvider, + final ClusterState clusterState, + final SearchTask task, + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext + ) { + super( + SearchPhaseName.DFS_PRE_QUERY.getName(), + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + request.getMaxConcurrentShardRequests(), + clusters, + searchRequestContext + ); + this.listener = searchRequestContext.getSearchRequestOperationsListener(); + } + + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener<DfsSearchResult> listener + ) { + final DfsSearchResult response = new DfsSearchResult(shardIt.getSearchContextId(), shard, null); + response.setShardIndex(shard.getShardId().getId()); + listener.innerOnResponse(response); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) { + return new SearchPhase("last") { + @Override + public void run() throws IOException { + listener.onPhaseEnd(context, null); + } + }; + } + } + } diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java index 04a00a09dcbc4..cc10da8fc1f12 100644 --- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java @@ -67,17 +67,27 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { final Set<ShardSearchContextId> releasedSearchContexts = new HashSet<>(); final SearchRequest searchRequest; final AtomicReference<SearchResponse> searchResponse = new AtomicReference<>(); + final 
SearchPhase currentPhase; public MockSearchPhaseContext(int numShards) { this(numShards, new SearchRequest()); } public MockSearchPhaseContext(int numShards, SearchRequest searchRequest) { + this(numShards, searchRequest, null); + } + + public MockSearchPhaseContext(int numShards, SearchRequest searchRequest, SearchPhase currentPhase) { this.numShards = numShards; this.searchRequest = searchRequest; + this.currentPhase = currentPhase; numSuccess = new AtomicInteger(numShards); } + public MockSearchPhaseContext(int numShards, SearchPhase currentPhase) { + this(numShards, new SearchRequest(), currentPhase); + } + public void assertNoFailure() { if (phaseFailure.get() != null) { throw new AssertionError(phaseFailure.get()); @@ -106,7 +116,7 @@ public SearchRequest getRequest() { @Override public SearchPhase getCurrentPhase() { - return null; + return currentPhase; } @Override diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java index 908c122edc455..2577dfdc20698 100644 --- a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java @@ -73,6 +73,7 @@ import static java.util.Collections.singletonList; import static org.opensearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -560,6 +561,13 @@ public void testEqualsAndHashcode() { checkEqualsAndHashCode(createMultiSearchRequest(), MultiSearchRequestTests::copyRequest, MultiSearchRequestTests::mutate); } + public void testToString() { + MultiSearchRequest req = createMultiSearchRequest(); + for (SearchRequest subReq : req.requests()) { + assertThat(req.toString(), containsString(subReq.toString())); + } + } + private static MultiSearchRequest mutate(MultiSearchRequest searchRequest) throws IOException { MultiSearchRequest mutation = copyRequest(searchRequest); List<CheckedRunnable<IOException>> mutators = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java index 78c5ba4412c68..1cb336e18b12c 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java @@ -119,13 +119,13 @@ public void testStandardListenerAndPerRequestListenerDisabled() { public SearchRequestOperationsListener createTestSearchRequestOperationsListener() { return new SearchRequestOperationsListener() { @Override - void onPhaseStart(SearchPhaseContext context) {} + protected void onPhaseStart(SearchPhaseContext context) {} @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context) {} }; } } diff --git 
a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index 4b0bde0984ad1..e84b5213be39e 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -64,9 +64,8 @@ import org.apache.lucene.store.Directory; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -82,10 +81,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTermVectorsTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractTermVectorsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public AbstractTermVectorsTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public AbstractTermVectorsTestCase(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -96,11 +95,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - protected static class TestFieldSetting { public final String name; public final boolean storedOffset; diff --git a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java index b1e27ea9c66e3..2b4d2a755f543 100644 --- a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java @@ -49,6 +49,7 @@ public class OpenSearchPolicyTests extends OpenSearchTestCase { /** * test restricting privileges to no permissions actually works */ + @SuppressWarnings("removal") public void testRestrictPrivileges() { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java index ea4ef96ec0f77..69e561bb8fd89 100644 --- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java @@ -72,6 +72,7 @@ public void testEnsureRegularFile() throws IOException { } /** can't execute processes */ + @SuppressWarnings("removal") public void testProcessExecution() throws Exception { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5952cc1bcaac2..be25bee5fe7b1 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -52,6 +52,7 @@ import org.opensearch.common.SetOnce; import 
org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -67,11 +68,14 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -393,6 +397,7 @@ public void testJoinClusterWithNonRemoteStoreNodeJoiningNonRemoteStoreCluster() } public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) @@ -406,6 +411,62 @@ public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluste assertTrue(e.getMessage().equals("a remote store node [" + joiningNode + "] is trying to join a non remote " + "store cluster")); } + public void testRemoteStoreNodeJoiningNonRemoteStoreClusterMixedMode() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .metadata(metadata) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testAllTypesNodeJoiningRemoteStoreClusterMixedMode() { + final DiscoveryNode docrepNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode = 
newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(docrepNode) + .localNodeId(docrepNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build() + ) + .metadata(metadata) + .build(); + + // compatible remote node should be able to join a mixed mode cluster that already has a remote node + DiscoveryNode goodRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(goodRemoteNode, currentState.getNodes(), currentState.metadata()); + + // incompatible node should not be able to join a mixed mode + DiscoveryNode badRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(TRANSLOG_REPO, TRANSLOG_REPO)); + assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(badRemoteNode, currentState.getNodes(), currentState.metadata()) + ); + + // DocRep node should be able to join a mixed mode + DiscoveryNode docrepNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + JoinTaskExecutor.ensureNodesCompatibility(docrepNode2, currentState.getNodes(), currentState.metadata()); + } + public void testJoinClusterWithRemoteStoreNodeJoiningRemoteStoreCluster() { final DiscoveryNode existingNode = new DiscoveryNode( UUIDs.base64UUID(), diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cea151748bfb6..6d1f359d210ac 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -35,6 +35,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.shrink.ResizeType; @@ -133,6 +134,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; @@ -146,6 +148,7 @@ import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; @@ -1936,6 +1939,35 @@ public void testRequestDurabilityWhenRestrictSettingTrue() { assertEquals(Translog.Durability.REQUEST, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); } + public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() { + // This checks that aggregateIndexSettings throws exception for the case when the index setting + // index.store.type is set to remote_snapshot + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT); + request.settings(requestSettings.build()); + final IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + assertThat( + error.getMessage(), + containsString( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ) + ); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java new file mode 100644 index 0000000000000..ad39e2b103087 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.metadata.View.Target; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.AbstractSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class ViewTests extends AbstractSerializingTestCase<View> { + + private static Set<Target> randomTargets() { + int numTargets = randomIntBetween(1, 25); + return new TreeSet<>(randomList(1, numTargets, () -> new View.Target(randomAlphaOfLength(8)))); + } + + private static View randomInstance() { + final Set<Target> targets = randomTargets(); + final String viewName = randomAlphaOfLength(10); + final String description = randomAlphaOfLength(100); + return new View(viewName, description, Math.abs(randomLong()), Math.abs(randomLong()), targets); + } + + @Override + protected View doParseInstance(XContentParser parser) throws IOException { + return View.fromXContent(parser); + } + + @Override + protected Writeable.Reader<View> instanceReader() { + return View::new; + } + + @Override + protected View createTestInstance() { + return randomInstance(); + } + + public void testNullName() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View(null, null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Name must be provided")); + } + + public void testNullTargets() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View("name", null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Targets are required on a view")); + } + + public void testNullTargetIndexPattern() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View.Target((String) null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("IndexPattern is required")); + } + + public void testDefaultValues() { + final View view = new View("myName", null, null, null, Set.of()); + + MatcherAssert.assertThat(view.getName(), equalTo("myName")); + MatcherAssert.assertThat(view.getDescription(), equalTo(null)); + MatcherAssert.assertThat(view.getCreatedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getModifiedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getTargets(), empty()); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java index b78d1b56364eb..e19bde5d53d8a 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java @@ -8,29 +8,60 @@ package org.opensearch.cluster.metadata; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.AbstractXContentTestCase; +import org.opensearch.test.AbstractDiffableSerializationTestCase; import java.io.IOException; +import java.util.HashMap; import java.util.Map; -public class 
WeightedRoutingMetadataTests extends AbstractXContentTestCase<WeightedRoutingMetadata> { +public class WeightedRoutingMetadataTests extends AbstractDiffableSerializationTestCase<Metadata.Custom> { + + @Override + protected Writeable.Reader<Metadata.Custom> instanceReader() { + return WeightedRoutingMetadata::new; + } + @Override protected WeightedRoutingMetadata createTestInstance() { + String attributeName = "zone"; Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); - WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + if (randomBoolean()) { + weights = new HashMap<>(); + attributeName = ""; + } + WeightedRouting weightedRouting = new WeightedRouting(attributeName, weights); WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } + @Override protected WeightedRoutingMetadata doParseInstance(XContentParser parser) throws IOException { return WeightedRoutingMetadata.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + + WeightedRouting weightedRouting = new WeightedRouting("", new HashMap<>()); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + + @Override + protected Writeable.Reader<Diff<Metadata.Custom>> diffReader() { + return WeightedRoutingMetadata::readDiffFrom; + } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java index 780d041c25d04..7a0fd76b0fbd9 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java @@ -46,8 +46,6 @@ import java.util.Iterator; import java.util.List; -import org.mockito.Mockito; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -206,40 +204,4 @@ public void testInterleavedShardIteratorReplicaFirst() { } assertEquals(shardCount, this.totalNumberOfShards); } - - public void testSwapPrimaryWithReplica() { - // Initialize all the shards for test index 1 and 2 - initPrimaries(); - startInitializingShards(TEST_INDEX_1); - startInitializingShards(TEST_INDEX_1); - startInitializingShards(TEST_INDEX_2); - startInitializingShards(TEST_INDEX_2); - - // Create primary shard count imbalance between two nodes - final RoutingNodes routingNodes = this.clusterState.getRoutingNodes(); - final RoutingNode node0 = routingNodes.node("node0"); - final RoutingNode node1 = routingNodes.node("node1"); - final List<ShardRouting> shardRoutingList = node0.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED); - final RoutingChangesObserver routingChangesObserver = Mockito.mock(RoutingChangesObserver.class); - int swaps = 0; - - for (ShardRouting routing : shardRoutingList) { - if (routing.primary()) { - ShardRouting swap = node1.getByShardId(routing.shardId()); - routingNodes.swapPrimaryWithReplica(logger, routing, swap, routingChangesObserver); - swaps++; - } - } - Mockito.verify(routingChangesObserver, Mockito.times(swaps)).replicaPromoted(Mockito.any()); - - final List<ShardRouting> shards = node1.shardsWithState(TEST_INDEX_1, 
ShardRoutingState.STARTED); - int shardCount = 0; - for (ShardRouting shard : shards) { - if (shard.primary()) { - shardCount++; - } - } - - assertTrue(shardCount >= swaps); - } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index a1db6cd83ab6c..6a03a1f79bcde 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -229,6 +229,11 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return Decision.ALWAYS; } } + + @Override + public Decision canAllocateAnyShardToNode(RoutingNode node, RoutingAllocation allocation) { + return throttle ? Decision.THROTTLE : Decision.YES; + } }); Collections.shuffle(deciders, random()); return new AllocationDeciders(deciders); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java index f2e79b319d0dd..b840b78eff448 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java @@ -47,7 +47,7 @@ public void testExcludeNodeIdMoveBlocked() { /** * Test move operations for index level allocation settings. - * Supported for local indices, not supported for remote indices. + * Supported for local indices and remote indices. */ public void testIndexLevelExclusions() throws InterruptedException { int localOnlyNodes = 7; @@ -102,8 +102,9 @@ public void testIndexLevelExclusions() throws InterruptedException { // No shard of updated local index should be on excluded local capable node assertTrue(routingTable.allShards(localIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedLocalOnlyNode))); - // Since remote index shards are untouched, at least one shard should - // continue to stay on the excluded remote capable node - assertTrue(routingTable.allShards(remoteIndex).stream().anyMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode))); + // No shard of updated remote index should be on excluded remote capable node + assertTrue( + routingTable.allShards(remoteIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode)) + ); } } diff --git a/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java new file mode 100644 index 0000000000000..35d7877343909 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheModuleTests extends OpenSearchTestCase { + + public void testWithMultiplePlugins() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache1", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache2", factory2)); + + CacheModule cacheModule = new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY); + + Map<String, ICache.Factory> factoryMap = cacheModule.getCacheStoreTypeFactories(); + assertEquals(factoryMap.get("cache1"), factory1); + assertEquals(factoryMap.get("cache2"), factory2); + } + + public void testWithSameCacheStoreTypeAndName() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(factory1.getCacheName()).thenReturn("cache"); + when(factory2.getCacheName()).thenReturn("cache"); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache", factory2)); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY) + ); + assertEquals("Cache name: cache is already registered", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java new file mode 100644 index 0000000000000..9d39f8a43ea58 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static junit.framework.TestCase.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheServiceTests extends OpenSearchTestCase { + + public void testWithCreateCacheForIndicesRequestCacheType() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + + CacheModule cacheModule = new CacheModule( + List.of(mockPlugin1), + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").build() + ); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> onHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(onHeapCache); + + CacheService cacheService = cacheModule.getCacheService(); + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(onHeapCache, ircCache); + } + + public void testWithCreateCacheWithNoStoreNamePresentForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService(factoryMap, Settings.builder().build()); + + CacheConfig<String, String> config = mock(CacheConfig.class); + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No configuration exists for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + } + + public void testWithCreateCacheWithInvalidStoreNameAssociatedForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache").build() + ); + + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> onHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(config, CacheType.INDICES_REQUEST_CACHE, factoryMap)).thenReturn(onHeapCache); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No store name: [cache] is registered for cache type: 
INDICES_REQUEST_CACHE", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index e8ddfde11f4cc..4fd8986d0b428 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -779,7 +779,9 @@ public void testSamePrinterOutput() { DateTime jodaDate = new DateTime(year, month, day, hour, minute, second, DateTimeZone.UTC); for (FormatNames format : FormatNames.values()) { - if (format == FormatNames.ISO8601 || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS) { + if (format == FormatNames.ISO8601 + || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS + || format == FormatNames.RFC3339_LENIENT) { // Nanos aren't supported by joda continue; } diff --git a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java index 17c4f9d0fe13d..d9db57aef15b6 100644 --- a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java @@ -53,40 +53,45 @@ public void testParameterizedMessageLambda() throws Exception { appender.start(); final Logger testLogger = LogManager.getLogger(LoggersTests.class); Loggers.addAppender(testLogger, appender); - Loggers.setLevel(testLogger, Level.TRACE); + try { + Loggers.setLevel(testLogger, Level.TRACE); - Throwable ex = randomException(); - testLogger.error(() -> new ParameterizedMessage("an error message"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); - ex = randomException(); - testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); - testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); - assertThat(appender.lastEvent.getThrown(), nullValue()); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); - 
assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); - ex = randomException(); - testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); - ex = randomException(); - testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + } finally { + Loggers.removeAppender(testLogger, appender); + appender.stop(); + } } private Throwable randomException() { diff --git a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java index 827f9dd992294..ee71cfef7d925 100644 --- a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java @@ -32,6 +32,8 @@ package org.opensearch.common.lucene.store; +import org.apache.lucene.store.IndexInput; + import java.io.EOFException; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -153,4 +155,34 @@ public void testRandomAccessReads() throws IOException { // 10001001 00100101 10001001 00110000 11100111 00100100 10110001 00101110 assertEquals(-8564288273245753042L, indexInput.readLong(1)); } + + public void testReadBytesWithSlice() throws IOException { + int 
inputLength = randomIntBetween(100, 1000); + + byte[] input = randomUnicodeOfLength(inputLength).getBytes(StandardCharsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + + int sliceOffset = randomIntBetween(1, inputLength - 10); + int sliceLength = randomIntBetween(2, inputLength - sliceOffset); + IndexInput slice = indexInput.slice("slice", sliceOffset, sliceLength); + + // read a byte from sliced index input and verify if the read value is correct + assertEquals(input[sliceOffset], slice.readByte()); + + // read few more bytes into a byte array + int bytesToRead = randomIntBetween(1, sliceLength - 1); + slice.readBytes(new byte[bytesToRead], 0, bytesToRead); + + // now try to read beyond the boundary of the slice, but within the + // boundary of the original IndexInput. We've already read few bytes + // so this is expected to fail + assertThrows(EOFException.class, () -> slice.readBytes(new byte[sliceLength], 0, sliceLength)); + + // seek to EOF and then try to read + slice.seek(sliceLength); + assertThrows(EOFException.class, () -> slice.readBytes(new byte[1], 0, 1)); + + slice.close(); + indexInput.close(); + } } diff --git a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java index e366972feeaf2..2f0618ee299b4 100644 --- a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java +++ b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java @@ -36,8 +36,10 @@ import org.opensearch.rest.RestUtils; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -286,4 +288,33 @@ public void testEscapedSlashWithinUrl() { assertThat(params.get("type"), equalTo("type")); assertThat(params.get("id"), equalTo("id")); } + + public void testRetrieveAllEmpty() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + Iterator<String> allPaths = trie.retrieveAll(); + assertFalse(allPaths.hasNext()); + } + + public void testRetrieveAll() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", "test1"); + trie.insert("{testA}/{testB}", "test2"); + trie.insert("a/{testB}", "test3"); + trie.insert("{testA}/b", "test4"); + trie.insert("{testA}/b/c", "test5"); + + Iterator<String> iterator = trie.retrieveAll(); + assertTrue(iterator.hasNext()); + List<String> paths = new ArrayList<>(); + iterator.forEachRemaining(paths::add); + assertEquals(paths, List.of("test1", "test4", "test5", "test2", "test3")); + assertFalse(iterator.hasNext()); + } + + public void testRetrieveAllWithNllValue() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", null); + Iterator<String> iterator = trie.retrieveAll(); + assertFalse(iterator.hasNext()); + } } diff --git a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java index b256ab956f963..9358013826a1c 100644 --- a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java @@ -25,7 +25,7 @@ public class InsecureSettingTests extends OpenSearchTestCase { private List<String> rootLogMsgs = new ArrayList<>(); private AbstractAppender rootAppender; - protected void assertSettingWarning() { + private void 
assertSettingWarning() { assertWarnings( "[setting.name] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." ); @@ -50,13 +50,14 @@ public void append(LogEvent event) { InsecureSettingTests.this.rootLogMsgs.add(message); } }; - Loggers.addAppender(LogManager.getRootLogger(), rootAppender); rootAppender.start(); + Loggers.addAppender(LogManager.getLogger(SecureSetting.class), rootAppender); } @After public void removeInsecureSettingsAppender() { - Loggers.removeAppender(LogManager.getRootLogger(), rootAppender); + Loggers.removeAppender(LogManager.getLogger(SecureSetting.class), rootAppender); + rootAppender.stop(); } public void testShouldRaiseExceptionByDefault() { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index 13cecc7157d82..c6da96b521276 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -909,6 +909,18 @@ public void testDynamicKeySetting() { } } + public void testAffixKeySettingWithDynamicPrefix() { + Setting.AffixSetting<Boolean> setting = Setting.suffixKeySetting( + "enable", + (key) -> Setting.boolSetting(key, false, Property.NodeScope) + ); + Setting<Boolean> concreteSetting = setting.getConcreteSettingForNamespace("foo.bar"); + assertEquals("foo.bar.enable", concreteSetting.getKey()); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> setting.getConcreteSettingForNamespace("foo.")); + assertEquals("key [foo..enable] must match [*.enable] but didn't.", ex.getMessage()); + } + public void testAffixKeySetting() { Setting<Boolean> setting = Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); assertTrue(setting.hasComplexMatcher()); diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index 0becb6cde5e64..66c9801d16598 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -286,24 +286,9 @@ public void testDynamicIndexSettingsRegistration() { } public void testConcurrentSegmentSearchClusterSettings() { - // Test that we throw an exception without the feature flag - Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(); - SettingsException ex = expectThrows(SettingsException.class, () -> new SettingsModule(settings)); - assertEquals( - "unknown setting [" - + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey() - + "] please check that any required plugins are installed, or check the breaking " - + "changes documentation for removed settings", - ex.getMessage() - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); boolean settingValue = randomBoolean(); - Settings settingsWithFeatureFlag = Settings.builder() - .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue) - .build(); - SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); + Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
settingValue).build(); + SettingsModule settingsModule = new SettingsModule(settings); assertEquals(settingValue, SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settingsModule.getSettings())); } @@ -311,24 +296,9 @@ public void testConcurrentSegmentSearchIndexSettings() { Settings.Builder target = Settings.builder().put(Settings.EMPTY); Settings.Builder update = Settings.builder(); - // Test that we throw an exception without the feature flag SettingsModule module = new SettingsModule(Settings.EMPTY); IndexScopedSettings indexScopedSettings = module.getIndexScopedSettings(); - expectThrows( - SettingsException.class, - () -> indexScopedSettings.updateDynamicSettings( - Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), - target, - update, - "node" - ) - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); - SettingsModule moduleWithFeatureFlag = new SettingsModule(Settings.EMPTY); - IndexScopedSettings indexScopedSettingsWithFeatureFlag = moduleWithFeatureFlag.getIndexScopedSettings(); - indexScopedSettingsWithFeatureFlag.updateDynamicSettings( + indexScopedSettings.updateDynamicSettings( Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), target, update, @@ -337,23 +307,11 @@ public void testConcurrentSegmentSearchIndexSettings() { } public void testMaxSliceCountClusterSettingsForConcurrentSearch() { - // Test that we throw an exception without the feature flag - Settings settings = Settings.builder() - .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), 2) - .build(); - SettingsException ex = expectThrows(SettingsException.class, () -> new SettingsModule(settings)); - assertTrue( - ex.getMessage() - .contains("unknown setting [" + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey() + "]") - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); int settingValue = randomIntBetween(0, 10); - Settings settingsWithFeatureFlag = Settings.builder() + Settings settings = Settings.builder() .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); - SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); + SettingsModule settingsModule = new SettingsModule(settings); assertEquals( settingValue, (int) SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.get(settingsModule.getSettings()) @@ -361,10 +319,10 @@ public void testMaxSliceCountClusterSettingsForConcurrentSearch() { // Test that negative value is not allowed settingValue = -1; - final Settings settingsWithFeatureFlag_2 = Settings.builder() + final Settings settings_2 = Settings.builder() .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settingsWithFeatureFlag_2)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings_2)); assertTrue(iae.getMessage().contains(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())); } } diff --git a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java 
b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java index 681daf1755890..85c9919275c3a 100644 --- a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java @@ -249,10 +249,19 @@ public void testEpochSecondParserWithFraction() { } public void testEpochMilliParsersWithDifferentFormatters() { - DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); - TemporalAccessor accessor = formatter.parse("123"); - assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); - assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||epoch_millis")); + } } public void testParsersWithMultipleInternalFormats() throws Exception { @@ -317,6 +326,11 @@ public void testEqualsAndHashcode() { assertThat(epochMillisFormatter.hashCode(), is(DateFormatters.forPattern("epoch_millis").hashCode())); assertThat(epochMillisFormatter, sameInstance(DateFormatters.forPattern("epoch_millis"))); assertThat(epochMillisFormatter, equalTo(DateFormatters.forPattern("epoch_millis"))); + + DateFormatter rfc339Formatter = DateFormatters.forPattern("rfc3339_lenient"); + assertThat(rfc339Formatter.hashCode(), is(DateFormatters.forPattern("rfc3339_lenient").hashCode())); + assertThat(rfc339Formatter, sameInstance(DateFormatters.forPattern("rfc3339_lenient"))); + assertThat(rfc339Formatter, equalTo(DateFormatters.forPattern("rfc3339_lenient"))); } public void testSupportBackwardsJava8Format() { @@ -461,6 +475,132 @@ public void testIso8601Parsing() { formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); } + public void testRFC3339Parsing() { + DateFormatter formatter = DateFormatters.forPattern("rfc3339_lenient"); + + // timezone not allowed with just date + formatter.format(formatter.parse("2018")); + formatter.format(formatter.parse("2018-05")); + formatter.format(formatter.parse("2018-05-15")); + + formatter.format(formatter.parse("2018-05-15T17:14Z")); + formatter.format(formatter.parse("2018-05-15T17:14z")); + formatter.format(formatter.parse("2018-05-15T17:14+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14-01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14:56Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56z")); + formatter.format(formatter.parse("2018-05-15T17:14:56+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56-01:00")); + + // milliseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123z")); + 
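// Illustrative sketch, not part of this test: rfc3339_lenient is registered as a named date format,
// so it can be used on its own or combined with fallback formats via "||", for example:
//
//     DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||epoch_millis");
//     Instant fromTimestamp = DateFormatters.from(formatter.parse("2018-05-15T17:14:56.123Z")).toInstant();
//     Instant fromMillis = DateFormatters.from(formatter.parse("123")).toInstant(); // falls back to epoch_millis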
formatter.format(formatter.parse("2018-05-15T17:14:56,123+01:00")); + + // microseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-01:00")); + + // nanoseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); + + // 1994-11-05T08:15:30-05:00 corresponds to November 5, 1994, 8:15:30 am, US Eastern Standard Time/ + // 1994-11-05T13:15:30Z corresponds to the same instant. + final Instant instantA = DateFormatters.from(formatter.parse("1994-11-05T08:15:30-05:00")).toInstant(); + final Instant instantB = DateFormatters.from(formatter.parse("1994-11-05T13:15:30Z")).toInstant(); + assertThat(instantA, is(instantB)); + + // Invalid dates should throw an exception + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), is("failed to parse date field [abc] with format [rfc3339_lenient]")); + // Invalid offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56-00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56-00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.+00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.+00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56_00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56_00:00] with format [rfc3339_lenient]")); + // No offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // No end of fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // Invalid fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.abcZ")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.abcZ] with format [rfc3339_lenient]")); + // Invalid date + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("201805-15T17:14:56.123456+0000")); + assertThat(e.getMessage(), is("failed to parse date field [201805-15T17:14:56.123456+0000] with format [rfc3339_lenient]")); + // More 
than 9 digits of nanosecond resolution + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.1234567891Z")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.1234567891Z] with format [rfc3339_lenient]")); + } + + public void testRFC3339ParserWithDifferentFormatters() { + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||rfc3339_lenient"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896000L)); + assertThat(formatter.pattern(), is("strict_date_optional_time||rfc3339_lenient")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123456789+0100"); + assertThat(DateFormatters.from(accessor).toInstant().getNano(), is(123456789)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + } + + public void testRFC3339ParserAgainstDifferentFormatters() { + DateFormatter rfc3339Formatter = DateFormatter.forPattern("rfc3339_lenient"); + { + DateFormatter isoFormatter = DateFormatter.forPattern("strict_date_optional_time"); + + assertDateTimeEquals("2018-05-15T17:14Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123456Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789-01:00", rfc3339Formatter, isoFormatter); + } + } + public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second @@ -683,4 +823,10 @@ public void testCamelCaseDeprecation() { } } } + + void assertDateTimeEquals(String toTest, DateFormatter candidateParser, DateFormatter baselineParser) { + Instant gotInstant = DateFormatters.from(candidateParser.parse(toTest)).toInstant(); 
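+ // The baseline parser is the source of truth: both formatters must resolve the same text to the same instant.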
+ Instant expectedInstant = DateFormatters.from(baselineParser.parse(toTest)).toInstant(); + assertThat(gotInstant, is(expectedInstant)); + } } diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java index 2063cd26a9e8e..4823ce7a238e3 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java @@ -49,6 +49,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * Tests for OpenSearchExecutors and its components like OpenSearchAbortPolicy. @@ -279,6 +280,41 @@ public void testScaleDown() throws Exception { terminate(pool); } + /** + * The test case is adapted from https://bugs.openjdk.org/browse/JDK-8323659 reproducer. + */ + public void testScaleUpWithSpawningTask() throws Exception { + ThreadPoolExecutor pool = OpenSearchExecutors.newScaling( + getClass().getName() + "/" + getTestName(), + 0, + 1, + between(1, 100), + randomTimeUnit(), + OpenSearchExecutors.daemonThreadFactory("test"), + threadContext + ); + assertThat("Min property", pool.getCorePoolSize(), equalTo(0)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(1)); + + final CountDownLatch latch = new CountDownLatch(10); + class TestTask implements Runnable { + @Override + public void run() { + latch.countDown(); + if (latch.getCount() > 0) { + pool.execute(TestTask.this); + } + } + } + pool.execute(new TestTask()); + latch.await(); + + assertThat("wrong pool size", pool.getPoolSize(), lessThanOrEqualTo(1)); + assertThat("wrong active size", pool.getActiveCount(), lessThanOrEqualTo(1)); + + terminate(pool); + } + public void testRejectionMessageAndShuttingDownFlag() throws InterruptedException { int pool = between(1, 10); int queue = between(0, 100); diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 7f669934579ee..962eb743dca6e 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -359,6 +359,57 @@ protected void doRun() throws Exception { env.close(); } + public void testIndexStoreListener() throws Exception { + final AtomicInteger shardCounter = new AtomicInteger(0); + final AtomicInteger indexCounter = new AtomicInteger(0); + final Index index = new Index("foo", "fooUUID"); + final ShardId shardId = new ShardId(index, 0); + final NodeEnvironment.IndexStoreListener listener = new NodeEnvironment.IndexStoreListener() { + @Override + public void beforeShardPathDeleted(ShardId inShardId, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(shardId, inShardId); + shardCounter.incrementAndGet(); + } + + @Override + public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(index, inIndex); + indexCounter.incrementAndGet(); + } + }; + final NodeEnvironment env = newNodeEnvironment(listener); + + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve("0")); + } + + for (Path path : env.indexPaths(index)) { + assertTrue(Files.exists(path.resolve("0"))); + } + assertEquals(0, shardCounter.get()); + + 
env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings); + + for (Path path : env.indexPaths(index)) { + assertFalse(Files.exists(path.resolve("0"))); + } + assertEquals(1, shardCounter.get()); + + for (Path path : env.indexPaths(index)) { + assertTrue(Files.exists(path)); + } + assertEquals(0, indexCounter.get()); + + env.deleteIndexDirectorySafe(index, 5000, idxSettings); + + for (Path path : env.indexPaths(index)) { + assertFalse(Files.exists(path)); + } + assertEquals(1, indexCounter.get()); + assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); + env.close(); + } + public void testStressShardLock() throws IOException, InterruptedException { class Int { int value = 0; @@ -629,6 +680,11 @@ public NodeEnvironment newNodeEnvironment() throws IOException { return newNodeEnvironment(Settings.EMPTY); } + public NodeEnvironment newNodeEnvironment(NodeEnvironment.IndexStoreListener listener) throws IOException { + Settings build = buildEnvSettings(Settings.EMPTY); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), listener); + } + @Override public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = buildEnvSettings(settings); diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java new file mode 100644 index 0000000000000..92669d5bc1d92 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +public class BloomFilterTests extends OpenSearchTestCase { + + public void testBloomFilterSerializationDeserialization() throws IOException { + int elementCount = randomIntBetween(1, 100); + long maxDocs = elementCount * 10L; // Keeping this high so that it ensures some bits are not set. 
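// Sizing sketch using the standard Bloom filter formulas (the BloomFilter under test may size its
// bit set differently); for n expected elements and target false-positive probability p:
//
//     long bits   = (long) Math.ceil(-n * Math.log(p) / (Math.log(2) * Math.log(2)));   // m = -n ln(p) / (ln 2)^2
//     int  hashes = Math.max(1, (int) Math.round((double) bits / n * Math.log(2)));     // k = (m / n) ln 2
//
// e.g. n = 1000 and p = 0.01 gives about 9,586 bits and 7 hash functions.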
+ BloomFilter filter = new BloomFilter(maxDocs, getFpp(), () -> idIterator(elementCount)); + byte[] buffer = new byte[(int) maxDocs * 5]; + ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); + + // Write in the format readable through factory + out.writeString(filter.setType().getSetName()); + filter.writeTo(out); + + FuzzySet reconstructedFilter = FuzzySetFactory.deserializeFuzzySet(new ByteArrayIndexInput("filter", buffer)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, reconstructedFilter.setType()); + + Iterator<BytesRef> idIterator = idIterator(elementCount); + while (idIterator.hasNext()) { + BytesRef element = idIterator.next(); + assertEquals(FuzzySet.Result.MAYBE, reconstructedFilter.contains(element)); + assertEquals(FuzzySet.Result.MAYBE, filter.contains(element)); + } + } + + public void testBloomFilterIsSaturated_returnsTrue() throws IOException { + BloomFilter bloomFilter = new BloomFilter(1L, getFpp(), () -> idIterator(1000)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(true, bloomFilter.isSaturated()); + } + + public void testBloomFilterIsSaturated_returnsFalse() throws IOException { + int elementCount = randomIntBetween(1, 100); + BloomFilter bloomFilter = new BloomFilter(20000, getFpp(), () -> idIterator(elementCount)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(false, bloomFilter.isSaturated()); + } + + public void testBloomFilterWithLargeCapacity() throws IOException { + long maxDocs = randomLongBetween(Integer.MAX_VALUE, 5L * Integer.MAX_VALUE); + BloomFilter bloomFilter = new BloomFilter(maxDocs, getFpp(), () -> List.of(new BytesRef("bar")).iterator()); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + } + + private double getFpp() { + return randomDoubleBetween(0.01, 0.50, true); + } + + private Iterator<BytesRef> idIterator(int count) { + return new Iterator<BytesRef>() { + int cnt = count; + + @Override + public boolean hasNext() { + return cnt-- > 0; + } + + @Override + public BytesRef next() { + return new BytesRef(Integer.toString(cnt)); + } + }; + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java new file mode 100644 index 0000000000000..868c2175d0689 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; + +import java.util.TreeMap; + +public class FuzzyFilterPostingsFormatTests extends BasePostingsFormatTestCase { + + private TreeMap<String, FuzzySetParameters> params = new TreeMap<>() { + @Override + public FuzzySetParameters get(Object k) { + return new FuzzySetParameters(() -> FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY); + } + }; + + private Codec fuzzyFilterCodec = TestUtil.alwaysPostingsFormat( + new FuzzyFilterPostingsFormat(TestUtil.getDefaultPostingsFormat(), new FuzzySetFactory(params)) + ); + + @Override + protected Codec getCodec() { + return fuzzyFilterCodec; + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 5b586524d0bfc..cc927a19fd01a 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -78,6 +78,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.apache.lucene.tests.index.ForceMergePolicy; import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; @@ -152,6 +153,7 @@ import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.MockLogAppender; import org.opensearch.test.VersionUtils; @@ -3278,12 +3280,15 @@ public void onFailedEngine(String reason, Exception e) { final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>( new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) ); + + // Just allow force merge so that regular merge does not close the shard first before any any other operation + // InternalEngine engine = createEngine( config( defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3377,7 +3382,7 @@ public void onFailedEngine(String reason, Exception e) { defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3446,7 +3451,8 @@ public void eval(MockDirectoryWrapper dir) throws IOException { wrapper.failOn(fail); MockLogAppender mockAppender = MockLogAppender.createForLoggers(Loggers.getLogger(Engine.class, shardId)); try { - Store store = createStore(wrapper); + // Create a store where directory is closed during unreferenced file cleanup. 
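+ // The returned Store throws AlreadyClosedException from directory() only while the call stack
+ // contains cleanUpUnreferencedFiles, simulating the directory being closed concurrently at exactly
+ // that point; the force-merge-only policy used in this engine config keeps background merges out of the way.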
+ Store store = createFailingDirectoryStore(wrapper); final Engine.EventListener eventListener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, Exception e) { @@ -3473,7 +3479,7 @@ public void onFailedEngine(String reason, Exception e) { defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3534,6 +3540,33 @@ public void testSettings() { assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); } + /** + * Creates a merge policy which only supports force merge. + * @return returns a merge policy which only supports force merge. + */ + private MergePolicy newForceMergePolicy() { + return new ForceMergePolicy(new TieredMergePolicy()); + } + + /** + * Create a store where directory is closed when referenced while unreferenced file cleanup. + * + * @param directory directory used for creating the store. + * @return a store where directory is closed when referenced while unreferenced file cleanup. + */ + private Store createFailingDirectoryStore(final Directory directory) { + return new Store(shardId, INDEX_SETTINGS, directory, new DummyShardLock(shardId)) { + @Override + public Directory directory() { + if (callStackContainsAnyOf("cleanUpUnreferencedFiles")) { + throw new AlreadyClosedException("store is already closed"); + } + + return super.directory(); + } + }; + } + public void testCurrentTranslogUUIIDIsCommitted() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { diff --git a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java index bcdca2236d3f3..3fb43b7dbdc4e 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java @@ -138,13 +138,15 @@ public void testGetForFieldRuntimeField() { ); final SetOnce<Supplier<SearchLookup>> searchLookupSetOnce = new SetOnce<>(); MappedFieldType ft = mock(MappedFieldType.class); + final int shardId = randomInt(); when(ft.fielddataBuilder(Mockito.any(), Mockito.any())).thenAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") Supplier<SearchLookup> searchLookup = (Supplier<SearchLookup>) invocationOnMock.getArguments()[1]; searchLookupSetOnce.set(searchLookup); + assertEquals(searchLookup.get().shardId(), shardId); return (IndexFieldData.Builder) (cache, breakerService) -> null; }); - SearchLookup searchLookup = new SearchLookup(null, null); + SearchLookup searchLookup = new SearchLookup(null, null, shardId); ifdService.getForField(ft, "qualified", () -> searchLookup); assertSame(searchLookup, searchLookupSetOnce.get().get()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java index 1a66037d98d71..0a2435553b19e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java @@ -32,10 +32,14 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; 
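// Illustrative sketch, not part of this change: the assertions updated below expect IP term and range
// queries to pair the point query with an equivalent doc-values query, letting Lucene pick the cheaper
// of the two per segment:
//
//     Query point = InetAddressPoint.newExactQuery("field", InetAddresses.forString("192.168.1.7"));
//     Query dv = SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) point).getLowerPoint()));
//     Query combined = new IndexOrDocValuesQuery(point, dv);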
+import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -75,22 +79,41 @@ public void testTermQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); String ip = "2001:db8::2:1"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + + Query query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "192.168.1.7"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "2001:db8::2:1"; String prefix = ip + "/64"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); + + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64); + assertEquals(query, ft.termQuery(prefix, null)); ip = "192.168.1.7"; prefix = ip + "/16"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16); + assertEquals(query, ft.termQuery(prefix, null)); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("::1", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testTermsQuery() { @@ -118,44 +141,123 @@ public void testTermsQuery() { public void testRangeQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); - + Query query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")), + new IndexOrDocValuesQuery( + query, + 
SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")); assertEquals( - InetAddressPoint.newRangeQuery("field", 
InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, null) ); @@ -178,30 +280,60 @@ public void testRangeQuery() { ) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")); assertEquals( // lower bound is ipv4, upper bound is ipv6 - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, null) ); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testFetchSourceValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index bb3f2be8ea748..adcfc9d7b17fc 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ 
b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -63,7 +63,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; @@ -541,6 +543,28 @@ public void testReloadSearchAnalyzers() throws IOException { ); } + public void testMapperDynamicAllowedIgnored() { + final List<Function<Settings.Builder, Settings.Builder>> scenarios = List.of( + (builder) -> builder.putNull(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey()), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), true), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + ); + + for (int i = 0; i < scenarios.size(); i++) { + final Settings.Builder defaultSettingsBuilder = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); + + final Settings settings = scenarios.get(i).apply(defaultSettingsBuilder).build(); + + createIndex("test" + i, settings).mapperService(); + } + + assertWarnings( + "[index.mapper.dynamic] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." + ); + } + private boolean assertSameContainedFilters(TokenFilterFactory[] originalTokenFilter, NamedAnalyzer updatedAnalyzer) { ReloadableCustomAnalyzer updatedReloadableAnalyzer = (ReloadableCustomAnalyzer) updatedAnalyzer.analyzer(); TokenFilterFactory[] newTokenFilters = updatedReloadableAnalyzer.getComponents().getTokenFilters(); diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 62337264bc0b1..29efd64e5c751 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -34,12 +34,16 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; @@ -58,6 +62,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static org.opensearch.index.IndexSettingsTests.newIndexMeta; import static org.opensearch.index.query.InnerHitBuilderTests.randomNestedInnerHits; @@ -431,4 +436,96 @@ public void testSetParentFilterInContext() throws Exception { assertNull(queryShardContext.getParentFilter()); verify(innerQueryBuilder).toQuery(queryShardContext); } + + public void testNestedDepthProhibited() throws Exception { + assertThrows(IllegalArgumentException.class, () -> doWithDepth(0, context -> fail("won't call"))); + } + + public void testNestedDepthAllowed() throws Exception { + 
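The nested-query tests above exercise the index.query.max_nested_depth guard (default 20 per the test), which rejects overly deep nested query trees while still allowing sibling nested queries to be parsed independently. The production check lives in the query parsing code and is not shown in this diff; the following is a hedged, standalone sketch of how such a depth counter behaves, reusing the error message the test asserts:

```java
// Minimal standalone sketch of a nested-query depth guard; the setting name and error
// message come from the change above, the counter logic here is illustrative only.
public class NestedDepthGuardSketch {
    static final String SETTING = "index.query.max_nested_depth";

    private final int maxDepth;
    private int currentDepth = 0;

    NestedDepthGuardSketch(int maxDepth) {
        this.maxDepth = maxDepth;
    }

    /** Called when parsing enters a nested query; throws once the limit is breached. */
    void enterNested() {
        currentDepth++;
        if (currentDepth > maxDepth) {
            throw new IllegalArgumentException(
                "The depth of Nested Query is [" + currentDepth + "] has exceeded the allowed maximum of ["
                    + maxDepth + "]. This maximum can be set by changing the [" + SETTING + "] index level setting."
            );
        }
    }

    /** Called when parsing leaves a nested query, so sibling nested queries are counted independently. */
    void exitNested() {
        currentDepth--;
    }

    public static void main(String[] args) {
        NestedDepthGuardSketch guard = new NestedDepthGuardSketch(1);
        guard.enterNested();          // first level is allowed
        try {
            guard.enterNested();      // second level breaches maxDepth=1
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```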
ThrowingConsumer<QueryShardContext> check = (context) -> { + NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None); + OpenSearchToParentBlockJoinQuery blockJoinQuery = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); + Optional<BooleanClause> childLeg = ((BooleanQuery) blockJoinQuery.getChildQuery()).clauses() + .stream() + .filter(c -> c.getOccur() == BooleanClause.Occur.MUST) + .findFirst(); + assertTrue(childLeg.isPresent()); + assertEquals(new MatchAllDocsQuery(), childLeg.get().getQuery()); + }; + check.accept(createShardContext()); + doWithDepth(randomIntBetween(1, 20), check); + } + + public void testNestedDepthOnceOnly() throws Exception { + doWithDepth(1, this::checkOnceNested); + } + + public void testNestedDepthDefault() throws Exception { + assertEquals(20, createShardContext().getIndexSettings().getMaxNestedQueryDepth()); + } + + private void checkOnceNested(QueryShardContext ctx) throws Exception { + { + NestedQueryBuilder depth2 = new NestedQueryBuilder( + "nested1", + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None), + ScoreMode.None + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> depth2.toQuery(ctx)); + assertEquals( + "The depth of Nested Query is [2] has exceeded the allowed maximum of [1]. This maximum can be set by changing the [index.query.max_nested_depth] index level setting.", + e.getMessage() + ); + } + { + QueryBuilder mustBjqMustBjq = new BoolQueryBuilder().must( + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None) + ).must(new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None)); + BooleanQuery bool = (BooleanQuery) mustBjqMustBjq.toQuery(ctx); + assertEquals( + "Can parse joins one by one without breaching depth limit", + 2, + bool.clauses().stream().filter(c -> c.getQuery() instanceof OpenSearchToParentBlockJoinQuery).count() + ); + } + } + + public void testUpdateMaxDepthSettings() throws Exception { + doWithDepth(2, (ctx) -> { + assertEquals(ctx.getIndexSettings().getMaxNestedQueryDepth(), 2); + NestedQueryBuilder depth2 = new NestedQueryBuilder( + "nested1", + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None), + ScoreMode.None + ); + Query depth2Query = depth2.toQuery(ctx); + assertTrue(depth2Query instanceof OpenSearchToParentBlockJoinQuery); + }); + } + + void doWithDepth(int depth, ThrowingConsumer<QueryShardContext> test) throws Exception { + QueryShardContext context = createShardContext(); + int defLimit = context.getIndexSettings().getMaxNestedQueryDepth(); + assertTrue(defLimit > 0); + Settings updateSettings = Settings.builder() + .put(context.getIndexSettings().getSettings()) + .put("index.query.max_nested_depth", depth) + .build(); + context.getIndexSettings().updateIndexMetadata(IndexMetadata.builder("index").settings(updateSettings).build()); + try { + test.accept(context); + } finally { + context.getIndexSettings() + .updateIndexMetadata( + IndexMetadata.builder("index") + .settings( + Settings.builder() + .put(context.getIndexSettings().getSettings()) + .put("index.query.max_nested_depth", defLimit) + .build() + ) + .build() + ); + } + } } diff --git a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java index c819d35872c6e..1a2ad49a3f334 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java 
+++ b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java @@ -91,6 +91,8 @@ public class QueryShardContextTests extends OpenSearchTestCase { + private static final int SHARD_ID = 0; + public void testFailIfFieldMappingNotFound() { QueryShardContext context = createQueryShardContext(IndexMetadata.INDEX_UUID_NA_VALUE, null); context.setAllowUnmappedFields(false); @@ -307,6 +309,11 @@ public void testFielddataLookupOneFieldManyReferences() throws IOException { assertEquals(Arrays.asList(expectedFirstDoc.toString(), expectedSecondDoc.toString()), collect("field", queryShardContext)); } + public void testSearchLookupShardId() { + SearchLookup searchLookup = createQueryShardContext("uuid", null, null).lookup(); + assertEquals(SHARD_ID, searchLookup.shardId()); + } + public static QueryShardContext createQueryShardContext(String indexUuid, String clusterAlias) { return createQueryShardContext(indexUuid, clusterAlias, null); } @@ -343,7 +350,7 @@ private static QueryShardContext createQueryShardContext( } final long nowInMillis = randomNonNegativeLong(); return new QueryShardContext( - 0, + SHARD_ID, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 811d6a722d0f6..85878cc2e1c9d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.tests.store.BaseDirectoryWrapper; @@ -102,6 +103,16 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { public void tearDown() throws Exception { Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + + for (ReferenceManager.RefreshListener refreshListener : indexShard.getEngine().config().getInternalRefreshListener()) { + if (refreshListener instanceof ReleasableRetryableRefreshListener) { + ((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes(); + } + } + if (remoteStoreRefreshListener != null) { + remoteStoreRefreshListener.drainRefreshes(); + } + closeShards(indexShard); super.tearDown(); } @@ -335,6 +346,7 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 0); + assertTrue("remote store in sync", tuple.v1().isRemoteSegmentStoreInSync()); } public void testRefreshSuccessOnSecondAttempt() throws Exception { @@ -404,6 +416,20 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { assertNoLagAndTotalUploadsFailed(segmentTracker, 2); } + public void testRefreshPersistentFailure() throws Exception { + int succeedOnAttempt = 10; + CountDownLatch refreshCountLatch = new CountDownLatch(1); + CountDownLatch successLatch = new CountDownLatch(10); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = 
mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch + ); + // Giving 10ms for some iterations of remote refresh upload + Thread.sleep(10); + assertFalse("remote store should not in sync", tuple.v1().isRemoteSegmentStoreInSync()); + } + private void assertNoLagAndTotalUploadsFailed(RemoteSegmentTransferTracker segmentTracker, long totalUploadsFailed) throws Exception { assertBusy(() -> { assertEquals(0, segmentTracker.getBytesLag()); @@ -418,13 +444,40 @@ public void testTrackerData() throws Exception { RemoteStoreRefreshListener listener = tuple.v1(); RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); RemoteSegmentTransferTracker tracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); - assertNoLag(tracker); + assertBusy(() -> assertNoLag(tracker)); indexDocs(100, randomIntBetween(100, 200)); indexShard.refresh("test"); listener.afterRefresh(true); assertBusy(() -> assertNoLag(tracker)); } + /** + * Tests segments upload fails with replication checkpoint and replication tracker primary term mismatch + */ + public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { + int totalAttempt = 1; + int checkpointPublishSucceedOnAttempt = 0; + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. + CountDownLatch refreshCountLatch = new CountDownLatch(totalAttempt); + + // success latch should change as we would be failed primary term latest validation. + CountDownLatch successLatch = new CountDownLatch(1); + CountDownLatch reachedCheckpointPublishLatch = new CountDownLatch(0); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + totalAttempt, + refreshCountLatch, + successLatch, + checkpointPublishSucceedOnAttempt, + reachedCheckpointPublishLatch, + false + ); + + assertBusy(() -> assertEquals(1, tuple.v2().getRemoteSegmentTransferTracker(indexShard.shardId()).getTotalUploadsFailed())); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + assertBusy(() -> assertEquals(0, reachedCheckpointPublishLatch.getCount())); + } + private void assertNoLag(RemoteSegmentTransferTracker tracker) { assertEquals(0, tracker.getRefreshSeqNoLag()); assertEquals(0, tracker.getBytesLag()); @@ -460,6 +513,24 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch + ) throws IOException { + return mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + succeedCheckpointPublishOnAttempt, + reachedCheckpointPublishLatch, + true + ); + } + + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( + int succeedOnAttempt, + CountDownLatch refreshCountLatch, + CountDownLatch successLatch, + int succeedCheckpointPublishOnAttempt, + CountDownLatch reachedCheckpointPublishLatch, + boolean mockPrimaryTerm ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -473,6 +544,14 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn new InternalEngineFactory() ); + RemoteSegmentTransferTracker tracker = indexShard.getRemoteStoreStatsTrackerFactory() + 
.getRemoteSegmentTransferTracker(indexShard.shardId()); + try { + assertBusy(() -> assertTrue(tracker.getTotalUploadsSucceeded() > 0)); + } catch (Exception e) { + assert false; + } + indexDocs(1, randomIntBetween(1, 100)); // Mock indexShard.store().directory() @@ -492,6 +571,9 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Mock indexShard.getOperationPrimaryTerm() + if (mockPrimaryTerm) { + when(shard.getOperationPrimaryTerm()).thenReturn(indexShard.getOperationPrimaryTerm()); + } when(shard.getLatestReplicationCheckpoint()).thenReturn(indexShard.getLatestReplicationCheckpoint()); // Mock indexShard.routingEntry().primary() @@ -512,6 +594,7 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn // Mock indexShard.getSegmentInfosSnapshot() doAnswer(invocation -> { if (counter.incrementAndGet() <= succeedOnAttempt) { + logger.error("Failing in get segment info {}", counter.get()); throw new RuntimeException("Inducing failure in upload"); } return indexShard.getSegmentInfosSnapshot(); @@ -527,6 +610,7 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { successLatch.countDown(); + logger.info("Value fo latch {}", successLatch.getCount()); } return indexShard.getEngine(); }).when(shard).getEngine(); @@ -554,7 +638,6 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn RecoverySettings recoverySettings = mock(RecoverySettings.class); when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); when(shard.getRecoverySettings()).thenReturn(recoverySettings); - RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); @@ -587,6 +670,31 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + assertTrue(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } + + public void testRemoteSegmentStoreNotInSync() throws IOException { + setup(true, 3); + remoteStoreRefreshListener.afterRefresh(true); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + verifyUploadedSegments(remoteSegmentStoreDirectory); + remoteStoreRefreshListener.isRemoteSegmentStoreInSync(); + boolean oneFileDeleted = false; + // Delete any one file from remote store + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (oneFileDeleted == false && RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file) == false) { + remoteSegmentStoreDirectory.deleteFile(file); + oneFileDeleted = true; + break; + } + } + } + assertFalse(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } } } diff --git a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java index c1a51bb780f61..846b975a9520e 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java @@ -270,6 +270,7 @@ public void testStatsDirWrapper() throws IOException { IOUtils.close(dir, target); } + @SuppressWarnings("removal") public boolean hardLinksSupported(Path path) throws IOException { try { Files.createFile(path.resolve("foo.bar")); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 2c6c4afed69fd..e2ebb2e642bfe 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; @@ -41,6 +42,8 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -58,6 +61,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.mockito.Mockito; @@ -971,21 +975,85 @@ public void testDeleteStaleCommitsWithinThreshold() throws Exception { verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); } + @TestLogging(value = "_root:debug", reason = "Validate logging output") public void testDeleteStaleCommitsActualDelete() throws Exception { + try (final MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getRootLogger())) { + appender.addExpectation( + new MockLogAppender.PatternSeenWithLoggerPrefixExpectation( + "Metadata files to delete message", + "org.opensearch.index.store.RemoteSegmentStoreDirectory", + Level.DEBUG, + "metadataFilesEligibleToDelete=\\[" + metadataFilename3 + "\\] metadataFilesToBeDeleted=\\[" + metadataFilename3 + "\\]" + ) + ); + + final Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); + final List<String> filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3) + .values() + .stream() + .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]) + .collect(Collectors.toList()); + + remoteSegmentStoreDirectory.init(); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + for (final String file : filesToBeDeleted) { + verify(remoteDataDirectory).deleteFile(file); + } + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + appender.assertAllExpectationsMatched(); + } + } + + public void testDeleteStaleCommitsActualDeleteWithLocks() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); 
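The lock-aware stale-commit tests below rely on one filtering step: metadata files that a snapshot lock still references must never be treated as stale, even when they are old enough to delete. A minimal sketch of that step with plain collections; the method and variable names are assumptions, not the actual RemoteSegmentStoreDirectory code:

```java
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class StaleCommitFilterSketch {
    // Of the metadata files old enough to delete, keep only those no lock references.
    static List<String> filesToDelete(List<String> metadataFilesEligibleToDelete, Set<String> lockedMetadataFiles) {
        return metadataFilesEligibleToDelete.stream()
            .filter(file -> lockedMetadataFiles.contains(file) == false)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> eligible = List.of("metadata_2", "metadata_3");
        Set<String> locked = Set.of("metadata_2");             // e.g. held by an ongoing snapshot
        System.out.println(filesToDelete(eligible, locked));   // prints [metadata_3]
    }
}
```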
remoteSegmentStoreDirectory.init(); + // Locking one of the metadata files to ensure that it is not getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2)); + // popluateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted - remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } - ; assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2); + } + + public void testDeleteStaleCommitsNoDeletesDueToLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Locking all the old metadata files to ensure that none of the segment files are getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2, metadataFilename3)); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsExceptionWhileFetchingLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Locking one of the metadata files to ensure that it is not getting deleted. 
+ when(mdLockManager.fetchLockedMetadataFiles(any())).thenThrow(new RuntimeException("Rate limit exceeded")); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); } public void testDeleteStaleCommitsDeleteDedup() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java index b4eac2c4548d5..299100b65a43e 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java @@ -17,6 +17,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Set; import junit.framework.TestCase; @@ -96,4 +97,26 @@ public void testIsAcquiredExceptionCase() { // metadata file is not passed durin FileLockInfo testLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); assertThrows(IllegalArgumentException.class, () -> remoteStoreMetadataLockManager.isAcquired(testLockInfo)); } + + public void testFetchLocksEmpty() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn(Set.of()); + assertEquals(0, remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata").size()); + } + + public void testFetchLocksNonEmpty() throws IOException { + String metadata1 = "metadata_1_2_3"; + String metadata2 = "metadata_4_5_6"; + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn( + Set.of( + FileLockInfo.LockFileUtils.generateLockName(metadata1, "snapshot1"), + FileLockInfo.LockFileUtils.generateLockName(metadata2, "snapshot2") + ) + ); + assertEquals(Set.of(metadata1, metadata2), remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } + + public void testFetchLocksException() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenThrow(new IOException("Something went wrong")); + assertThrows(IOException.class, () -> remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index 2204124f1de4f..a135802c5f49c 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -78,11 +78,31 @@ public void test4MBBlock() throws Exception { runAllTestsFor(22); } - public void testChunkedRepository() throws IOException { - final long blockSize = new ByteSizeValue(1, ByteSizeUnit.KB).getBytes(); - final long repositoryChunkSize = new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(); - final long fileSize = new ByteSizeValue(3, ByteSizeUnit.KB).getBytes(); + public void testChunkedRepositoryWithBlockSizeGreaterThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(8, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, 
ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeLessThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(1, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(3, ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeEqualToChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, ByteSizeUnit.KB).getBytes() // file size + ); + } + private void verifyChunkedRepository(long blockSize, long repositoryChunkSize, long fileSize) throws IOException { when(transferManager.fetchBlob(any())).thenReturn(new ByteArrayIndexInput("test", new byte[(int) blockSize])); try ( FSDirectory directory = new MMapDirectory(path, lockFactory); @@ -105,8 +125,9 @@ public void testChunkedRepository() throws IOException { // Seek to the position past the first repository chunk indexInput.seek(repositoryChunkSize); } - // Verify the second chunk is requested (i.e. ".part1") - verify(transferManager).fetchBlob(argThat(request -> request.getBlobName().equals("File_Name.part1"))); + + // Verify all the chunks related to block are added to the fetchBlob request + verify(transferManager).fetchBlob(argThat(request -> request.getBlobLength() == blockSize)); } private void runAllTestsFor(int blockSizeShift) throws Exception { @@ -115,6 +136,7 @@ private void runAllTestsFor(int blockSizeShift) throws Exception { TestGroup.testGetBlock(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockOffset(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockStart(blockedSnapshotFile, blockSize); + TestGroup.testGetBlobParts(blockedSnapshotFile); TestGroup.testCurrentBlockStart(blockedSnapshotFile, blockSize); TestGroup.testCurrentBlockPosition(blockedSnapshotFile, blockSize); TestGroup.testClone(blockedSnapshotFile, blockSize); @@ -252,6 +274,35 @@ public static void testGetBlockStart(OnDemandBlockSnapshotIndexInput blockedSnap assertEquals(blockSize * 2, blockedSnapshotFile.getBlockStart(2)); } + public static void testGetBlobParts(OnDemandBlockSnapshotIndexInput blockedSnapshotFile) { + // block id 0 + int blockId = 0; + long blockStart = blockedSnapshotFile.getBlockStart(blockId); + long blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 1 + blockId = 1; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 2 + blockId = 2; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + } + public static void testCurrentBlockStart(OnDemandBlockSnapshotIndexInput blockedSnapshotFile, int blockSize) throws IOException { // block 0 blockedSnapshotFile.seek(blockSize - 1); diff --git 
a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java index 04434fa52e555..e2a6a4011a6b7 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java @@ -18,7 +18,6 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; import org.junit.After; @@ -59,7 +58,7 @@ public class FileCacheCleanerTests extends OpenSearchTestCase { @Before public void setUpFileCache() throws IOException { env = newNodeEnvironment(SETTINGS); - cleaner = new FileCacheCleaner(env, fileCache); + cleaner = new FileCacheCleaner(() -> fileCache); files.put(SHARD_0, addFile(fileCache, env, SHARD_0)); files.put(SHARD_1, addFile(fileCache, env, SHARD_1)); MatcherAssert.assertThat(fileCache.size(), equalTo(2L)); @@ -103,12 +102,11 @@ public void testShardRemoved() { final Path cachePath = ShardPath.loadFileCachePath(env, SHARD_0).getDataPath(); assertTrue(Files.exists(cachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(1L)); assertNull(fileCache.get(files.get(SHARD_0))); assertFalse(Files.exists(files.get(SHARD_0))); assertTrue(Files.exists(files.get(SHARD_1))); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); assertFalse(Files.exists(cachePath)); } @@ -116,15 +114,9 @@ public void testIndexRemoved() { final Path indexCachePath = env.fileCacheNodePath().fileCachePath.resolve(SHARD_0.getIndex().getUUID()); assertTrue(Files.exists(indexCachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.beforeIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexRemoved( - SHARD_0.getIndex(), - INDEX_SETTINGS, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED - ); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); + cleaner.beforeShardPathDeleted(SHARD_1, INDEX_SETTINGS, env); + cleaner.beforeIndexPathDeleted(SHARD_0.getIndex(), INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(0L)); assertFalse(Files.exists(indexCachePath)); } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java index d42e614302658..7ae3944eb6944 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java @@ -163,17 +163,11 @@ public void testUsageExceedsCapacity() throws Exception { public void testDownloadFails() throws Exception { doThrow(new IOException("Expected test exception")).when(blobContainer).readBlob(eq("failure-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("failure-blob", 0, EIGHT_MB)); expectThrows( IOException.class, - () -> transferManager.fetchBlob( - BlobFetchRequest.builder() - 
.blobName("failure-blob") - .position(0) - .fileName("file") - .directory(directory) - .length(EIGHT_MB) - .build() - ) + () -> transferManager.fetchBlob(BlobFetchRequest.builder().fileName("file").directory(directory).blobParts(blobParts).build()) ); MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); MatcherAssert.assertThat(fileCache.usage().usage(), equalTo(0L)); @@ -187,16 +181,13 @@ public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception latch.await(); return new ByteArrayInputStream(createData()); }).when(blobContainer).readBlob(eq("blocking-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blocking-blob", 0, EIGHT_MB)); + final Thread blockingThread = new Thread(() -> { try { transferManager.fetchBlob( - BlobFetchRequest.builder() - .blobName("blocking-blob") - .position(0) - .fileName("blocking-file") - .directory(directory) - .length(EIGHT_MB) - .build() + BlobFetchRequest.builder().fileName("blocking-file").directory(directory).blobParts(blobParts).build() ); } catch (IOException e) { throw new RuntimeException(e); @@ -216,9 +207,9 @@ public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception } private IndexInput fetchBlobWithName(String blobname) throws IOException { - return transferManager.fetchBlob( - BlobFetchRequest.builder().blobName("blob").position(0).fileName(blobname).directory(directory).length(EIGHT_MB).build() - ); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blob", 0, EIGHT_MB)); + return transferManager.fetchBlob(BlobFetchRequest.builder().fileName(blobname).directory(directory).blobParts(blobParts).build()); } private static void assertIndexInputIsFunctional(IndexInput indexInput) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 6bfab278993ed..a83e737dc25c1 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -86,7 +86,6 @@ import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -715,6 +714,7 @@ public void testSimpleOperationsUpload() throws Exception { translog.setMinSeqNoToKeep(0); // This should not trim anything from local translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(2, translog.readers.size()); assertBusy(() -> { assertEquals(4, translog.allUploaded().size()); @@ -728,6 +728,7 @@ public void testSimpleOperationsUpload() throws Exception { // This should not trim tlog-2.* files from remote as we not uploading any more translog to remote translog.setMinSeqNoToKeep(1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> { assertEquals(4, translog.allUploaded().size()); @@ -766,6 +767,7 @@ public void testMetadataFileDeletion() throws Exception { addToTranslogAndListAndUpload(translog, ops, new 
Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); translog.setMinSeqNoToKeep(i); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); } assertBusy(() -> assertEquals(4, translog.allUploaded().size())); @@ -776,6 +778,7 @@ public void testMetadataFileDeletion() throws Exception { addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); } translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1 + moreDocs, translog.readers.size()); assertBusy(() -> assertEquals(2 + 2L * moreDocs, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); @@ -783,6 +786,7 @@ public void testMetadataFileDeletion() throws Exception { int totalDocs = numDocs + moreDocs; translog.setMinSeqNoToKeep(totalDocs - 1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); addToTranslogAndListAndUpload( translog, @@ -791,6 +795,7 @@ public void testMetadataFileDeletion() throws Exception { ); translog.setMinSeqNoToKeep(totalDocs); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); // Change primary term and test the deletion of older primaries @@ -841,6 +846,7 @@ public void testDrainSync() throws Exception { translog.setMinSeqNoToKeep(0); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); // Case 1 - During ongoing uploads, the available permits are 0. @@ -869,6 +875,7 @@ public void testDrainSync() throws Exception { // Case 3 - After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. 
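Each trimUnreferencedReaders call in these translog tests is now followed by a wait until isRemoteGenerationDeletionPermitsAvailable() returns true, because remote deletions run asynchronously under a fixed pool of permits and assertions on reader or upload counts are only safe once all permits are back. Below is a rough standalone sketch of that wait-for-permits pattern, using a plain Semaphore and a polling loop in place of RemoteFsTranslog internals and OpenSearch's assertBusy(); the permit count is an assumption for illustration:

```java
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class DeletionPermitsSketch {
    static final int REMOTE_DELETION_PERMITS = 2;    // assumed value, illustrative only
    static final Semaphore permits = new Semaphore(REMOTE_DELETION_PERMITS);

    // All permits returned means no remote generation deletion is still in flight.
    static boolean isRemoteGenerationDeletionPermitsAvailable() {
        return permits.availablePermits() == REMOTE_DELETION_PERMITS;
    }

    // Poll until permits are back, or fail after a timeout (stand-in for assertBusy()).
    static void awaitPermits(long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!isRemoteGenerationDeletionPermitsAvailable()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("remote generation deletions did not finish in time");
            }
            TimeUnit.MILLISECONDS.sleep(10);
        }
    }

    public static void main(String[] args) throws Exception {
        permits.acquire();                            // an async deletion is in flight
        new Thread(() -> {
            try {
                TimeUnit.MILLISECONDS.sleep(50);      // simulated remote deletion work
            } catch (InterruptedException ignored) {}
            permits.release();
        }).start();
        awaitPermits(1_000);                          // only now is it safe to assert on translog state
        System.out.println("permits available: " + isRemoteGenerationDeletionPermitsAvailable());
    }
}
```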
translog.setMinSeqNoToKeep(1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertEquals(6, translog.allUploaded().size()); assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); @@ -892,6 +899,7 @@ public void testDrainSync() throws Exception { translog.setMinSeqNoToKeep(3); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> assertEquals(4, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); @@ -1048,7 +1056,7 @@ public void testConcurrentWriteViewsAndSnapshot() throws Throwable { final int threadId = i; writers[i] = new Thread(new AbstractRunnable() { @Override - public void doRun() throws BrokenBarrierException, InterruptedException, IOException { + public void doRun() throws Exception { barrier.await(); int counter = 0; while (run.get() && idGenerator.get() < maxOps) { @@ -1090,6 +1098,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep // deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); translog.setMinSeqNoToKeep(localCheckpoint + 1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); } } if (id % 7 == 0) { diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index e4dd32e5c6f70..2531790ede4af 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -15,7 +15,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; @@ -27,12 +26,9 @@ import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; -import org.junit.Assert; import java.util.Arrays; import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -165,40 +161,6 @@ public void testTransportTimeoutForGetSegmentFilesAction() { assertEquals(recoverySettings.internalActionLongTimeout(), capturedRequest.options.timeout()); } - public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { - CountDownLatch latch = new CountDownLatch(1); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); - replicationSource.getSegmentFiles( - REPLICATION_ID, - checkpoint, - Arrays.asList(testMetadata), - mock(IndexShard.class), - (fileName, bytesRecovered) -> {}, - new ActionListener<>() { - @Override - public void 
onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - Assert.fail("onFailure response expected."); - } - - @Override - public void onFailure(Exception e) { - assertEquals(e.getClass(), CancellableThreads.ExecutionCancelledException.class); - latch.countDown(); - } - } - ); - replicationSource.cancel(); - latch.await(2, TimeUnit.SECONDS); - assertEquals("listener should have resolved in a failure", 0, latch.getCount()); - } - private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f284a425a417b..3c72dda2d8b5d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -620,6 +620,7 @@ public void testForceSegmentSyncHandlerWithFailure_AlreadyClosedException_swallo } public void testTargetCancelledBeforeStartInvoked() { + final String cancelReason = "test"; final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, primaryShard.getLatestReplicationCheckpoint(), @@ -633,12 +634,12 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { // failures leave state object in last entered stage. - assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); - assertTrue(e.getCause() instanceof CancellableThreads.ExecutionCancelledException); + assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + assertEquals(cancelReason, e.getMessage()); } } ); - target.cancel("test"); + target.cancel(cancelReason); sut.startReplication(target); } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java index a42c302b516d2..48b2941fe3b7e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java @@ -185,7 +185,7 @@ public void testLoggingOnHungIO() throws Exception { } public void testFailsHealthOnHungIOBeyondHealthyTimeout() throws Exception { - long healthyTimeoutThreshold = randomLongBetween(500, 1000); + long healthyTimeoutThreshold = randomLongBetween(1500, 2000); long refreshInterval = randomLongBetween(500, 1000); long slowLogThreshold = randomLongBetween(100, 200); long delayBetweenChecks = 100; diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index b976704e8af57..7f55c9f5cc7f7 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -32,10 +32,13 @@ package org.opensearch.plugins; +import com.fasterxml.jackson.core.JsonParseException; + import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -74,6 +77,33 @@ 
public void testReadFromProperties() throws Exception { assertEquals("fake desc", info.getDescription()); assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); + assertThat(info.getExtendedPlugins(), empty()); + } + + public void testReadFromPropertiesWithSingleOpenSearchRange() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertEquals("my_plugin", info.getName()); + assertEquals("fake desc", info.getDescription()); + assertEquals("1.0", info.getVersion()); + assertEquals("FakePlugin", info.getClassname()); + assertEquals("~" + Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -102,6 +132,7 @@ public void testReadFromPropertiesWithFolderNameAndVersionAfter() throws Excepti assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); assertEquals("custom-folder", info.getTargetFolderName()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -130,11 +161,40 @@ public void testReadFromPropertiesVersionMissing() throws Exception { assertThat(e.getMessage(), containsString("[version] is missing")); } - public void testReadFromPropertiesOpenSearchVersionMissing() throws Exception { + public void testReadFromPropertiesOpenSearchVersionAndDependenciesMissing() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc", "name", "my_plugin", "version", "1.0"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("[opensearch.version] is missing")); + assertThat( + e.getMessage(), + containsString("Either [opensearch.version] or [dependencies] property must be specified for the plugin ") + ); + } + + public void testReadFromPropertiesWithDependenciesAndOpenSearchVersion() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "dependencies", + "{opensearch:" + Version.CURRENT.toString() + "}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat( + e.getMessage(), + containsString("Only one of [opensearch.version] or [dependencies] property can be specified for the plugin") + ); } public void testReadFromPropertiesJavaVersionMissing() throws Exception { @@ -305,7 +365,6 @@ public void testSerialize() throws Exception { ByteBufferStreamInput input = new ByteBufferStreamInput(buffer); PluginInfo info2 = new PluginInfo(input); assertThat(info2.toString(), 
equalTo(info.toString())); - } public void testPluginListSorted() { @@ -347,4 +406,193 @@ public void testUnknownProperties() throws Exception { assertThat(e.getMessage(), containsString("Unknown properties in plugin descriptor")); } + public void testMultipleDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\", dependency2:\"1.0.0\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testNonOpenSearchDependency() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{some_dependency:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Only opensearch is allowed to be specified as a plugin dependency")); + } + + public void testEmptyDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testInvalidDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{invalid}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + expectThrows(JsonParseException.class, () -> PluginInfo.readFromProperties(pluginDir)); + } + + public void testEmptyOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Version cannot be empty")); + } + + public void testInvalidOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + 
PluginTestUtil.writePluginProperties(
+            pluginDir,
+            "description",
+            "fake desc",
+            "name",
+            "my_plugin",
+            "version",
+            "1.0",
+            "dependencies",
+            "{opensearch:\"1.2\"}",
+            "java.version",
+            System.getProperty("java.specification.version"),
+            "classname",
+            "FakePlugin"
+        );
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
+        assertThat(
+            e.getMessage(),
+            containsString("the version needs to contain major, minor, and revision, and optionally the build: 1.2")
+        );
+    }
+
+    public void testInvalidRangeInDependencies() throws Exception {
+        Path pluginDir = createTempDir().resolve("fake-plugin");
+        PluginTestUtil.writePluginProperties(
+            pluginDir,
+            "description",
+            "fake desc",
+            "name",
+            "my_plugin",
+            "version",
+            "1.0",
+            "dependencies",
+            "{opensearch:\"<2.2.0\"}",
+            "java.version",
+            System.getProperty("java.specification.version"),
+            "classname",
+            "FakePlugin"
+        );
+        expectThrows(NumberFormatException.class, () -> PluginInfo.readFromProperties(pluginDir));
+    }
+
+    public void testMultipleOpenSearchRangesInDependencies() throws Exception {
+        Path pluginDir = createTempDir().resolve("fake-plugin");
+        PluginTestUtil.writePluginProperties(
+            pluginDir,
+            "description",
+            "fake desc",
+            "name",
+            "my_plugin",
+            "version",
+            "1.0",
+            "dependencies",
+            "{opensearch:\"~1.2.3, =1.2.3\"}",
+            "java.version",
+            System.getProperty("java.specification.version"),
+            "classname",
+            "FakePlugin"
+        );
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir));
+        assertThat(e.getMessage(), containsString("Exactly one range is allowed to be specified in dependencies for the plugin"));
+    }
+
+    public void testMultipleOpenSearchRangesInConstructor() throws Exception {
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> new PluginInfo(
+                "plugin_name",
+                "foo",
+                "dummy",
+                List.of(
+                    new SemverRange(Version.CURRENT, SemverRange.RangeOperator.EQ),
+                    new SemverRange(Version.CURRENT, SemverRange.RangeOperator.DEFAULT)
+                ),
+                "1.8",
+                "dummyclass",
+                null,
+                Collections.emptyList(),
+                randomBoolean()
+            )
+        );
+        assertThat(e.getMessage(), containsString("Exactly one range is allowed to be specified in dependencies for the plugin"));
+    }
 }
diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
index db276678ba4dd..bd9ee33856f14 100644
--- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
+++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
@@ -45,8 +45,10 @@
 import org.opensearch.env.Environment;
 import org.opensearch.env.TestEnvironment;
 import org.opensearch.index.IndexModule;
+import org.opensearch.semver.SemverRange;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.test.VersionUtils;
 import org.hamcrest.Matchers;
 import java.io.IOException;
@@ -717,6 +719,45 @@ public void testIncompatibleOpenSearchVersion() throws Exception {
         assertThat(e.getMessage(), containsString("was built for OpenSearch version 6.0.0"));
     }
+    public void testCompatibleOpenSearchVersionRange() {
+        List<SemverRange> pluginCompatibilityRange = List.of(new SemverRange(Version.CURRENT, SemverRange.RangeOperator.TILDE));
+        PluginInfo info = new PluginInfo(
+            "my_plugin",
+            "desc",
+            "1.0",
+            pluginCompatibilityRange,
+            "1.8",
+            "FakePlugin",
+            null,
+            Collections.emptyList(),
+            false
+        );
+        PluginsService.verifyCompatibility(info);
+    }
+
+    public void testIncompatibleOpenSearchVersionRange() {
+        // Version.CURRENT is behind by one with respect to patch version in the range
+        List<SemverRange> pluginCompatibilityRange = List.of(
+            new SemverRange(
+                VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)),
+                SemverRange.RangeOperator.TILDE
+            )
+        );
+        PluginInfo info = new PluginInfo(
+            "my_plugin",
+            "desc",
+            "1.0",
+            pluginCompatibilityRange,
+            "1.8",
+            "FakePlugin",
+            null,
+            Collections.emptyList(),
+            false
+        );
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info));
+        assertThat(e.getMessage(), containsString("was built for OpenSearch version "));
+    }
+
     public void testIncompatibleJavaVersion() throws Exception {
         PluginInfo info = new PluginInfo(
             "my_plugin",
@@ -891,7 +932,10 @@ public void testExtensiblePlugin() {
         TestExtensiblePlugin extensiblePlugin = new TestExtensiblePlugin();
         PluginsService.loadExtensions(
             Collections.singletonList(
-                Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin)
+                Tuple.tuple(
+                    new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false),
+                    extensiblePlugin
+                )
             )
         );
@@ -902,9 +946,12 @@ public void testExtensiblePlugin() {
         TestPlugin testPlugin = new TestPlugin();
         PluginsService.loadExtensions(
             Arrays.asList(
-                Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin),
                 Tuple.tuple(
-                    new PluginInfo("test", null, null, null, null, null, Collections.singletonList("extensible"), false),
+                    new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false),
+                    extensiblePlugin
+                ),
+                Tuple.tuple(
+                    new PluginInfo("test", null, null, Version.CURRENT, null, null, Collections.singletonList("extensible"), false),
                     testPlugin
                 )
             )
@@ -1036,6 +1083,40 @@ public void testThrowingConstructor() {
         assertThat(e.getCause().getCause(), hasToString(containsString("test constructor failure")));
     }
+    public void testPluginCompatibilityWithSemverRange() {
+        // Compatible plugin and core versions
+        assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.0")));
+
+        assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.0")));
+
+        assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.0.0")));
+
+        assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.2")));
+
+        // Incompatible plugin and core versions
+        assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.1")));
+
+        assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.1")));
+
+        assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.0")));
+
+        assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.1.0")));
+    }
+
+    private PluginInfo getPluginInfoWithSemverRange(String semverRange) {
+        return new PluginInfo(
+            "my_plugin",
+            "desc",
+
"1.0", + List.of(SemverRange.fromString(semverRange)), + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + } + private static class TestExtensiblePlugin extends Plugin implements ExtensiblePlugin { private List<TestExtensionPoint> extensions; diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index 25405afa24c16..b7239e7b59742 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -138,6 +138,37 @@ public void teardown() throws IOException { IOUtils.close(client); } + public void testDefaultRestControllerGetAllHandlersContainsFavicon() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + assertFalse(handlers.hasNext()); + } + + public void testRestControllerGetAllHandlers() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + + restController.registerHandler(RestRequest.Method.PATCH, "/foo", mock(RestHandler.class)); + restController.registerHandler(RestRequest.Method.GET, "/foo", mock(RestHandler.class)); + + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + + assertTrue(handlers.hasNext()); + MethodHandlers rootHandler = handlers.next(); + assertEquals(rootHandler.getPath(), "/foo"); + assertEquals(rootHandler.getValidMethods(), Set.of(RestRequest.Method.GET, RestRequest.Method.PATCH)); + + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + + assertFalse(handlers.hasNext()); + } + public void testApplyRelevantHeaders() throws Exception { final ThreadContext threadContext = client.threadPool().getThreadContext(); Set<RestHeaderDefinition> headers = new HashSet<>( @@ -150,15 +181,15 @@ public void testApplyRelevantHeaders() throws Exception { restHeaders.put("header.3", Collections.singletonList("false")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestController spyRestController = spy(restController); - when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<MethodHandlers>() { + when(spyRestController.getAllRestMethodHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<RestMethodHandlers>() { @Override public boolean hasNext() { return false; } @Override - public MethodHandlers next() { - return new MethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { + public RestMethodHandlers next() { + return new RestMethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { assertEquals("true", threadContext.getHeader("header.1")); assertEquals("true", threadContext.getHeader("header.2")); assertNull(threadContext.getHeader("header.3")); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java 
b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index 73f83a5642bb4..fa13ec2036797 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -125,7 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); - assertThat(headers.get(74).value, equalTo("docs.deleted")); + assertThat(headers.get(78).value, equalTo("docs.deleted")); final List<List<Table.Cell>> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -141,9 +141,9 @@ public void testBuildTable() { assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); - assertThat(row.get(74).value, equalTo(shardStats.getStats().getDocs().getDeleted())); + assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 347011af98c6d..3793249d569f0 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -54,7 +54,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -80,7 +79,6 @@ import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.slice.SliceBuilder; import org.opensearch.search.sort.SortAndFormats; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -554,8 +552,6 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } public void testSearchPathEvaluationUsingSortField() throws Exception { - // enable the concurrent set FeatureFlag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); diff --git a/server/src/test/java/org/opensearch/search/SearchHitTests.java b/server/src/test/java/org/opensearch/search/SearchHitTests.java index 88d5fb38a6cb1..13b4d9f976ed5 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitTests.java @@ -56,11 +56,13 @@ import org.opensearch.test.AbstractWireSerializingTestCase; import org.opensearch.test.RandomObjects; import org.opensearch.test.VersionUtils; +import org.junit.Assert; import 
java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; @@ -76,6 +78,25 @@ import static org.hamcrest.Matchers.nullValue; public class SearchHitTests extends AbstractWireSerializingTestCase<SearchHit> { + + private Map<String, Float> getSampleMatchedQueries() { + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + matchedQueries.put("query1", 1.0f); + matchedQueries.put("query2", 0.5f); + return matchedQueries; + } + + public static SearchHit createTestItemWithMatchedQueriesScores(boolean withOptionalInnerHits, boolean withShardTarget) { + var searchHit = createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); + int size = randomIntBetween(1, 5); // Ensure at least one matched query + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); + for (int i = 0; i < size; i++) { + matchedQueries.put(randomAlphaOfLength(5), randomFloat()); + } + searchHit.matchedQueriesWithScores(matchedQueries); + return searchHit; + } + public static SearchHit createTestItem(boolean withOptionalInnerHits, boolean withShardTarget) { return createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); } @@ -129,11 +150,11 @@ public static SearchHit createTestItem(final MediaType mediaType, boolean withOp } if (randomBoolean()) { int size = randomIntBetween(0, 5); - String[] matchedQueries = new String[size]; + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = randomAlphaOfLength(5); + matchedQueries.put(randomAlphaOfLength(5), Float.NaN); } - hit.matchedQueries(matchedQueries); + hit.matchedQueriesWithScores(matchedQueries); } if (randomBoolean()) { hit.explanation(createExplanation(randomIntBetween(0, 5))); @@ -219,6 +240,21 @@ public void testFromXContentLenientParsing() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); } + public void testSerializationDeserializationWithMatchedQueriesScores() throws IOException { + SearchHit searchHit = createTestItemWithMatchedQueriesScores(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_3_0_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + } + + public void testSerializationDeserializationWithMatchedQueriesList() throws IOException { + SearchHit searchHit = createTestItem(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_2_12_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + Assert.assertArrayEquals(searchHit.getMatchedQueries(), deserializedSearchHit.getMatchedQueries()); + } + /** * When e.g. with "stored_fields": "_none_", only "_index" and "_score" are returned. */ @@ -244,6 +280,125 @@ public void testToXContent() throws IOException { assertEquals("{\"_id\":\"id1\",\"_score\":1.5}", builder.toString()); } + public void testSerializeShardTargetWithNewVersion() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } + + public void testSerializeShardTargetWithNewVersionAndMatchedQueries() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + innerHit1.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + innerHit2.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + innerHit3.matchedQueriesWithScores(getSampleMatchedQueries()); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + String[] expectedMatchedQueries = new String[] { "query1", "query2" }; + String[] actualMatchedQueries = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueries(); + assertArrayEquals(expectedMatchedQueries, actualMatchedQueries); + + Map<String, Float> expectedMatchedQueriesAndScores = new LinkedHashMap<>(); + expectedMatchedQueriesAndScores.put("query1", 1.0f); + expectedMatchedQueriesAndScores.put("query2", 0.5f); + + Map<String, Float> actualMatchedQueriesAndScores = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueriesAndScores(); + assertEquals(expectedMatchedQueriesAndScores, actualMatchedQueriesAndScores); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } + public void testSerializeShardTarget() throws Exception { String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; SearchShardTarget target = new SearchShardTarget( diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 317253be9825f..01b8d6d8cdd72 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -113,7 +113,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class SearchModuleTests extends OpenSearchTestCase { @@ -431,9 +430,7 @@ public void testDefaultQueryPhaseSearcher() { } public void testConcurrentQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - SearchModule searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); TestSearchContext searchContext = new TestSearchContext(null); searchContext.setConcurrentSegmentSearchEnabled(true); QueryPhase queryPhase = searchModule.getQueryPhase(); @@ -443,8 +440,6 @@ public void testConcurrentQueryPhaseSearcher() { } public void testPluginQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); QueryPhaseSearcher queryPhaseSearcher = (searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout) -> false; SearchPlugin plugin1 = new SearchPlugin() { @Override @@ -452,7 +447,7 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { return Optional.of(queryPhaseSearcher); } }; - SearchModule searchModule = new SearchModule(settings, Collections.singletonList(plugin1)); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.singletonList(plugin1)); QueryPhase queryPhase = searchModule.getQueryPhase(); TestSearchContext searchContext = new TestSearchContext(null); assertEquals(queryPhaseSearcher, queryPhase.getQueryPhaseSearcher()); @@ -480,18 +475,10 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { } public void testIndexSearcher() { - SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); ThreadPool threadPool = mock(ThreadPool.class); - assertNull(searchModule.getIndexSearcherExecutor(threadPool)); - verify(threadPool, times(0)).executor(ThreadPool.Names.INDEX_SEARCHER); - - // enable concurrent segment search feature flag - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); searchModule.getIndexSearcherExecutor(threadPool); verify(threadPool).executor(ThreadPool.Names.INDEX_SEARCHER); - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } public void testMultiplePluginRegisterIndexSearcherProvider() { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 7c84078af080e..d502bab5918a8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ 
b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -57,7 +57,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -225,11 +224,6 @@ public void onQueryPhase(SearchContext context, long tookInNanos) { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings() { return Settings.builder().put("search.default_search_timeout", "5s").build(); @@ -1189,7 +1183,7 @@ public void testCreateSearchContext() throws IOException { public void testConcurrentSegmentSearchSearchContext() throws IOException { Boolean[][] scenarios = { // cluster setting, index setting, concurrent search enabled? - { null, null, true }, + { null, null, false }, { null, false, false }, { null, true, true }, { true, null, true }, diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java index 21d05305eed1b..eef7e4c45849d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -37,8 +37,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -52,7 +51,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.is; -public abstract class ShardSizeTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class ShardSizeTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ShardSizeTestCase(Settings dynamicSettings) { super(dynamicSettings); @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { // we need at least 2 diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index eabc4b7764eed..13a3d8145743b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -35,10 +35,14 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import 
org.apache.lucene.search.TermQuery; import org.opensearch.OpenSearchParseException; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -1253,6 +1257,74 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ); } + public void testDateHistogramSourceWithSize() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45")), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00")), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24")), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).size(1); + }, + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{date=1474329600000}", result.afterKey().toString()); // 2017-10-20T00:00:00 + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + } + ); + } + + public void testDateHistogramSourceWithDocCountField() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45"), "_doc_count", 5), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00"), "_doc_count", 2), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24"), "_doc_count", 3), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); + }, + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + assertEquals("{date=1508457600000}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(6L, result.getBuckets().get(2).getDocCount()); + } + ); + } + public void testWithDateHistogram() throws 
IOException { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( @@ -1279,7 +1351,7 @@ public void testWithDateHistogram() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); - assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); // 2017-10-20T00:00:00 assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); @@ -1300,9 +1372,8 @@ public void testWithDateHistogram() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") .calendarInterval(DateHistogramInterval.days(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).aggregateAfter( - createAfterKey("date", 1474329600000L) + createAfterKey("date", 1474329600000L) // 2016-09-20T00:00:00 ); - }, (result) -> { assertEquals(2, result.getBuckets().size()); @@ -2242,21 +2313,20 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( Function<Object, V> transformKey ) throws IOException { int numTerms = randomIntBetween(10, 500); - List<T> terms = new ArrayList<>(); + List<T> terms = new ArrayList<>(); // possible values for the terms for (int i = 0; i < numTerms; i++) { terms.add(randomSupplier.get()); } int numDocs = randomIntBetween(100, 200); List<Map<String, List<Object>>> dataset = new ArrayList<>(); - - Set<T> valuesSet = new HashSet<>(); - Map<Comparable<?>, AtomicLong> expectedDocCounts = new HashMap<>(); + Set<T> valuesSet = new HashSet<>(); // how many different values + Map<Comparable<?>, AtomicLong> expectedDocCounts = new HashMap<>(); // how many docs for each value for (int i = 0; i < numDocs; i++) { int numValues = randomIntBetween(1, 5); Set<Object> values = new HashSet<>(); for (int j = 0; j < numValues; j++) { int rand = randomIntBetween(0, terms.size() - 1); - if (values.add(terms.get(rand))) { + if (values.add(terms.get(rand))) { // values are unique for one doc AtomicLong count = expectedDocCounts.computeIfAbsent(terms.get(rand), (k) -> new AtomicLong(0)); count.incrementAndGet(); valuesSet.add(terms.get(rand)); @@ -2264,9 +2334,8 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( } dataset.add(Collections.singletonMap(field, new ArrayList<>(values))); } - List<T> expected = new ArrayList<>(valuesSet); + List<T> expected = new ArrayList<>(valuesSet); // how many buckets expected Collections.sort(expected); - List<Comparable<T>> seen = new ArrayList<>(); AtomicBoolean finish = new AtomicBoolean(false); int size = randomIntBetween(1, expected.size()); @@ -2463,4 +2532,41 @@ public void testIndexSortWithDuplicate() throws Exception { ); } } + + public void testUnderFilterAggregator() throws IOException { + executeTestCase(false, false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + FilterAggregationBuilder filterAggregatorBuilder = new FilterAggregationBuilder( + "filter_mcmilterface", + new MatchAllQueryBuilder() + ); + filterAggregatorBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return filterAggregatorBuilder; + }, (ic) -> {}); + } + + public void testUnderBucketAggregator() throws IOException { + try { + executeTestCase(false, 
false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("terms").field("keyword"); + termsAggregationBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return termsAggregationBuilder; + }, (ic) -> {}); + fail("Should have thrown an IllegalArgumentException"); + } catch (IllegalArgumentException iae) { + assertTrue( + iae.getMessage() + .contains("[composite] aggregation cannot be used with a parent aggregation of type: [TermsAggregatorFactory]") + ); + } + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index bca6623e66104..2a4fbca7a8541 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -45,6 +46,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -1178,6 +1180,181 @@ public void testOverlappingBounds() { ); } + public void testHardBoundsNotOverlapping() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2018-01-01", "2020-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-02-03")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2017-02-03", "2020-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + false + ); + } + + public void testFilterRewriteOptimizationWithRangeQuery() throws IOException { + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2018-01-01"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true // force AGGREGABLE_DATE field to be searchable to test the filter rewrite optimization path + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-02-02")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2017-02-03"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + } + + public void testDocCountField() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(5, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + true + ); + } + public void testIllegalInterval() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -1211,13 +1388,42 @@ private void testSearchCase( int maxBucket, boolean useNanosecondResolution ) throws IOException { - boolean aggregableDateIsSearchable = randomBoolean(); + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, false); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField + ) throws IOException { + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, useDocCountField, randomBoolean()); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField, + boolean aggregableDateIsSearchable + ) throws IOException { + logger.debug("Aggregable date is searchable {}", aggregableDateIsSearchable); DateFieldMapper.DateFieldType fieldType = aggregableDateFieldType(useNanosecondResolution, aggregableDateIsSearchable); try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new 
Document(); + if (useDocCountField) { + // add the doc count field to the first document + document.add(new NumericDocValuesField(DocCountFieldMapper.NAME, 5)); + } for (String date : dataset) { long instant = asLong(date, fieldType); document.add(new SortedNumericDocValuesField(AGGREGABLE_DATE, instant)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java index e02c00005df9b..94cb4c7955a21 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -39,7 +39,6 @@ import org.opensearch.common.document.DocumentField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,7 +47,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.geo.RandomGeoGenerator; import java.util.ArrayList; @@ -65,7 +64,7 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractGeoTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractGeoTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java index 9eb90f2358f98..98dde2c7a31b3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java @@ -60,7 +60,7 @@ private static class FakeAggregationScript extends AggregationScript { int index; FakeAggregationScript(Object[][] values) { - super(Collections.emptyMap(), new SearchLookup(null, null) { + super(Collections.emptyMap(), new SearchLookup(null, null, SearchLookup.UNKNOWN_SHARD_ID) { @Override public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java index 85aacfbd63ee2..8c4b8ad6d1776 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java @@ -85,6 +85,7 @@ public void setUp() throws Exception { 0, VectorEncoding.FLOAT32, 
VectorSimilarityFunction.EUCLIDEAN,
+            false,
             false
         );
diff --git a/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java
new file mode 100644
index 0000000000000..e942c3ab17420
--- /dev/null
+++ b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.lookup;
+
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.mockito.Mockito.mock;
+
+public class SearchLookupTests extends OpenSearchTestCase {
+    public void testDeprecatedConstructorShardId() {
+        final SearchLookup searchLookup = new SearchLookup(mock(MapperService.class), (a, b) -> null);
+        assertThrows(IllegalStateException.class, searchLookup::shardId);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java
index ca4b7dc49f6f0..55c50b8cf854d 100644
--- a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java
+++ b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java
@@ -39,9 +39,14 @@
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.opensearch.Version;
@@ -49,6 +54,7 @@
 import org.opensearch.common.lucene.search.function.ScriptScoreQuery;
 import org.opensearch.script.ScoreScript;
 import org.opensearch.script.Script;
+import org.opensearch.script.ScriptType;
 import org.opensearch.search.lookup.LeafSearchLookup;
 import org.opensearch.search.lookup.SearchLookup;
 import org.opensearch.test.OpenSearchTestCase;
@@ -56,6 +62,8 @@
 import org.junit.Before;
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.function.Function;
 import static org.hamcrest.CoreMatchers.containsString;
@@ -177,6 +185,37 @@ public void testScriptScoreErrorOnNegativeScore() {
         assertTrue(e.getMessage().contains("Must be a non-negative score!"));
     }
+    public void testTwoPhaseIteratorDelegation() throws IOException {
+        Map<String, Object> params = new HashMap<>();
+        String scriptSource = "doc['field'].value != null ? 
2.0 : 0.0"; // Adjust based on actual field and logic + Script script = new Script(ScriptType.INLINE, "painless", scriptSource, params); + float minScore = 1.0f; // This should be below the score produced by the script for all docs + ScoreScript.LeafFactory factory = newFactory(script, false, explanation -> 2.0); + + Query subQuery = new MatchAllDocsQuery(); + ScriptScoreQuery scriptScoreQuery = new ScriptScoreQuery(subQuery, script, factory, minScore, "index", 0, Version.CURRENT); + + Weight weight = searcher.createWeight(searcher.rewrite(scriptScoreQuery), ScoreMode.COMPLETE, 1f); + + boolean foundMatchingDoc = false; + for (LeafReaderContext leafContext : searcher.getIndexReader().leaves()) { + Scorer scorer = weight.scorer(leafContext); + if (scorer != null) { + TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator(); + assertNotNull("TwoPhaseIterator should not be null", twoPhaseIterator); + DocIdSetIterator docIdSetIterator = twoPhaseIterator.approximation(); + int docId; + while ((docId = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (twoPhaseIterator.matches()) { + foundMatchingDoc = true; + break; + } + } + } + } + assertTrue("Expected to find at least one matching document", foundMatchingDoc); + } + private ScoreScript.LeafFactory newFactory( Script script, boolean needsScore, @@ -203,5 +242,4 @@ public double execute(ExplanationHolder explanation) { } }; } - } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9bb1f51c51cf6..7c50e961853b5 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -188,7 +188,6 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; @@ -2037,7 +2036,6 @@ public void onFailure(final Exception e) { final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); final SetOnce<RepositoriesService> repositoriesServiceReference = new SetOnce<>(); repositoriesServiceReference.set(repositoriesService); - FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnv, null); indicesService = new IndicesService( settings, mock(PluginsService.class), @@ -2072,7 +2070,6 @@ public void onFailure(final Exception e) { emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, - fileCacheCleaner, null, new RemoteStoreStatsTrackerFactory(clusterService, settings), DefaultRecoverySettings.INSTANCE diff --git a/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java new file mode 100644 index 0000000000000..4c96f79b30d55 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class TelemetrySettingsTests extends OpenSearchTestCase { + + public void testSetTracingEnabledOrDisabled() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validation for tracingEnabled as true + telemetrySettings.setTracingEnabled(true); + assertTrue(telemetrySettings.isTracingEnabled()); + + // Validation for tracingEnabled as false + telemetrySettings.setTracingEnabled(false); + assertFalse(telemetrySettings.isTracingEnabled()); + } + + public void testSetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating default sample rate i.e 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating override for sampling for 100% request + telemetrySettings.setSamplingProbability(1.00); + assertEquals(1.00, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating override for sampling for 50% request + telemetrySettings.setSamplingProbability(0.50); + assertEquals(0.50, telemetrySettings.getSamplingProbability(), 0.00d); + } + + public void testGetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating default value of Sampling is 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.02").build()); + + // Validating if default sampling is updated to 2% + assertEquals(0.02, telemetrySettings.getSamplingProbability(), 0.00d); + } + +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java index b4183412cdf02..75fc6761a60ef 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.network.NetworkAddress; @@ -27,29 +29,64 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; public class SpanBuilderTests extends OpenSearchTestCase { + public String uri; + + public String expectedSpanName; + + public String expectedQueryParams; + + public String expectedReqRawPath; + + @ParametersFactory + public static Collection<Object[]> data() { + return Arrays.asList( + new Object[][] { + { 
"/_test/resource?name=John&age=25", "GET /_test/resource", "name=John&age=25", "/_test/resource" }, + { "/_test/", "GET /_test/", "", "/_test/" }, } + ); + } + + public SpanBuilderTests(String uri, String expectedSpanName, String expectedQueryParams, String expectedReqRawPath) { + this.uri = uri; + this.expectedSpanName = expectedSpanName; + this.expectedQueryParams = expectedQueryParams; + this.expectedReqRawPath = expectedReqRawPath; + } + public void testHttpRequestContext() { - HttpRequest httpRequest = createHttpRequest(); + HttpRequest httpRequest = createHttpRequest(uri); SpanCreationContext context = SpanBuilder.from(httpRequest); Attributes attributes = context.getAttributes(); - assertEquals("GET /_test", context.getSpanName()); + assertEquals(expectedSpanName, context.getSpanName()); assertEquals("true", attributes.getAttributesMap().get(AttributeNames.TRACE)); assertEquals("GET", attributes.getAttributesMap().get(AttributeNames.HTTP_METHOD)); assertEquals("HTTP_1_0", attributes.getAttributesMap().get(AttributeNames.HTTP_PROTOCOL_VERSION)); - assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + assertEquals(uri, attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } } public void testRestRequestContext() { - RestRequest restRequest = RestRequest.request(null, createHttpRequest(), null); + RestRequest restRequest = RestRequest.request(null, createHttpRequest(uri), null); SpanCreationContext context = SpanBuilder.from(restRequest); Attributes attributes = context.getAttributes(); - assertEquals("GET /_test", context.getSpanName()); - assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); + assertEquals(expectedSpanName, context.getSpanName()); + assertEquals(expectedReqRawPath, attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); assertNotNull(attributes.getAttributesMap().get(AttributeNames.REST_REQ_ID)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } } public void testRestRequestContextForNull() { @@ -97,7 +134,7 @@ public void close() { }; } - private static HttpRequest createHttpRequest() { + private static HttpRequest createHttpRequest(String uri) { return new HttpRequest() { @Override public RestRequest.Method method() { @@ -106,7 +143,7 @@ public RestRequest.Method method() { @Override public String uri() { - return "/_test"; + return uri; } @Override diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json index 4a4fc7d2c81b1..1ed56fa6dab4d 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json @@ -20,4 +20,4 @@ } } } -} \ No newline at end of file +} diff --git a/settings.gradle b/settings.gradle index 24ab4a7a22237..8fbf32504215b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.16.1" + id "com.gradle.enterprise" version "3.16.2" } 
ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java index 90f4f1ba2ceb2..5cb5cdd18f6cc 100644 --- a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java +++ b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.metrics.InternalMax; @@ -64,11 +63,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(DelayedShardAggregationPlugin.class); diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 7adf29792f27d..8b7b55edc1899 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -51,6 +51,7 @@ dependencies { exclude module: "logback-core" exclude module: "logback-classic" exclude module: "avro" + exclude group: 'org.apache.kerby' } api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:${versions.commonscompress}" @@ -72,10 +73,11 @@ dependencies { api "commons-net:commons-net:3.10.0" api "ch.qos.logback:logback-core:1.2.13" api "ch.qos.logback:logback-classic:1.2.13" + api 'org.apache.kerby:kerb-admin:2.0.3' runtimeOnly "com.google.guava:guava:${versions.guava}" runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.7.0" + runtimeOnly "com.squareup.okio:okio:3.8.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml index e4d2faab9a657..539ca9471fa04 100644 --- a/test/fixtures/minio-fixture/docker-compose.yml +++ b/test/fixtures/minio-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3' +version: '3.2' services: minio-fixture: build: diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index 43881d0660e04..933385dedcf49 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -84,6 +84,7 @@ * The idea is to mimic as much as possible what happens with ES in production * mode (e.g. 
assign permissions and install security manager the same way) */ +@SuppressWarnings("removal") public class BootstrapForTesting { // TODO: can we share more code with the non-test side here diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index ac78a0d1936ea..a65ce3cbdd380 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -253,7 +253,7 @@ protected QueryShardContext createQueryShardContext(MapperService mapperService) when(queryShardContext.allowExpensiveQueries()).thenReturn(true); when(queryShardContext.lookup()).thenReturn(new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); - })); + }, SearchLookup.UNKNOWN_SHARD_ID)); when(queryShardContext.getFieldType(any())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); when(queryShardContext.documentMapper(anyString())).thenReturn(mapperService.documentMapper()); return queryShardContext; diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java index da043229c642d..dc5954907a4fa 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java @@ -293,7 +293,7 @@ protected final List<?> fetchFromDocValues(MapperService mapperService, MappedFi withLuceneIndex(mapperService, iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); }, iw -> { - SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); + SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, SearchLookup.UNKNOWN_SHARD_ID); ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); IndexSearcher searcher = newSearcher(iw); LeafReaderContext context = searcher.getIndexReader().leaves().get(0); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 412d5235fe462..bf1c4d4c94e04 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -785,10 +785,10 @@ protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMet protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(path); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); - RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("data")); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("metadata")); RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( - new 
RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) + new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex().resolve("lock_files"))) ); return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index 0f5e043ee1135..cc2d26a598757 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -31,6 +31,7 @@ * Mockito plugin which wraps the Mockito calls into priviledged execution blocks and respects * SecurityManager presence. */ +@SuppressWarnings("removal") @SuppressForbidden(reason = "allow URL#getFile() to be used in tests") public class PriviledgedMockMaker implements MockMaker { private static AccessControlContext context; diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java index 8e94f2cacf070..0b44fe447d6f8 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java @@ -36,11 +36,10 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,7 +48,7 @@ import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public abstract class AbstractTermsTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractTermsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public AbstractTermsTestCase(Settings dynamicSettings) { super(dynamicSettings); @@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public String randomExecutionHint() { return randomBoolean() ? 
null : randomFrom(ExecutionMode.values()).toString(); } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java index 6b5ec838f401d..466e4d1bf1742 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -14,6 +14,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; @@ -40,13 +41,16 @@ import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.composite.InternalComposite; @@ -139,12 +143,16 @@ protected void executeTestCase( boolean useIndexSort, Query query, List<Map<String, List<Object>>> dataset, - Supplier<CompositeAggregationBuilder> create, + Supplier<? extends AggregationBuilder> create, Consumer<InternalComposite> verify ) throws IOException { Map<String, MappedFieldType> types = FIELD_TYPES.stream().collect(Collectors.toMap(MappedFieldType::name, Function.identity())); - CompositeAggregationBuilder aggregationBuilder = create.get(); - Sort indexSort = useIndexSort ? 
buildIndexSort(aggregationBuilder.sources(), types) : null; + AggregationBuilder aggregationBuilder = create.get(); + Sort indexSort = null; + if (aggregationBuilder instanceof CompositeAggregationBuilder && useIndexSort) { + CompositeAggregationBuilder cab = (CompositeAggregationBuilder) aggregationBuilder; + indexSort = buildIndexSort(cab.sources(), types); + } IndexSettings indexSettings = createIndexSettings(indexSort); try (Directory directory = newDirectory()) { IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); @@ -180,14 +188,16 @@ protected void executeTestCase( } try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = new IndexSearcher(indexReader); - InternalComposite composite = searchAndReduce( + InternalAggregation aggregation = searchAndReduce( indexSettings, indexSearcher, query, aggregationBuilder, FIELD_TYPES.toArray(new MappedFieldType[0]) ); - verify.accept(composite); + if (aggregation instanceof InternalComposite) { + verify.accept((InternalComposite) aggregation); + } } } } @@ -196,6 +206,12 @@ protected void addToDocument(int id, Document doc, Map<String, List<Object>> key doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); for (Map.Entry<String, List<Object>> entry : keys.entrySet()) { final String name = entry.getKey(); + if (name.equals(DocCountFieldMapper.NAME)) { + doc.add(new IntPoint(name, (int) entry.getValue().get(0))); + // doc count field should be DocValuesType.NUMERIC + doc.add(new NumericDocValuesField(name, (int) entry.getValue().get(0))); + continue; + } for (Object value : entry.getValue()) { if (value instanceof Integer) { doc.add(new SortedNumericDocValuesField(name, (int) value)); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java index 103b67e2782de..8c2cefa89c860 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -35,9 +35,8 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -48,11 +47,11 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractNumericTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractNumericTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static long minValue, maxValue, minValues, maxValues; - public AbstractNumericTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public AbstractNumericTestCase(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void 
setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java index eddcf9c738bb3..f698cd03c464f 100644 --- a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java +++ b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java @@ -46,6 +46,7 @@ public static synchronized void clear() { private static final Logger LOGGER = LogManager.getLogger(FeatureFlagSetter.class); private final Set<String> flags = ConcurrentCollections.newConcurrentSet(); + @SuppressWarnings("removal") @SuppressForbidden(reason = "Enables setting of feature flags") private void setFlag(String flag) { flags.add(flag); @@ -53,6 +54,7 @@ private void setFlag(String flag) { LOGGER.info("set feature_flag={}", flag); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "Clears the set feature flags") private void clearAll() { for (String flag : flags) { diff --git a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java index 328aaf8a65b1f..6d6199833b25b 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java @@ -35,18 +35,23 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Property; import org.apache.logging.log4j.core.filter.RegexFilter; import org.opensearch.common.logging.Loggers; import org.opensearch.common.regex.Regex; +import org.opensearch.test.junit.annotations.TestLogging; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; /** * Test appender that can be used to verify that certain events were logged correctly @@ -68,11 +73,19 @@ public class MockLogAppender extends AbstractAppender implements AutoCloseable { * write to a closed MockLogAppender instance. */ public static MockLogAppender createForLoggers(Logger... loggers) throws IllegalAccessException { - return createForLoggers(".*(\n.*)*", loggers); + final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName(); + return createForLoggersInternal(callingClass, ".*(\n.*)*", loggers); } public static MockLogAppender createForLoggers(String filter, Logger... loggers) throws IllegalAccessException { + final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName(); + return createForLoggersInternal(callingClass, filter, loggers); + } + + private static MockLogAppender createForLoggersInternal(String callingClass, String filter, Logger... loggers) + throws IllegalAccessException { final MockLogAppender appender = new MockLogAppender( + callingClass + "-mock-log-appender", RegexFilter.createFilter(filter, new String[0], false, null, null), Collections.unmodifiableList(Arrays.asList(loggers)) ); @@ -83,8 +96,8 @@ public static MockLogAppender createForLoggers(String filter, Logger... 
loggers) return appender; } - private MockLogAppender(RegexFilter filter, List<Logger> loggers) { - super("mock", filter, null); + private MockLogAppender(String name, RegexFilter filter, List<Logger> loggers) { + super(name, filter, null, true, Property.EMPTY_ARRAY); /* * We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs, * we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a @@ -116,7 +129,14 @@ public void close() { for (Logger logger : loggers) { Loggers.removeAppender(logger, this); } - this.stop(); + super.stop(); + } + + @Override + public void stop() { + // MockLogAppender should be used with try-with-resources to ensure + // proper clean up ordering and should never be stopped directly. + throw new UnsupportedOperationException("Use close() to ensure proper clean up ordering"); } public interface LoggingExpectation { @@ -243,6 +263,59 @@ public void assertMatched() { } + /** + * Used for cases when the logger is dynamically named such as to include an index name or shard id + * + * Best used in conjunction with the root logger: + * {@code @TestLogging(value = "_root:debug", reason = "Validate logging output");} + * @see TestLogging + * */ + public static class PatternSeenWithLoggerPrefixExpectation implements LoggingExpectation { + private final String expectationName; + private final String loggerPrefix; + private final Level level; + private final String messageMatchingRegex; + + private final List<String> loggerMatches = new ArrayList<>(); + private final AtomicBoolean eventSeen = new AtomicBoolean(false); + + public PatternSeenWithLoggerPrefixExpectation( + final String expectationName, + final String loggerPrefix, + final Level level, + final String messageMatchingRegex + ) { + this.expectationName = expectationName; + this.loggerPrefix = loggerPrefix; + this.level = level; + this.messageMatchingRegex = messageMatchingRegex; + } + + @Override + public void match(final LogEvent event) { + if (event.getLevel() == level && event.getLoggerName().startsWith(loggerPrefix)) { + final String formattedMessage = event.getMessage().getFormattedMessage(); + loggerMatches.add(formattedMessage); + if (formattedMessage.matches(messageMatchingRegex)) { + eventSeen.set(true); + } + } + } + + @Override + public void assertMatched() { + if (!eventSeen.get()) { + final StringBuilder failureMessage = new StringBuilder(); + failureMessage.append(expectationName + " was not seen, found " + loggerMatches.size() + " messages matching the logger."); + failureMessage.append("\r\nMessage matching regex: " + messageMatchingRegex); + if (!loggerMatches.isEmpty()) { + failureMessage.append("\r\nMessage details:\r\n" + String.join("\r\n", loggerMatches)); + } + fail(failureMessage.toString()); + } + } + } + private static String getLoggerName(String name) { if (name.startsWith("org.opensearch.")) { name = name.substring("org.opensearch.".length()); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6215e84f42676..47dd033834f1c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -32,13 +32,12 @@ package org.opensearch.test; -import com.carrotsearch.randomizedtesting.RandomizedContext; import 
com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -48,7 +47,6 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -77,7 +75,6 @@ import org.opensearch.client.ClusterAdminClient; import org.opensearch.client.Requests; import org.opensearch.client.RestClient; -import org.opensearch.client.RestClientBuilder; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.OpenSearchNodeCommand; @@ -96,7 +93,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.FeatureFlagSettings; @@ -106,7 +103,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.smile.SmileXContent; @@ -120,6 +116,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -128,8 +125,8 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; -import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; @@ -138,16 +135,18 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.IndicesStore; import 
org.opensearch.monitor.os.OsInfo; import org.opensearch.node.NodeMocksPlugin; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.rest.action.RestCancellableNodeClient; import org.opensearch.script.MockScriptService; import org.opensearch.search.MockSearchService; import org.opensearch.search.SearchHit; @@ -158,17 +157,15 @@ import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.telemetry.MockTelemetryPlugin; -import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; import org.hamcrest.Matchers; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Rule; import java.io.IOException; import java.lang.Runtime.Version; @@ -189,13 +186,12 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.IdentityHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Random; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -209,14 +205,16 @@ import static org.opensearch.core.common.util.CollectionUtils.eagerPartition; import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.test.XContentTestUtils.convertToMap; import static org.opensearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -334,6 +332,10 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules"; private static final boolean MOCK_MODULES_ENABLED = "true".equals(System.getProperty(TESTS_ENABLE_MOCK_MODULES, "true")); + + @Rule + public static OpenSearchTestClusterRule testClusterRule = new OpenSearchTestClusterRule(); + /** * Threshold at which indexing switches from frequently async to frequently bulk. 
*/ @@ -369,22 +371,9 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { */ public static final String TESTS_CLUSTER_NAME = "tests.clustername"; - /** - * The current cluster depending on the configured {@link Scope}. - * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster. - */ - private static TestCluster currentCluster; - private static RestClient restClient = null; - - private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); - - private static OpenSearchIntegTestCase INSTANCE = null; // see @SuiteScope - private static Long SUITE_SEED = null; - @BeforeClass public static void beforeClass() throws Exception { - SUITE_SEED = randomLong(); - initializeSuiteScope(); + testClusterRule.beforeClass(); } @Override @@ -394,36 +383,6 @@ protected final boolean enableWarningsCheck() { return false; } - protected final void beforeInternal() throws Exception { - final Scope currentClusterScope = getCurrentClusterScope(); - Callable<Void> setup = () -> { - cluster().beforeTest(random()); - cluster().wipe(excludeTemplates()); - randomIndexTemplate(); - return null; - }; - switch (currentClusterScope) { - case SUITE: - assert SUITE_SEED != null : "Suite seed was not initialized"; - currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED); - RandomizedContext.current().runWithPrivateRandomness(SUITE_SEED, setup); - break; - case TEST: - currentCluster = buildAndPutCluster(currentClusterScope, randomLong()); - setup.call(); - break; - } - - } - - private void printTestMessage(String message) { - if (isSuiteScopedTest(getClass()) && (getTestName().equals("<unknown>"))) { - logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); - } else { - logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message); - } - } - /** * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas @@ -547,85 +506,6 @@ private static Settings.Builder setRandomIndexTranslogSettings(Random random, Se return builder; } - private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(seed, () -> buildTestCluster(scope, seed)); - } - - private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception { - final Class<?> clazz = this.getClass(); - TestCluster testCluster = clusters.remove(clazz); // remove this cluster first - clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere - switch (currentClusterScope) { - case SUITE: - if (testCluster == null) { // only build if it's not there yet - testCluster = buildWithPrivateContext(currentClusterScope, seed); - } - break; - case TEST: - // close the previous one and create a new one - IOUtils.closeWhileHandlingException(testCluster); - testCluster = buildTestCluster(currentClusterScope, seed); - break; - } - clusters.put(clazz, testCluster); - return testCluster; - } - - private static void clearClusters() throws Exception { - if (!clusters.isEmpty()) { - IOUtils.close(clusters.values()); - clusters.clear(); - } - if (restClient != null) { - restClient.close(); - restClient = null; - } - assertBusy(() -> { - int numChannels = RestCancellableNodeClient.getNumChannels(); - assertEquals( - numChannels - + " channels still being tracked in " - + RestCancellableNodeClient.class.getSimpleName() - + " while there should be none", - 0, - numChannels - ); - }); - } - - private void afterInternal(boolean afterClass) throws Exception { - final Scope currentClusterScope = getCurrentClusterScope(); - if (isInternalCluster()) { - internalCluster().clearDisruptionScheme(); - } - try { - if (cluster() != null) { - if (currentClusterScope != Scope.TEST) { - Metadata metadata = client().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); - - final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); - assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); - - final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); - assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); - } - ensureClusterSizeConsistency(); - ensureClusterStateConsistency(); - ensureClusterStateCanBeReadByNodeTool(); - beforeIndexDeletion(); - cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete - if (afterClass || currentClusterScope == Scope.TEST) { - cluster().close(); - } - cluster().assertAfterTest(); - } - } finally { - if (currentClusterScope == Scope.TEST) { - clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST - } - } - } - /** * @return An exclude set of index templates that will not be removed in between tests. 
*/ @@ -638,18 +518,15 @@ protected void beforeIndexDeletion() throws Exception { } public static TestCluster cluster() { - return currentCluster; + return testClusterRule.cluster(); } public static boolean isInternalCluster() { - return (currentCluster instanceof InternalTestCluster); + return testClusterRule.isInternalCluster(); } public static InternalTestCluster internalCluster() { - if (!isInternalCluster()) { - throw new UnsupportedOperationException("current test cluster is immutable"); - } - return (InternalTestCluster) currentCluster; + return testClusterRule.internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable")); } public ClusterService clusterService() { @@ -661,14 +538,7 @@ public static Client client() { } public static Client client(@Nullable String node) { - if (node != null) { - return internalCluster().client(node); - } - Client client = cluster().client(); - if (frequently()) { - client = new RandomizingClient(client, random()); - } - return client; + return testClusterRule.clientForNode(node); } public static Client dataNodeClient() { @@ -771,6 +641,11 @@ public Settings indexSettings() { ); } + if (randomBoolean()) { + builder.put(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true); + builder.put(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING.getKey(), randomDoubleBetween(0.01, 0.50, true)); + } + return builder.build(); } @@ -787,6 +662,9 @@ protected Settings featureFlagSettings() { } // Enabling Telemetry setting by default featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true); + + // Enabling fuzzy set for tests by default + featureSettings.put(FeatureFlags.DOC_ID_FUZZY_SET_SETTING.getKey(), true); return featureSettings.build(); } @@ -1687,14 +1565,17 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma if (dummyDocuments) { indexRandomForMultipleSlices(indicesArray); } + if (forceRefresh) { + waitForReplication(); + } } /* - * This method ingests bogus documents for the given indices such that multiple slices - * are formed. This is useful for testing with the concurrent search use-case as it creates - * multiple slices based on segment count. - * @param indices the indices in which bogus documents should be ingested - * */ + * This method ingests bogus documents for the given indices such that multiple slices + * are formed. This is useful for testing with the concurrent search use-case as it creates + * multiple slices based on segment count. + * @param indices the indices in which bogus documents should be ingested + * */ protected void indexRandomForMultipleSlices(String... indices) throws InterruptedException { Set<List<String>> bogusIds = new HashSet<>(); int refreshCount = randomIntBetween(2, 3); @@ -1928,7 +1809,7 @@ public void clearScroll(String... 
scrollIds) { assertThat(clearResponse.isSucceeded(), equalTo(true)); } - private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { + static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { if (clazz == Object.class || clazz == OpenSearchIntegTestCase.class) { return null; } @@ -1939,16 +1820,6 @@ private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> a return getAnnotation(clazz.getSuperclass(), annotationClass); } - private Scope getCurrentClusterScope() { - return getCurrentClusterScope(this.getClass()); - } - - private static Scope getCurrentClusterScope(Class<?> clazz) { - ClusterScope annotation = getAnnotation(clazz, ClusterScope.class); - // if we are not annotated assume suite! - return annotation == null ? Scope.SUITE : annotation.scope(); - } - private boolean getSupportsDedicatedClusterManagers() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null ? true : annotation.supportsDedicatedMasters(); @@ -2008,6 +1879,9 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean()) .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") + // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices + // when tests are run with concurrent segment search enabled + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2) .put(featureFlagSettings()); // Enable tracer only when Telemetry Setting is enabled @@ -2015,14 +1889,25 @@ protected Settings nodeSettings(int nodeOrdinal) { builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } - if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { - // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices - // when tests are run with concurrent segment search enabled - builder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); + + // Randomly set a replication strategy for the node. Replication Strategy can still be manually overridden by subclass if needed. + if (useRandomReplicationStrategy()) { + ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT; + logger.info("Randomly using Replication Strategy as {}.", replicationType.toString()); + builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType); } return builder.build(); } + /** + * Used for selecting random replication strategy, either DOCUMENT or SEGMENT. + * This method must be overridden by subclass to use random replication strategy. + * Should be used only on test classes where replication strategy is not critical for tests. 
+ */ + protected boolean useRandomReplicationStrategy() { + return false; + } + protected Path nodeConfigPath(int nodeOrdinal) { return null; } @@ -2270,10 +2155,9 @@ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( * Returns path to a random directory that can be used to create a temporary file system repo */ public Path randomRepoPath() { - if (currentCluster instanceof InternalTestCluster) { - return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings()); - } - throw new UnsupportedOperationException("unsupported cluster type"); + return testClusterRule.internalCluster() + .map(c -> randomRepoPath(c.getDefaultSettings())) + .orElseThrow(() -> new UnsupportedOperationException("unsupported cluster type")); } /** @@ -2347,83 +2231,9 @@ private NumShards(int numPrimaries, int numReplicas) { } } - private static boolean runTestScopeLifecycle() { - return INSTANCE == null; - } - - @Before - public final void setupTestCluster() throws Exception { - if (runTestScopeLifecycle()) { - printTestMessage("setting up"); - beforeInternal(); - printTestMessage("all set up"); - } - } - - @After - public final void cleanUpCluster() throws Exception { - // Deleting indices is going to clear search contexts implicitly so we - // need to check that there are no more in-flight search contexts before - // we remove indices - if (isInternalCluster()) { - internalCluster().setBootstrapClusterManagerNodeIndex(-1); - } - super.ensureAllSearchContextsReleased(); - if (runTestScopeLifecycle()) { - printTestMessage("cleaning up after"); - afterInternal(false); - printTestMessage("cleaned up after"); - } - } - @AfterClass public static void afterClass() throws Exception { - try { - if (runTestScopeLifecycle()) { - clearClusters(); - } else { - INSTANCE.printTestMessage("cleaning up after"); - INSTANCE.afterInternal(true); - checkStaticState(true); - } - StrictCheckSpanProcessor.validateTracingStateOnShutdown(); - } finally { - SUITE_SEED = null; - currentCluster = null; - INSTANCE = null; - } - } - - private static void initializeSuiteScope() throws Exception { - Class<?> targetClass = getTestClass(); - /* - Note we create these test class instance via reflection - since JUnit creates a new instance per test and that is also - the reason why INSTANCE is static since this entire method - must be executed in a static context. - */ - assert INSTANCE == null; - if (isSuiteScopedTest(targetClass)) { - // note we need to do this way to make sure this is reproducible - if (isSuiteScopedTestParameterized(targetClass)) { - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor(Settings.class).newInstance(Settings.EMPTY); - } else { - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance(); - } - boolean success = false; - try { - INSTANCE.printTestMessage("setup"); - INSTANCE.beforeInternal(); - INSTANCE.setupSuiteScopeCluster(); - success = true; - } finally { - if (!success) { - afterClass(); - } - } - } else { - INSTANCE = null; - } + testClusterRule.afterClass(); } /** @@ -2455,41 +2265,8 @@ protected boolean forbidPrivateIndexSettings() { * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise * it cannot be reused by other tests anymore. 
*/ - protected static synchronized RestClient getRestClient() { - if (restClient == null) { - restClient = createRestClient(); - } - return restClient; - } - - protected static RestClient createRestClient() { - return createRestClient(null, "http"); - } - - protected static RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { - NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().get(); - assertFalse(nodesInfoResponse.hasFailures()); - return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); - } - - protected static RestClient createRestClient( - final List<NodeInfo> nodes, - RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, - String protocol - ) { - List<HttpHost> hosts = new ArrayList<>(); - for (NodeInfo node : nodes) { - if (node.getInfo(HttpInfo.class) != null) { - TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); - InetSocketAddress address = publishAddress.address(); - hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); - } - } - RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); - if (httpClientConfigCallback != null) { - builder.setHttpClientConfigCallback(httpClientConfigCallback); - } - return builder.build(); + protected static RestClient getRestClient() { + return testClusterRule.getRestClient(); } /** @@ -2500,20 +2277,6 @@ protected static RestClient createRestClient( */ protected void setupSuiteScopeCluster() throws Exception {} - private static boolean isSuiteScopedTest(Class<?> clazz) { - return clazz.getAnnotation(SuiteScopeTestCase.class) != null; - } - - /* - * For tests defined with, SuiteScopeTestCase return true if the - * class has a constructor that takes a single Settings parameter - * */ - private static boolean isSuiteScopedTestParameterized(Class<?> clazz) { - return Arrays.stream(clazz.getConstructors()) - .filter(x -> x.getParameterTypes().length == 1) - .anyMatch(x -> x.getParameterTypes()[0].equals(Settings.class)); - } - /** * If a test is annotated with {@link SuiteScopeTestCase} * the checks and modifications that are applied to the used test cluster are only done after all tests @@ -2622,4 +2385,96 @@ protected ClusterState getClusterState() { return client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); } + /** + * Refreshes the indices in the cluster and waits until active/started replica shards + * are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected RefreshResponse refreshAndWaitForReplication(String... indices) { + RefreshResponse refreshResponse = refresh(indices); + waitForReplication(); + return refreshResponse; + } + + /** + * Waits until active/started replica shards are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected void waitForReplication(String... 
indices) { + if (indices.length == 0) { + indices = getClusterState().routingTable().indicesRouting().keySet().toArray(String[]::new); + } + try { + for (String index : indices) { + if (isSegmentReplicationEnabledForIndex(index)) { + if (isInternalCluster()) { + IndexRoutingTable indexRoutingTable = getClusterState().routingTable().index(index); + if (indexRoutingTable != null) { + assertBusy(() -> { + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + final ShardRouting primaryRouting = shardRoutingTable.primaryShard(); + if (primaryRouting.state().toString().equals("STARTED")) { + if (isSegmentReplicationEnabledForIndex(index)) { + final List<ShardRouting> replicaRouting = shardRoutingTable.replicaShards(); + final IndexShard primaryShard = getIndexShard(primaryRouting, index); + for (ShardRouting replica : replicaRouting) { + if (replica.state().toString().equals("STARTED")) { + IndexShard replicaShard = getIndexShard(replica, index); + assertEquals( + "replica shards haven't caught up with primary", + getLatestSegmentInfoVersion(primaryShard), + getLatestSegmentInfoVersion(replicaShard) + ); + } + } + } + } + } + }, 30, TimeUnit.SECONDS); + } + } else { + throw new IllegalStateException( + "Segment Replication is not supported for tests using External Test Cluster" + ); + } + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Checks if Segment Replication is enabled on Index. + */ + protected boolean isSegmentReplicationEnabledForIndex(String index) { + return clusterService().state().getMetadata().isSegmentReplicationEnabled(index); + } + + protected IndexShard getIndexShard(ShardRouting routing, String indexName) { + return getIndexShard(getClusterState().nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); + } + + /** + * Fetch IndexShard by shardId, multiple shards per node allowed. + */ + protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexServiceSafe(index); + final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid.equals(shardId.id())).findFirst(); + return indexService.getShard(id.get()); + } + + /** + * Fetch latest segment info snapshot version of an index.
+ */ + protected long getLatestSegmentInfoVersion(IndexShard shard) { + try (final GatedCloseable<SegmentInfos> snapshot = shard.getSegmentInfosSnapshot()) { + return snapshot.get().version; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index efc29d1c254e6..45ea63e862df6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -255,13 +255,11 @@ private Node newNode() { .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) - .put(nodeSettings()) // allow test cases to provide their own settings or override these - .put(featureFlagSettings); - if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices // when tests are run with concurrent segment search enabled - settingsBuilder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); - } + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2) + .put(nodeSettings()) // allow test cases to provide their own settings or override these + .put(featureFlagSettings); Collection<Class<? extends Plugin>> plugins = getPlugins(); if (plugins.contains(getTestTransportPlugin()) == false) { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index b5ff30deecf5c..aac3fca9e1e16 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -83,6 +83,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateUtils; import org.opensearch.common.time.FormatNames; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; @@ -144,6 +145,8 @@ import java.io.IOException; import java.io.InputStream; +import java.io.PrintWriter; +import java.io.StringWriter; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; @@ -169,6 +172,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntFunction; import java.util.function.Predicate; import java.util.function.Supplier; @@ -638,7 +642,32 @@ protected static void checkStaticState(boolean afterClass) throws Exception { try { // ensure that there are no status logger messages which would indicate a problem with our Log4j usage; we map the // StatusData instances to Strings as otherwise their toString output is useless + + final Function<StatusData, String> statusToString = (statusData) -> { + try (final StringWriter sw = new StringWriter(); final PrintWriter pw = new PrintWriter(sw)) { + + pw.print(statusData.getLevel()); + pw.print(":"); + pw.print(statusData.getMessage().getFormattedMessage()); + + if 
(statusData.getStackTraceElement() != null) { + final var messageSource = statusData.getStackTraceElement(); + pw.println("Source:"); + pw.println(messageSource.getFileName() + "@" + messageSource.getLineNumber()); + } + + if (statusData.getThrowable() != null) { + pw.println("Throwable:"); + statusData.getThrowable().printStackTrace(pw); + } + return sw.toString(); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + }; + assertThat( + statusData.stream().map(statusToString::apply).collect(Collectors.joining("\r\n")), statusData.stream().map(status -> status.getMessage().getFormattedMessage()).collect(Collectors.toList()), empty() ); @@ -1095,6 +1124,38 @@ public static void assertBusy(CheckedRunnable<Exception> codeBlock, long maxWait } } + /** + * Runs the code block repeatedly, sleeping for the given fixed sleep time between attempts, until no assertions trip or the provided max wait time is reached. + */ + public static void assertBusyWithFixedSleepTime(CheckedRunnable<Exception> codeBlock, TimeValue maxWaitTime, TimeValue sleepTime) + throws Exception { + long maxTimeInMillis = maxWaitTime.millis(); + long sleepTimeInMillis = sleepTime.millis(); + if (sleepTimeInMillis > maxTimeInMillis) { + throw new IllegalArgumentException("sleepTime is more than the maxWaitTime"); + } + long sum = 0; + List<AssertionError> failures = new ArrayList<>(); + while (sum <= maxTimeInMillis) { + try { + codeBlock.run(); + return; + } catch (AssertionError e) { + failures.add(e); + } + sum += sleepTimeInMillis; + Thread.sleep(sleepTimeInMillis); + } + try { + codeBlock.run(); + } catch (AssertionError e) { + for (AssertionError failure : failures) { + e.addSuppressed(failure); + } + throw e; + } + } + + /** + * Periodically execute the supplied function until it returns true, or a timeout + * is reached. This version uses a timeout of 10 seconds. If at all possible, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java new file mode 100644 index 0000000000000..57e9ccf22ab43 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.RandomizedContext; + +import org.apache.hc.core5.http.HttpHost; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.client.Client; +import org.opensearch.client.RestClient; +import org.opensearch.client.RestClientBuilder; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpInfo; +import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.OpenSearchIntegTestCase.SuiteScopeTestCase; +import org.opensearch.test.client.RandomizingClient; +import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.MultipleFailureException; +import org.junit.runners.model.Statement; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; + +import static org.hamcrest.Matchers.empty; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; + +/** + * The JUnit {@link MethodRule} that handles test method scoped and test suite scoped clusters for integration (internal cluster) tests. This rule is + * injected into {@link OpenSearchIntegTestCase}, which every integration test suite should subclass. In the case of parameterized test suites, + * please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase} or {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}, depending + * on the way cluster settings are being managed. + */ +class OpenSearchTestClusterRule implements MethodRule { + // Maps each TestCluster instance to the exact test suite instance that triggered its creation + private final Map<TestCluster, OpenSearchIntegTestCase> suites = new IdentityHashMap<>(); + private final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); + private final Logger logger = LogManager.getLogger(getClass()); + + /** + * The current cluster depending on the configured {@link Scope}. + * By default, if no {@link ClusterScope} is configured, this will hold a reference to the suite cluster.
+ */ + private TestCluster currentCluster = null; + private RestClient restClient = null; + + private OpenSearchIntegTestCase suiteInstance = null; // see @SuiteScope + private Long suiteSeed = null; + + @Override + public Statement apply(Statement base, FrameworkMethod method, Object target) { + return statement(base, method, target); + } + + void beforeClass() throws Exception { + suiteSeed = OpenSearchTestCase.randomLong(); + } + + void afterClass() throws Exception { + try { + if (runTestScopeLifecycle()) { + clearClusters(); + } else { + printTestMessage("cleaning up after"); + afterInternal(true, null); + OpenSearchTestCase.checkStaticState(true); + synchronized (clusters) { + final TestCluster cluster = clusters.remove(getTestClass()); + IOUtils.closeWhileHandlingException(cluster); + if (cluster != null) { + suites.remove(cluster); + } + } + } + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); + } finally { + suiteSeed = null; + currentCluster = null; + suiteInstance = null; + } + } + + TestCluster cluster() { + return currentCluster; + } + + boolean isInternalCluster() { + return (cluster() instanceof InternalTestCluster); + } + + Optional<InternalTestCluster> internalCluster() { + if (!isInternalCluster()) { + return Optional.empty(); + } else { + return Optional.of((InternalTestCluster) cluster()); + } + } + + Client clientForAnyNode() { + return clientForNode(null); + } + + Client clientForNode(@Nullable String node) { + if (node != null) { + return internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable")).client(node); + } + Client client = cluster().client(); + if (OpenSearchTestCase.frequently()) { + client = new RandomizingClient(client, OpenSearchTestCase.random()); + } + return client; + } + + synchronized RestClient getRestClient() { + if (restClient == null) { + restClient = createRestClient(); + } + return restClient; + } + + protected final void beforeInternal(OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(target.getClass()); + Callable<Void> setup = () -> { + currentCluster.beforeTest(OpenSearchTestCase.random()); + currentCluster.wipe(target.excludeTemplates()); + target.randomIndexTemplate(); + return null; + }; + switch (currentClusterScope) { + case SUITE: + assert suiteSeed != null : "Suite seed was not initialized"; + currentCluster = buildAndPutCluster(currentClusterScope, suiteSeed, target); + RandomizedContext.current().runWithPrivateRandomness(suiteSeed, setup); + break; + case TEST: + currentCluster = buildAndPutCluster(currentClusterScope, OpenSearchTestCase.randomLong(), target); + setup.call(); + break; + } + } + + protected void before(Object target, FrameworkMethod method) throws Throwable { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + initializeSuiteScope(instance, method); + + if (runTestScopeLifecycle()) { + printTestMessage("setting up", method); + beforeInternal(instance); + printTestMessage("all set up", method); + } + } + + protected void after(Object target, FrameworkMethod method) throws Exception { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + + // Deleting indices is going to clear search contexts implicitly so we + // need to check that there are no more in-flight search contexts before + // we remove indices + internalCluster().ifPresent(c -> c.setBootstrapClusterManagerNodeIndex(-1)); + + instance.ensureAllSearchContextsReleased(); + if 
(runTestScopeLifecycle()) { + printTestMessage("cleaning up after", method); + afterInternal(false, instance); + printTestMessage("cleaned up after", method); + } + } + + protected RestClient createRestClient() { + return createRestClient(null, "http"); + } + + protected RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { + NodesInfoResponse nodesInfoResponse = clientForAnyNode().admin().cluster().prepareNodesInfo().get(); + assertFalse(nodesInfoResponse.hasFailures()); + return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); + } + + protected RestClient createRestClient( + final List<NodeInfo> nodes, + RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, + String protocol + ) { + List<HttpHost> hosts = new ArrayList<>(); + for (NodeInfo node : nodes) { + if (node.getInfo(HttpInfo.class) != null) { + TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); + InetSocketAddress address = publishAddress.address(); + hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); + } + } + RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); + if (httpClientConfigCallback != null) { + builder.setHttpClientConfigCallback(httpClientConfigCallback); + } + return builder.build(); + } + + private Scope getClusterScope(Class<?> clazz) { + ClusterScope annotation = OpenSearchIntegTestCase.getAnnotation(clazz, ClusterScope.class); + // if we are not annotated assume suite! + return annotation == null ? Scope.SUITE : annotation.scope(); + } + + private TestCluster buildWithPrivateContext(final Scope scope, final long seed, OpenSearchIntegTestCase target) throws Exception { + return RandomizedContext.current().runWithPrivateRandomness(seed, () -> target.buildTestCluster(scope, seed)); + } + + private static boolean isSuiteScopedTest(Class<?> clazz) { + return clazz.getAnnotation(SuiteScopeTestCase.class) != null; + } + + private static boolean hasParametersChanged( + final ParameterizedOpenSearchIntegTestCase instance, + final ParameterizedOpenSearchIntegTestCase target + ) { + return !instance.hasSameParametersAs(target); + } + + private boolean runTestScopeLifecycle() { + return suiteInstance == null; + } + + private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed, OpenSearchIntegTestCase target) throws Exception { + final Class<?> clazz = target.getClass(); + + synchronized (clusters) { + TestCluster testCluster = clusters.remove(clazz); // remove this cluster first + clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere + switch (currentClusterScope) { + case SUITE: + if (testCluster != null && target instanceof ParameterizedOpenSearchIntegTestCase) { + final OpenSearchIntegTestCase instance = suites.get(testCluster); + if (instance != null) { + assert instance instanceof ParameterizedOpenSearchIntegTestCase; + if (hasParametersChanged( + (ParameterizedOpenSearchIntegTestCase) instance, + (ParameterizedOpenSearchIntegTestCase) target + )) { + IOUtils.closeWhileHandlingException(testCluster); + printTestMessage("new instance of parameterized test class, recreating test cluster for suite"); + testCluster = null; + } + } + } + + if (testCluster == null) { // only build if it's not there yet + testCluster = buildWithPrivateContext(currentClusterScope, seed, target); + suites.put(testCluster, target); + } + break; + case TEST: + // close the previous one and create a new one + IOUtils.closeWhileHandlingException(testCluster); + testCluster = target.buildTestCluster(currentClusterScope, seed); + break; + } + clusters.put(clazz, testCluster); + return testCluster; + } + } + + private void printTestMessage(String message) { + logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); + } + + private static Class<?> getTestClass() { + return OpenSearchTestCase.getTestClass(); + } + + private void printTestMessage(String message, FrameworkMethod method) { + logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), method.getName(), message); + } + + private void afterInternal(boolean afterClass, OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(getTestClass()); + internalCluster().ifPresent(InternalTestCluster::clearDisruptionScheme); + + OpenSearchIntegTestCase instance = suiteInstance; + if (instance == null) { + instance = target; + } + + try { + if (cluster() != null) { + if (currentClusterScope != Scope.TEST) { + Metadata metadata = clientForAnyNode().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); + + final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); + assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); + + final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); + assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); + } + instance.ensureClusterSizeConsistency(); + instance.ensureClusterStateConsistency(); + instance.ensureClusterStateCanBeReadByNodeTool(); + instance.beforeIndexDeletion(); + cluster().wipe(instance.excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete + if (afterClass || currentClusterScope == Scope.TEST) { + cluster().close(); + } + cluster().assertAfterTest(); + } + } finally { + if (currentClusterScope == Scope.TEST) { + clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST + } + } + } + + private void clearClusters() throws Exception { + synchronized (clusters) { + if (!clusters.isEmpty()) { + IOUtils.close(clusters.values()); + suites.clear(); + clusters.clear(); + } + } + if (restClient != null) { + restClient.close(); + restClient = null; + } + OpenSearchTestCase.assertBusy(() -> { + int numChannels = RestCancellableNodeClient.getNumChannels(); + OpenSearchTestCase.assertEquals( + numChannels + + " channels still being tracked in " + + RestCancellableNodeClient.class.getSimpleName() + + " while 
there should be none", + 0, + numChannels + ); + }); + } + + private Statement statement(final Statement base, FrameworkMethod method, Object target) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + before(target, method); + + List<Throwable> errors = new ArrayList<Throwable>(); + try { + base.evaluate(); + } catch (Throwable t) { + errors.add(t); + } finally { + try { + after(target, method); + } catch (Throwable t) { + errors.add(t); + } + } + MultipleFailureException.assertEmpty(errors); + } + }; + } + + private void initializeSuiteScope(OpenSearchIntegTestCase target, FrameworkMethod method) throws Exception { + final Class<?> targetClass = getTestClass(); + /* + Note we create these test class instance via reflection + since JUnit creates a new instance per test. + */ + if (suiteInstance != null) { + // Catching the case when parameterized test cases are run: the test class stays the same but the test instances changes. + if (target instanceof ParameterizedOpenSearchIntegTestCase) { + assert suiteInstance instanceof ParameterizedOpenSearchIntegTestCase; + if (hasParametersChanged( + (ParameterizedOpenSearchIntegTestCase) suiteInstance, + (ParameterizedOpenSearchIntegTestCase) target + )) { + printTestMessage("new instance of parameterized test class, recreating cluster scope", method); + afterClass(); + beforeClass(); + } else { + return; /* same test class instance */ + } + } else { + return; /* not a parameterized test */ + } + } + + assert suiteInstance == null; + if (isSuiteScopedTest(targetClass)) { + suiteInstance = target; + + boolean success = false; + try { + printTestMessage("setup", method); + beforeInternal(target); + suiteInstance.setupSuiteScopeCluster(); + success = true; + } finally { + if (!success) { + afterClass(); + } + } + } else { + suiteInstance = null; + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java new file mode 100644 index 0000000000000..b31dfa2bdefa5 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsModule; +import org.junit.After; +import org.junit.Before; + +/** + * Base class for running the tests with parameterization using dynamic settings: the cluster will be created once before the test suite and the + * settings will be applied dynamically, please notice that not all settings could be changed dynamically (consider using {@link ParameterizedStaticSettingsOpenSearchIntegTestCase} + * instead). 
+ * <p> + * Here is a simple illustration of the execution flow per parameters combination: + * <ul> + * <li><b>suite scope</b>: create cluster -> for each test method { apply settings -> run test method -> unapply settings } -> shutdown cluster</li> + * <li><b>test scope</b>: for each test method { create cluster -> apply settings -> run test method -> unapply settings -> shutdown cluster }</li> + * </ul> + */ +public abstract class ParameterizedDynamicSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase { + public ParameterizedDynamicSettingsOpenSearchIntegTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @Before + public void beforeTests() { + SettingsModule settingsModule = new SettingsModule(settings); + for (String key : settings.keySet()) { + assertTrue( + settingsModule.getClusterSettings().isDynamicSetting(key) || settingsModule.getIndexScopedSettings().isDynamicSetting(key) + ); + } + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + } + + @After + public void afterTests() { + final Settings.Builder settingsToUnset = Settings.builder(); + settings.keySet().forEach(settingsToUnset::putNull); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); + } + + @Override + boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + return true; + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java index edda6bf5603f7..23316adf6a2d7 100644 --- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -9,48 +9,43 @@ package org.opensearch.test; import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsModule; -import org.junit.After; -import org.junit.Before; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; /** - * Base class for running the tests with parameterization of the dynamic settings - * For any class that wants to use parameterization, use @ParametersFactory to generate - * different params only for dynamic settings. Refer SearchCancellationIT for an example. - * Note: this doesn't work for the parameterization of feature flag/static settings. + * Base class for running the tests with parameterization of the settings. + * For any class that wants to use parameterization, use {@link com.carrotsearch.randomizedtesting.annotations.ParametersFactory} to generate + * different parameters.
+ * + * There are two flavors of applying the parameterized settings to the cluster on the suite level: + * - static: the cluster will be pre-created with the settings at startup, please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase}, the method + * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is being used by the test scaffolding to detect when the test suite is instantiated with + * the new parameters and the test cluster has to be recreated + * - dynamic: the cluster will be created once before the test suite and the settings will be applied dynamically, please subclass {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}, + * please notice that not all settings could be changed dynamically + * + * If the test suite uses the per-test scope, the cluster will be recreated for each test method (applying static or dynamic settings). + */ -public abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestCase { - - private final Settings dynamicSettings; +abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestCase { + protected final Settings settings; - public ParameterizedOpenSearchIntegTestCase(Settings dynamicSettings) { - this.dynamicSettings = dynamicSettings; - } - - @Before - public void beforeTests() { - SettingsModule settingsModule = new SettingsModule(dynamicSettings); - for (String key : dynamicSettings.keySet()) { - assertTrue( - settingsModule.getClusterSettings().isDynamicSetting(key) || settingsModule.getIndexScopedSettings().isDynamicSetting(key) - ); - } - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(dynamicSettings).get(); - } - - @After - public void afterTests() { - final Settings.Builder settingsToUnset = Settings.builder(); - dynamicSettings.keySet().forEach(settingsToUnset::putNull); - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); + ParameterizedOpenSearchIntegTestCase(Settings settings) { + this.settings = settings; } // This method shouldn't be called in setupSuiteScopeCluster(). Only call this method inside single test. public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException { - if (dynamicSettings.get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).equals("true")) { + if (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) { indexRandomForMultipleSlices(indices); } } + + /** + * Compares the parameters of the two {@link ParameterizedOpenSearchIntegTestCase} test suite instances. + * This method is being used by {@link OpenSearchTestClusterRule} to determine when the parameterized test suite is instantiated with + * another set of parameters and the test cluster has to be recreated to reflect that.
+ * @param obj instance of the {@link ParameterizedOpenSearchIntegTestCase} to compare with + * @return {@code true} if the parameters of the test suites are the same, {@code false} otherwise + */ + abstract boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj); } diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java new file mode 100644 index 0000000000000..defcedfac1c76 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; + +/** + * Base class for running the tests with parameterization using static settings: the cluster will be pre-created with the settings at startup, the method + * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is being used by the test scaffolding to detect when the test suite is instantiated with + * the new parameters and the test cluster has to be recreated. + * <p> + * Here is a simple illustration of the execution flow per parameters combination: + * <ul> + * <li><b>suite scope</b>: create cluster -> for each test method { run test method } -> shutdown cluster</li> + * <li><b>test scope</b>: for each test method { create cluster -> run test method -> shutdown cluster }</li> + * </ul> + */ +public abstract class ParameterizedStaticSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase { + + public static final List<Object[]> replicationSettings = Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build() }, + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() } + ); + + public ParameterizedStaticSettingsOpenSearchIntegTestCase(Settings nodeSettings) { + super(nodeSettings); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(settings).build(); + } + + @Override + boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + final ParameterizedStaticSettingsOpenSearchIntegTestCase other = (ParameterizedStaticSettingsOpenSearchIntegTestCase) obj; + return Objects.equals(settings, other.settings); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index 2fb345f73fb06..09a72dcdc3641 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -107,6 +107,7 @@ public class TestSearchContext extends SearchContext { SearchShardTask task;
SortAndFormats sort; boolean trackScores = false; + boolean includeNamedQueriesScore = false; int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; ContextIndexSearcher searcher; @@ -409,6 +410,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; diff --git a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java index 8fb9bc5cd7c1c..8ce5afab17c00 100644 --- a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java @@ -359,4 +359,14 @@ public static Version randomPreviousCompatibleVersion(Random random, Version ver // but 7.2.0 for minimum compat return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), getPreviousVersion(version)); } + + /** + * Returns a {@link Version} with a given major, minor and revision version. + * Build version is skipped for the sake of simplicity. + */ + public static Version getVersion(byte major, byte minor, byte revision) { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(revision); + return Version.fromString(sb.toString()); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 44837c37962b4..168fbd5bd0d0a 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -252,7 +252,7 @@ public TimeValue expectedTimeToHeal() { * returns true if some live threads were found. The caller is expected to call this method * until no more "live" are found. 
*/ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected boolean suspendThreads(Set<Thread> nodeThreads) { Thread[] allThreads = null; @@ -360,7 +360,7 @@ protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo bl ); } - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected void resumeThreads(Set<Thread> threads) { for (Thread thread : threads) { diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 183214c159c14..650558aaa97a6 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -304,6 +304,22 @@ public static void assertHitCount(SearchResponse countResponse, long expectedHit } } + public static void assertHitCount(SearchResponse countResponse, long minHitCount, long maxHitCount) { + final TotalHits totalHits = countResponse.getHits().getTotalHits(); + if (!(totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value >= minHitCount && totalHits.value <= maxHitCount)) { + fail( + "Count is " + + totalHits + + " not between " + + minHitCount + + " and " + + maxHitCount + + " inclusive. " + + formatShardStatus(countResponse) + ); + } + } + public static void assertExists(GetResponse response) { String message = String.format(Locale.ROOT, "Expected %s/%s to exist, but does not", response.getIndex(), response.getId()); assertThat(message, response.isExists(), is(true)); @@ -528,6 +544,10 @@ public static Matcher<SearchHit> hasScore(final float score) { return new OpenSearchMatchers.SearchHitHasScoreMatcher(score); } + public static Matcher<SearchHit> hasMatchedQueries(final String[] matchedQueries) { + return new OpenSearchMatchers.SearchHitMatchedQueriesMatcher(matchedQueries); + } + public static <T, V> CombinableMatcher<T> hasProperty(Function<? super T, ? 
extends V> property, Matcher<V> valueMatcher) { return OpenSearchMatchers.HasPropertyLambdaMatcher.hasProperty(property, valueMatcher); } diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java index 5889b7e269ed2..2be94bd53e3c1 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java @@ -38,6 +38,7 @@ import org.hamcrest.TypeSafeMatcher; import org.hamcrest.core.CombinableMatcher; +import java.util.Arrays; import java.util.function.Function; public class OpenSearchMatchers { @@ -111,6 +112,35 @@ public void describeTo(final Description description) { } } + public static class SearchHitMatchedQueriesMatcher extends TypeSafeMatcher<SearchHit> { + private String[] matchedQueries; + + public SearchHitMatchedQueriesMatcher(String[] matchedQueries) { + this.matchedQueries = matchedQueries; + } + + @Override + protected boolean matchesSafely(SearchHit searchHit) { + String[] searchHitQueries = searchHit.getMatchedQueries(); + if (matchedQueries == null) { + return false; + } + Arrays.sort(searchHitQueries); + Arrays.sort(matchedQueries); + return Arrays.equals(searchHitQueries, matchedQueries); + } + + @Override + public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) { + mismatchDescription.appendText(" matched queries were ").appendValue(Arrays.toString(searchHit.getMatchedQueries())); + } + + @Override + public void describeTo(final Description description) { + description.appendText("searchHit matched queries should be ").appendValue(Arrays.toString(matchedQueries)); + } + } + public static class HasPropertyLambdaMatcher<T, V> extends FeatureMatcher<T, V> { private final Function<? super T, ? 
extends V> property; diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java index dda413ce2818e..44daf1b1554e0 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java @@ -11,8 +11,10 @@ import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; @@ -46,6 +48,11 @@ public Counter createUpDownCounter(String name, String description, String unit) return NoopCounter.INSTANCE; } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + @Override public void close() { diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 3b64e044e7bf0..e43b0756e2f2b 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -1283,9 +1283,17 @@ public String executor() { Level.TRACE, notSeenReceived ); + final String notSeenResponseSent = ".*\\[internal:testNotSeen].*sent response.*"; + final MockLogAppender.LoggingExpectation notSeenResponseSentExpectation = new MockLogAppender.PatternSeenEventExpectation( + "sent response", + "org.opensearch.transport.TransportService.tracer", + Level.TRACE, + notSeenResponseSent + ); appender.addExpectation(notSeenSentExpectation); appender.addExpectation(notSeenReceivedExpectation); + appender.addExpectation(notSeenResponseSentExpectation); PlainTransportFuture<StringMessageResponse> future = new PlainTransportFuture<>(noopResponseHandler); serviceA.sendRequest(nodeB, "internal:testNotSeen", new StringMessageRequest(""), future); diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6dd14e06248a9 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public ParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..f38c1ecd26429 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..1f9a7cb87ae15 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..36ca14e453158 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..84caebdb4302f --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6df8ad2c27210 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +}
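
Illustrative usage sketch (not part of the diff above): a hypothetical integration test showing how several helpers introduced in this change fit together, namely the replicationSettings parameters exposed by ParameterizedStaticSettingsOpenSearchIntegTestCase, refreshAndWaitForReplication(), assertBusyWithFixedSleepTime(), and the ranged assertHitCount() overload. The class name, package, index name, and field values are invented for the example; only the helper signatures come from the changes shown above.

package org.opensearch.example; // hypothetical package for this sketch

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import java.util.Collection;

import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;

public class ExampleReplicationAwareIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    public ExampleReplicationAwareIT(Settings staticSettings) {
        super(staticSettings);
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        // Runs the whole suite once with DOCUMENT and once with SEGMENT replication.
        return replicationSettings;
    }

    public void testDocVisibleAfterReplication() throws Exception {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("field", "value").get();

        // Refresh and, when segment replication is enabled, wait for started replica
        // shards to catch up with the primary before searching.
        refreshAndWaitForReplication("test");

        // Poll with a fixed one-second sleep instead of the backoff used by assertBusy().
        assertBusyWithFixedSleepTime(
            () -> assertHitCount(client().prepareSearch("test").get(), 1, 1),
            TimeValue.timeValueSeconds(30),
            TimeValue.timeValueSeconds(1)
        );
    }
}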