diff --git a/.github/workflows/commit-check.yml b/.github/workflows/commit-check.yml index 5f71d78b97307..36d6425e58096 100644 --- a/.github/workflows/commit-check.yml +++ b/.github/workflows/commit-check.yml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: Code Style, Abandoned Tests +name: Code Style, Abandoned Tests, Javadocs on: pull_request: push: @@ -34,12 +34,12 @@ jobs: java: [ '8', '11' ] name: Check java code on JDK ${{ matrix.java }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} @@ -67,16 +67,20 @@ jobs: run : | ./mvnw test -Pcheck-test-suites,all-java,all-scala,scala -B -V + - name: Check javadocs. + run : | + ./mvnw -DskipTests install -pl modules/tools -B -V && ./mvnw initialize -Pjavadoc -B -V + check-dotnet: name: Сheck .NET code runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup .NET SDK - uses: actions/setup-dotnet@v3 + uses: actions/setup-dotnet@v4 with: dotnet-version: '6.0.x' @@ -96,12 +100,12 @@ jobs: - { python: "3.8", toxenv: "py38" } - { python: "3.8", toxenv: "codestyle" } steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.cfg.python}} diff --git a/.github/workflows/sonar-branch.yml b/.github/workflows/sonar-branch.yml index f7a4c98914f68..90463bb1369c3 100644 --- a/.github/workflows/sonar-branch.yml +++ b/.github/workflows/sonar-branch.yml @@ -23,34 +23,40 @@ on: - master - 'ignite-[0-9].[0-9]+.[0-9]+*' +concurrency: + group: ${{ 
github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: sonarcloud: name: Sonar Analysis if: github.repository == 'apache/ignite' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false + # "fetch-depth: 0" is needed for Sonar's new code detection, blame information and issue backdating + # see more details at https://community.sonarsource.com/t/git-fetch-depth-implications/75260 - - name: Setup java - uses: actions/setup-java@v3 + - name: Setup JDK11 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 11 - cache: 'maven' - name: Cache SonarCloud packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Cache Maven packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: - path: ~/.m2 + path: ~/.m2/repository key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} restore-keys: ${{ runner.os }}-m2 @@ -59,7 +65,13 @@ jobs: - name: Build with Maven run: | - ./mvnw install -P all-java,lgpl,examples,skip-docs -DskipTests -B -V + ./mvnw install -P all-java,lgpl,examples,skip-docs -DskipTests -B -V + + - name: Setup JDK17 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: 17 - name: Sonar Analyze Upload run: > @@ -67,7 +79,7 @@ jobs: -P all-java,lgpl,examples,skip-docs -Dsonar.branch.name=${{ github.ref_name }} -Dsonar.projectKey=apache_ignite - -Dsonar.login=${SONARCLOUD_TOKEN} + -Dsonar.token=${SONARCLOUD_TOKEN} -B -V env: MAVEN_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" diff --git a/.github/workflows/sonar-pr-from-fork-build.yml b/.github/workflows/sonar-pr-from-fork-build.yml new file mode 100644 index 0000000000000..a1c8fc4bc0b4f --- /dev/null +++ b/.github/workflows/sonar-pr-from-fork-build.yml @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) 
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +name: SonarBuild + +on: pull_request + +permissions: + contents: read + +concurrency: + group: sonar-pr-workflow-${{ github.event.pull_request.head.repo.full_name }}-${{ github.event.pull_request.head.ref }} + cancel-in-progress: true + +jobs: + build: + if: github.repository == 'apache/ignite' + name: Build artifacts for Sonar Analysis + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: "refs/pull/${{ github.event.number }}/merge" + persist-credentials: false + + - name: Set up JDK11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Cache local Maven repository + uses: actions/cache@v4 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-m2 + + - name: Install Libnuma + run: sudo apt-get update && sudo apt-get install libnuma-dev + + - name: Build with Maven + run: | + ./mvnw install -P all-java,lgpl,examples,skip-docs -DskipTests -B -V + + - name: Prepare compiled classes artifact + shell: bash + run: find -iname "*target" -type d -exec tar -rf target.tar {} \+ + + - name: Upload compiled classes artifact + uses: actions/upload-artifact@v4 + id: target-artifact-upload-step + with: + 
name: target-artifact + path: | + target.tar + if-no-files-found: error + retention-days: 1 + + - name: Prepare pull request artifact + shell: bash + run: | + echo ${{ github.event.pull_request.number }} >> pr-event.txt + echo ${{ github.event.pull_request.head.ref }} >> pr-event.txt + echo ${{ github.event.pull_request.base.ref }} >> pr-event.txt + echo ${{ github.event.pull_request.head.sha }} >> pr-event.txt + echo ${{ steps.target-artifact-upload-step.outputs.artifact-id }} >> pr-event.txt + + - name: Upload pull request event artifact + uses: actions/upload-artifact@v4 + with: + name: pr-event-artifact + path: | + pr-event.txt + if-no-files-found: error + retention-days: 1 diff --git a/.github/workflows/sonar-pr-from-fork-scan.yml b/.github/workflows/sonar-pr-from-fork-scan.yml new file mode 100644 index 0000000000000..f32fd998037cc --- /dev/null +++ b/.github/workflows/sonar-pr-from-fork-scan.yml @@ -0,0 +1,175 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +name: Sonar Quality Pull Request Analysis + +on: + workflow_run: + workflows: [SonarBuild] + types: [completed] + +concurrency: + group: sonar-pr-workflow-${{ github.event.workflow_run.head_repository.full_name }}-${{ github.event.workflow_run.head_branch }} + cancel-in-progress: true + +jobs: + sonarcloud: + if: ${{ github.event.workflow_run.conclusion == 'success' && github.repository == 'apache/ignite' }} + name: Sonar Analysis + runs-on: ubuntu-latest + permissions: + contents: read + actions: write + checks: write + steps: + - name: Download pull request event artifact + uses: actions/download-artifact@v4 + with: + name: pr-event-artifact + run-id: ${{ github.event.workflow_run.id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Read pull request event + shell: bash + run: | + echo "pr_number=$(sed '1q;d' pr-event.txt)" >> "$GITHUB_ENV" + echo "pr_head_ref=$(sed '2q;d' pr-event.txt)" >> "$GITHUB_ENV" + echo "pr_base_ref=$(sed '3q;d' pr-event.txt)" >> "$GITHUB_ENV" + echo "pr_head_sha=$(sed '4q;d' pr-event.txt)" >> "$GITHUB_ENV" + echo "target_artifact_id=$(sed '5q;d' pr-event.txt)" >> "$GITHUB_ENV" + + - name: Create new PR check + uses: actions/github-script@v7 + id: check + with: + script: | + const jobs_response = await github.rest.actions.listJobsForWorkflowRunAttempt({ + ...context.repo, + run_id: context.runId, + attempt_number: process.env.GITHUB_RUN_ATTEMPT, + }); + + const job_url = jobs_response.data.jobs[0].html_url; + + const check_response = await github.rest.checks.create({ + ...context.repo, + name: 'Sonar Quality Pull Request Analysis', + head_sha: process.env.pr_head_sha, + status: 'in_progress', + output: { + title: 'Sonar Quality Pull Request Analysis', + summary: '[Details ...](' + job_url + ')' + } + }); + + return check_response.data.id; + result-encoding: string + + - name: Checkout PR head branch + uses: actions/checkout@v4 + with: + repository: ${{ github.event.workflow_run.head_repository.full_name }} + ref: ${{ 
github.event.workflow_run.head_branch }} + fetch-depth: 0 + # "fetch-depth: 0" is needed for Sonar's new code detection, blame information and issue backdating + # see more details at https://community.sonarsource.com/t/git-fetch-depth-implications/75260 + + - name: Checkout PR base branch + run: | + git remote add upstream ${{ github.event.repository.clone_url }} + git fetch upstream + git checkout -B $pr_base_ref upstream/$pr_base_ref + git checkout ${{ github.event.workflow_run.head_branch }} + git clean -ffdx && git reset --hard HEAD + + - name: Download compiled classes artifact + uses: actions/download-artifact@v4 + with: + name: target-artifact + run-id: ${{ github.event.workflow_run.id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Delete compiled classes artifact + if: always() + uses: actions/github-script@v7 + with: + script: | + await github.rest.actions.deleteArtifact({ + ...context.repo, + artifact_id: process.env.target_artifact_id + }); + + - name: Extract compiled classes artifact + shell: bash + run: tar -xf target.tar + + - name: Set up JDK17 + uses: actions/setup-java@v4 + with: + java-version: '17' + distribution: 'temurin' + + - name: Cache SonarCloud packages + uses: actions/cache@v4 + with: + path: ~/.sonar/cache + key: ${{ runner.os }}-sonar + + - name: Cache local Maven repository + uses: actions/cache@v4 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-m2 + + - name: Sonar Analyze Upload + shell: bash + run: > + ./mvnw org.sonarsource.scanner.maven:sonar-maven-plugin:sonar + -P all-java,lgpl,examples,skip-docs + -Dsonar.scm.revision=${{ github.event.workflow_run.head_sha }} + -Dsonar.pullrequest.branch=${{ env.pr_head_ref }} + -Dsonar.pullrequest.base=${{ env.pr_base_ref }} + -Dsonar.pullrequest.key=${{ env.pr_number }} + -Dsonar.pullrequest.github.repository=apache/ignite + -Dsonar.pullrequest.provider=GitHub + 
-Dsonar.pullrequest.github.summary_comment=true + -Dsonar.projectKey=apache_ignite + -Dsonar.token=${{ secrets.SONARCLOUD_TOKEN }} + -B -V + env: + MAVEN_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" + SONAR_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" + JAVA_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" + + - name: Update status of PR check + uses: actions/github-script@v7 + if: always() + env: + CHECK_ID: ${{ steps.check.outputs.result }} + JOB_STATUS: ${{ job.status }} + with: + script: | + const { CHECK_ID, JOB_STATUS } = process.env; + + await github.rest.checks.update({ + ...context.repo, + check_run_id: CHECK_ID, + status: 'completed', + conclusion: JOB_STATUS + }); diff --git a/.github/workflows/sonar-pr-from-fork.yml b/.github/workflows/sonar-pr-from-fork.yml deleted file mode 100644 index f4309f68ce6a4..0000000000000 --- a/.github/workflows/sonar-pr-from-fork.yml +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -name: Sonar Quality Pull Request Analysis - -# TODO IGNITE-20466 Investigate and fix the issue with running this workflow on PRs from forks. 
-on: pull_request - -permissions: - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - sonarcloud: - if: github.repository == 'apache/ignite' - name: Sonar Analysis - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: "refs/pull/${{ github.event.number }}/merge" - fetch-depth: 0 - - - name: Set up JDK11 - uses: actions/setup-java@v3 - with: - distribution: 'temurin' - java-version: '11' - cache: 'maven' - - - name: Cache SonarCloud packages - uses: actions/cache@v3 - with: - path: ~/.sonar/cache - key: ${{ runner.os }}-sonar - restore-keys: ${{ runner.os }}-sonar - - - name: Cache local Maven repository - uses: actions/cache@v3 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-m2 - - - name: Install Libnuma - run: sudo apt-get update && sudo apt-get install libnuma-dev - - - name: Build with Maven - run: | - ./mvnw clean install -P all-java,lgpl,examples,skip-docs -DskipTests -B -V - - - name: Sonar Analyze Upload - run: > - ./mvnw org.sonarsource.scanner.maven:sonar-maven-plugin:sonar - -P all-java,lgpl,examples,skip-docs - -Dsonar.pullrequest.branch=${PULLREQUEST_BRANCH} - -Dsonar.pullrequest.base=${PULLREQUEST_BASE} - -Dsonar.pullrequest.key=${PULLREQUEST_KEY} - -Dsonar.pullrequest.github.repository=apache/ignite - -Dsonar.pullrequest.provider=GitHub - -Dsonar.pullrequest.github.summary_comment=true - -Dsonar.projectKey=apache_ignite - -Dsonar.login=${SONARCLOUD_TOKEN} - -B -V - env: - MAVEN_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" - SONAR_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" - JAVA_OPTS: "-XX:+UseG1GC -XX:InitialHeapSize=2g -XX:MaxHeapSize=6g -XX:+UseStringDeduplication" - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONARCLOUD_TOKEN: ${{ 
secrets.SONARCLOUD_TOKEN }} - PULLREQUEST_BRANCH: ${{ github.head_ref }} - PULLREQUEST_BASE: ${{ github.base_ref }} - PULLREQUEST_KEY: ${{ github.event.pull_request.number }} diff --git a/docs/_docs/extensions-and-integrations/change-data-capture-extensions.adoc b/docs/_docs/extensions-and-integrations/change-data-capture-extensions.adoc index 5973b34607eed..98d6cfc7f32fd 100644 --- a/docs/_docs/extensions-and-integrations/change-data-capture-extensions.adoc +++ b/docs/_docs/extensions-and-integrations/change-data-capture-extensions.adoc @@ -51,12 +51,13 @@ image:../../assets/images/integrations/CDC-ignite2igniteClient.svg[] === Metrics +[cols="25%,75%",opts="header"] |=== |Name |Description | `EventsCount` | Count of messages applied to destination cluster. -| `LastEventTime` | Timestamp of last applied event. -| `TypesCount` | Count of received binary types events. -| `MappingsCount` | Count of received mappings events. +| `LastEventTime` | Timestamp of last applied event to destination cluster. +| `TypesCount` | Count of binary types events applied to destination cluster. +| `MappingsCount` | Count of mappings events applied to destination cluster |=== == Ignite to Ignite CDC streamer @@ -80,12 +81,13 @@ image:../../assets/images/integrations/CDC-ignite2ignite.svg[] === Metrics +[cols="25%,75%",opts="header"] |=== |Name |Description | `EventsCount` | Count of messages applied to destination cluster. -| `LastEventTime` | Timestamp of last applied event. -| `TypesCount` | Count of received binary types events. -| `MappingsCount` | Count of received mappings events. +| `LastEventTime` | Timestamp of last applied event to destination cluster. +| `TypesCount` | Count of binary types events applied to destination cluster. 
+| `MappingsCount` | Count of mappings events applied to destination cluster |=== == CDC replication using Kafka @@ -118,11 +120,15 @@ image:../../assets/images/integrations/CDC-ignite2kafka.svg[] === IgniteToKafkaCdcStreamer Metrics +[cols="30%,70%",opts="header"] |=== |Name |Description -| `EventsCount` | Count of messages applied to destination cluster. -| `LastEventTime` | Timestamp of last applied event. -| `BytesSent` | Number of bytes send to Kafka. +| `EventsCount` | Count of messages applied to Kafka. +| `LastEventTime` | Timestamp of last applied event to Kafka. +| `TypesCount` | Count of binary types events applied to Kafka. +| `MappingsCount` | Count of mappings events applied to Kafka. +| `BytesSent` | Count of bytes sent to Kafka. +| `MarkersCount` | Count of metadata markers sent to Kafka. |=== === `kafka-to-ignite.sh` application @@ -164,7 +170,7 @@ Kafka to Ignite configuration file should contain the following beans that will . `java.util.Properties` bean with the name `kafkaProperties`: Single Kafka consumer configuration. . `org.apache.ignite.cdc.kafka.KafkaToIgniteCdcStreamerConfiguration` bean: Options specific to `kafka-to-ignite.sh` application. -[cols="20%,45%,35%",opts="header"] +[cols="25%,45%,30%",opts="header"] |=== |Name |Description | Default value | `caches` | Set of cache names to replicate. | null @@ -176,6 +182,19 @@ Kafka to Ignite configuration file should contain the following beans that will | `kafkaRequestTimeout` | Kafka request timeout in milliseconds. | `3000` | `maxBatchSize` | Maximum number of events to be sent to destination cluster in a single batch. | 1024 | `threadCount` | Count of threads to proceed consumers. Each thread poll records from dedicated partitions in round-robin manner. | 16 +|`metricRegistryName`| Name for metric registry. 
`org.apache.metricRegistryName.cdc.applier` | cdc-kafka-to-ignite +|=== + +=== Metrics + +[cols="35%,65%",opts="header"] +|=== +|Name |Description +| `EventsReceivedCount` | Count of events received from Kafka. +| `LastEventReceivedTime` | Timestamp of last received event from Kafka. +| `EventsSentCount` | Count of events sent to destination cluster. +| `LastBatchSentTime` | Timestamp of last sent batch to the destination cluster. +| `MarkersCount` | Count of metadata markers received from Kafka. |=== ==== Logging diff --git a/modules/calcite/pom.xml b/modules/calcite/pom.xml index 9aef74c606a3d..57ee3a8013e1b 100644 --- a/modules/calcite/pom.xml +++ b/modules/calcite/pom.xml @@ -42,7 +42,7 @@ 2.8.2 3.1.8 2.4 - 2.7.0 + 2.9.0 0.10.2 3.6.1 diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/RootQuery.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/RootQuery.java index 31fafc96eee6c..e67e7f98435b6 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/RootQuery.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/RootQuery.java @@ -29,6 +29,7 @@ import java.util.stream.Collectors; import org.apache.calcite.plan.Context; import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.Frameworks; import org.apache.calcite.util.CancelFlag; import org.apache.ignite.IgniteCheckedException; @@ -139,10 +140,15 @@ public RootQuery( Context parent = Commons.convert(qryCtx); + FrameworkConfig frameworkCfg = qryCtx != null ? 
qryCtx.unwrap(FrameworkConfig.class) : null; + + if (frameworkCfg == null) + frameworkCfg = FRAMEWORK_CONFIG; + ctx = BaseQueryContext.builder() .parentContext(parent) .frameworkConfig( - Frameworks.newConfigBuilder(FRAMEWORK_CONFIG) + Frameworks.newConfigBuilder(frameworkCfg) .defaultSchema(schema) .build() ) diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/ConverterUtils.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/ConverterUtils.java index 0beb119701174..90c0c2af06bec 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/ConverterUtils.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/ConverterUtils.java @@ -34,8 +34,10 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.runtime.SqlFunctions; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.BuiltInMethod; import org.apache.calcite.util.Util; +import org.apache.ignite.internal.processors.query.calcite.util.Commons; /** */ public class ConverterUtils { @@ -166,6 +168,21 @@ static List internalTypes(List operandList) { return Util.transform(operandList, node -> toInternal(node.getType())); } + /** + * Convert {@code operand} to {@code targetType}. + * + * @param operand The expression to convert + * @param targetType Target type + * @return A new expression with java type corresponding to {@code targetType} + * or original expression if there is no need to convert. + */ + public static Expression convert(Expression operand, RelDataType targetType) { + if (SqlTypeUtil.isDecimal(targetType)) + return convertToDecimal(operand, targetType); + else + return convert(operand, Commons.typeFactory().getJavaClass(targetType)); + } + /** * Convert {@code operand} to target type {@code toType}. 
* diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexImpTable.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexImpTable.java index e52342a432027..433fb9d29637a 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexImpTable.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexImpTable.java @@ -1963,7 +1963,7 @@ private ParameterExpression genValueStatement( final Expression convertedCallVal = noConvert ? callVal - : ConverterUtils.convert(callVal, returnType); + : ConverterUtils.convert(callVal, call.getType()); final Expression valExpression = Expressions.condition(condition, diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexToLixTranslator.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexToLixTranslator.java index 43ed1dbd6380c..e697948cc0639 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexToLixTranslator.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/exec/exp/RexToLixTranslator.java @@ -62,7 +62,6 @@ import org.apache.calcite.sql.SqlOperator; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.sql.validate.SqlConformance; import org.apache.calcite.util.BuiltInMethod; @@ -552,11 +551,9 @@ Expression translateCast( } break; } - if (targetType.getSqlTypeName() == SqlTypeName.DECIMAL) - convert = ConverterUtils.convertToDecimal(operand, targetType); if (convert == null) - convert = ConverterUtils.convert(operand, typeFactory.getJavaClass(targetType)); + convert = 
ConverterUtils.convert(operand, targetType); // Going from anything to CHAR(n) or VARCHAR(n), make sure value is no // longer than n. @@ -1073,7 +1070,7 @@ private Result implementCaseWhen(RexCall call) { list.newName("case_when_value")); list.add(Expressions.declare(0, valVariable, null)); final List operandList = call.getOperands(); - implementRecursively(this, operandList, valVariable, 0); + implementRecursively(this, operandList, valVariable, call.getType(), 0); final Expression isNullExpression = checkNull(valVariable); final ParameterExpression isNullVariable = Expressions.parameter( @@ -1108,8 +1105,13 @@ private Result implementCaseWhen(RexCall call) { * } * */ - private void implementRecursively(final RexToLixTranslator currentTranslator, - final List operandList, final ParameterExpression valueVariable, int pos) { + private void implementRecursively( + final RexToLixTranslator currentTranslator, + final List operandList, + final ParameterExpression valueVariable, + final RelDataType valueType, + int pos + ) { final BlockBuilder curBlockBuilder = currentTranslator.getBlockBuilder(); final List storageTypes = ConverterUtils.internalTypes(operandList); // [ELSE] clause @@ -1119,7 +1121,7 @@ private void implementRecursively(final RexToLixTranslator currentTranslator, curBlockBuilder.add( Expressions.statement( Expressions.assign(valueVariable, - ConverterUtils.convert(res, valueVariable.getType())))); + ConverterUtils.convert(res, valueType)))); return; } // Condition code: !a_isNull && a_value @@ -1141,7 +1143,7 @@ private void implementRecursively(final RexToLixTranslator currentTranslator, ifTrueBlockBuilder.add( Expressions.statement( Expressions.assign(valueVariable, - ConverterUtils.convert(ifTrueRes, valueVariable.getType())))); + ConverterUtils.convert(ifTrueRes, valueType)))); final BlockStatement ifTrue = ifTrueBlockBuilder.toBlock(); // There is no [ELSE] clause if (pos + 1 == operandList.size() - 1) { @@ -1154,7 +1156,7 @@ private void 
implementRecursively(final RexToLixTranslator currentTranslator, new BlockBuilder(true, curBlockBuilder); final RexToLixTranslator ifFalseTranslator = currentTranslator.setBlock(ifFalseBlockBuilder); - implementRecursively(ifFalseTranslator, operandList, valueVariable, pos + 2); + implementRecursively(ifFalseTranslator, operandList, valueVariable, valueType, pos + 2); final BlockStatement ifFalse = ifFalseBlockBuilder.toBlock(); curBlockBuilder.add( Expressions.ifThenElse(tester, ifTrue, ifFalse)); diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/schema/CacheIndexImpl.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/schema/CacheIndexImpl.java index 079755aec5e17..06a3e55869bb2 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/schema/CacheIndexImpl.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/schema/CacheIndexImpl.java @@ -258,8 +258,7 @@ else if (checkExpired) if (idxKeys.size() < requiredColumns.cardinality() || !ImmutableBitSet.of(idxKeys).contains(requiredColumns)) return false; - List keyDefs = new ArrayList<>(idx.unwrap(InlineIndex.class).indexDefinition() - .indexKeyDefinitions().values()); + List keyDefs = new ArrayList<>(idx.indexDefinition().indexKeyDefinitions().values()); for (InlineIndexKeyType keyType : InlineIndexKeyTypeRegistry.types(keyDefs, new IndexKeyTypeSettings())) { // Skip variable length keys and java objects (see comments about these limitations in IndexScan class). 
diff --git a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/type/IgniteTypeSystem.java b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/type/IgniteTypeSystem.java index dcde15bca89b3..a5a9594e20b82 100644 --- a/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/type/IgniteTypeSystem.java +++ b/modules/calcite/src/main/java/org/apache/ignite/internal/processors/query/calcite/type/IgniteTypeSystem.java @@ -43,6 +43,16 @@ public class IgniteTypeSystem extends RelDataTypeSystemImpl implements Serializa return Short.MAX_VALUE; } + /** {@inheritDoc} */ + @Override public int getDefaultPrecision(SqlTypeName typeName) { + // Timestamps internally stored as millis, precision more than 3 is redundant. At the same time, + // default Calcite precision 0 causes truncation when converting to TIMESTAMP without specifying precision. + if (typeName == SqlTypeName.TIMESTAMP || typeName == SqlTypeName.TIME_WITH_LOCAL_TIME_ZONE) + return 3; + + return super.getDefaultPrecision(typeName); + } + /** {@inheritDoc} */ @Override public RelDataType deriveSumType(RelDataTypeFactory typeFactory, RelDataType argumentType) { RelDataType sumType; diff --git a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/QueryChecker.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/QueryChecker.java index 19f12244783f4..2d8f936919d18 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/QueryChecker.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/QueryChecker.java @@ -28,12 +28,14 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.apache.calcite.tools.FrameworkConfig; import org.apache.ignite.Ignite; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.internal.IgniteEx; import 
org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicCache; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.query.QueryContext; import org.apache.ignite.internal.processors.query.QueryEngine; import org.apache.ignite.internal.processors.query.schema.management.SchemaManager; import org.apache.ignite.internal.util.typedef.F; @@ -296,6 +298,9 @@ public static Matcher containsAnyScan(final String schema, final String /** */ private String exactPlan; + /** */ + private FrameworkConfig frameworkCfg; + /** */ public QueryChecker(String qry) { this.qry = qry; @@ -322,6 +327,13 @@ public QueryChecker withParams(Object... params) { return this; } + /** */ + public QueryChecker withFrameworkConfig(FrameworkConfig frameworkCfg) { + this.frameworkCfg = frameworkCfg; + + return this; + } + /** */ public QueryChecker returns(Object... res) { if (expectedResult == null) @@ -370,8 +382,10 @@ public void check() { // Check plan. QueryEngine engine = getEngine(); + QueryContext ctx = frameworkCfg != null ? QueryContext.of(frameworkCfg) : null; + List>> explainCursors = - engine.query(null, "PUBLIC", "EXPLAIN PLAN FOR " + qry, params); + engine.query(ctx, "PUBLIC", "EXPLAIN PLAN FOR " + qry, params); FieldsQueryCursor> explainCursor = explainCursors.get(0); List> explainRes = explainCursor.getAll(); @@ -387,7 +401,7 @@ public void check() { // Check result. 
List>> cursors = - engine.query(null, "PUBLIC", qry, params); + engine.query(ctx, "PUBLIC", qry, params); FieldsQueryCursor> cur = cursors.get(0); diff --git a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DataTypesTest.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DataTypesTest.java index b6dd0aa7b82af..23474698fe533 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DataTypesTest.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DataTypesTest.java @@ -24,6 +24,8 @@ import java.util.stream.Collectors; import com.google.common.collect.ImmutableSet; import org.apache.calcite.runtime.CalciteException; +import org.apache.calcite.tools.FrameworkConfig; +import org.apache.calcite.tools.Frameworks; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; @@ -31,6 +33,8 @@ import org.apache.ignite.internal.util.typedef.F; import org.junit.Test; +import static org.apache.ignite.internal.processors.query.calcite.CalciteQueryProcessor.FRAMEWORK_CONFIG; + /** * Test SQL data types. 
*/ @@ -467,6 +471,25 @@ public void testNumericConversion() { .check(); } + /** */ + @Test + public void testFunctionArgsToNumericImplicitConversion() { + assertQuery("select decode(?, 0, 0, 1, 1.0)").withParams(0).returns(new BigDecimal("0.0")).check(); + assertQuery("select decode(?, 0, 0, 1, 1.0)").withParams(1).returns(new BigDecimal("1.0")).check(); + assertQuery("select decode(?, 0, 0, 1, 1.000)").withParams(0).returns(new BigDecimal("0.000")).check(); + assertQuery("select decode(?, 0, 0, 1, 1.000)").withParams(1).returns(new BigDecimal("1.000")).check(); + assertQuery("select decode(?, 0, 0.0, 1, 1.000)").withParams(0).returns(new BigDecimal("0.000")).check(); + assertQuery("select decode(?, 0, 0.000, 1, 1.0)").withParams(1).returns(new BigDecimal("1.000")).check(); + + // With callRewrite==true function COALESCE is rewritten to CASE and CoalesceImplementor can't be checked. + FrameworkConfig frameworkCfg = Frameworks.newConfigBuilder(FRAMEWORK_CONFIG) + .sqlValidatorConfig(FRAMEWORK_CONFIG.getSqlValidatorConfig().withCallRewrite(false)) + .build(); + + assertQuery("select coalesce(?, 1.000)").withParams(0).withFrameworkConfig(frameworkCfg) + .returns(new BigDecimal("0.000")).check(); + } + /** */ @Test public void testArithmeticOverflow() { diff --git a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/DateTimeTest.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DateTimeTest.java similarity index 63% rename from modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/DateTimeTest.java rename to modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DateTimeTest.java index bce76202e4085..1335e33aa6e20 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/DateTimeTest.java +++ 
b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DateTimeTest.java @@ -15,39 +15,29 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.query.calcite; +package org.apache.ignite.internal.processors.query.calcite.integration; import java.sql.Time; import java.sql.Timestamp; import java.text.SimpleDateFormat; import java.util.Date; import java.util.TimeZone; -import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.processors.query.QueryEngine; -import org.apache.ignite.internal.processors.query.calcite.util.Commons; -import org.apache.ignite.testframework.junits.WithSystemProperty; -import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; import static java.util.Collections.singletonList; /** */ -@WithSystemProperty(key = "calcite.debug", value = "true") -public class DateTimeTest extends GridCommonAbstractTest { +public class DateTimeTest extends AbstractBasicIntegrationTest { /** */ private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); - /** */ - private static QueryEngine queryEngine; - /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { - Ignite grid = startGridsMultiThreaded(1); + super.beforeTestsStarted(); QueryEntity qryEnt = new QueryEntity(); qryEnt.setKeyFieldName("ID"); @@ -68,7 +58,7 @@ public class DateTimeTest extends GridCommonAbstractTest { .setQueryEntities(singletonList(qryEnt)) .setSqlSchema("PUBLIC"); - IgniteCache dateTimeCache = grid.createCache(cfg); + IgniteCache dateTimeCache = client.createCache(cfg); dateTimeCache.put(1, new DateTimeEntry(1, javaDate("2020-10-01 12:00:00.000"), sqlDate("2020-10-01"), 
sqlTime("12:00:00"), sqlTimestamp("2020-10-01 12:00:00.000"))); @@ -78,16 +68,17 @@ public class DateTimeTest extends GridCommonAbstractTest { sqlDate("2020-10-20"), sqlTime("13:15:00"), sqlTimestamp("2020-10-20 13:15:00.000"))); dateTimeCache.put(4, new DateTimeEntry(4, javaDate("2020-01-01 22:40:00.000"), sqlDate("2020-01-01"), sqlTime("22:40:00"), sqlTimestamp("2020-01-01 22:40:00.000"))); + } - queryEngine = Commons.lookupComponent(((IgniteEx)grid).context(), QueryEngine.class); - - awaitPartitionMapExchange(); + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + // Don't clean up caches after test. } /** */ @Test public void testQuery1() throws Exception { - checkQuery("SELECT SQLDATE FROM datetimetable where SQLTIME = '12:00:00'") + assertQuery("SELECT SQLDATE FROM datetimetable where SQLTIME = '12:00:00'") .returns(sqlDate("2020-10-01")) .check(); } @@ -95,7 +86,7 @@ public void testQuery1() throws Exception { /** */ @Test public void testQuery2() throws Exception { - checkQuery("SELECT SQLDATE FROM datetimetable where JAVADATE = ?") + assertQuery("SELECT SQLDATE FROM datetimetable where JAVADATE = ?") .withParams(javaDate("2020-12-01 00:10:20.000")) .returns(sqlDate("2020-12-01")) .check(); @@ -104,7 +95,7 @@ public void testQuery2() throws Exception { /** */ @Test public void testQuery3() throws Exception { - checkQuery("SELECT SQLDATE FROM datetimetable where JAVADATE = ?") + assertQuery("SELECT SQLDATE FROM datetimetable where JAVADATE = ?") .withParams(sqlTimestamp("2020-12-01 00:10:20.000")) .returns(sqlDate("2020-12-01")) .check(); @@ -113,7 +104,7 @@ public void testQuery3() throws Exception { /** */ @Test public void testQuery4() throws Exception { - checkQuery("SELECT MAX(SQLDATE) FROM datetimetable") + assertQuery("SELECT MAX(SQLDATE) FROM datetimetable") .returns(sqlDate("2020-12-01")) .check(); } @@ -121,7 +112,7 @@ public void testQuery4() throws Exception { /** */ @Test public void testQuery5() throws Exception { 
- checkQuery("SELECT MIN(SQLDATE) FROM datetimetable") + assertQuery("SELECT MIN(SQLDATE) FROM datetimetable") .returns(sqlDate("2020-01-01")) .check(); } @@ -129,7 +120,7 @@ public void testQuery5() throws Exception { /** */ @Test public void testQuery6() throws Exception { - checkQuery("SELECT JAVADATE FROM datetimetable WHERE SQLTIME = '13:15:00'") + assertQuery("SELECT JAVADATE FROM datetimetable WHERE SQLTIME = '13:15:00'") .returns(javaDate("2020-10-20 13:15:00.000")) .check(); } @@ -137,7 +128,7 @@ public void testQuery6() throws Exception { /** */ @Test public void testQuery7() throws Exception { - checkQuery("SELECT t1.JAVADATE, t2.JAVADATE FROM datetimetable t1 " + + assertQuery("SELECT t1.JAVADATE, t2.JAVADATE FROM datetimetable t1 " + "INNER JOIN " + "(SELECT JAVADATE, CAST(SQLTIMESTAMP AS TIME) AS CASTED_TIME FROM datetimetable) t2 " + "ON t1.SQLTIME = t2.CASTED_TIME " + @@ -157,52 +148,52 @@ public void testDstShift() throws Exception { TimeZone.setDefault(TimeZone.getTimeZone("Europe/Moscow")); // Time zone change (EET->MSK) 1992-01-19 02:00:00 -> 1992-01-19 03:00:00 - checkQuery("select date '1992-01-19'").returns(sqlDate("1992-01-19")).check(); - checkQuery("select date '1992-01-18' + interval (1) days").returns(sqlDate("1992-01-19")).check(); - checkQuery("select date '1992-01-18' + interval (24) hours").returns(sqlDate("1992-01-19")).check(); - checkQuery("SELECT timestamp '1992-01-18 02:30:00' + interval (25) hours") + assertQuery("select date '1992-01-19'").returns(sqlDate("1992-01-19")).check(); + assertQuery("select date '1992-01-18' + interval (1) days").returns(sqlDate("1992-01-19")).check(); + assertQuery("select date '1992-01-18' + interval (24) hours").returns(sqlDate("1992-01-19")).check(); + assertQuery("SELECT timestamp '1992-01-18 02:30:00' + interval (25) hours") .returns(sqlTimestamp("1992-01-19 03:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-01-18 02:30:00' + interval (23) hours") + assertQuery("SELECT timestamp 
'1992-01-18 02:30:00' + interval (23) hours") .returns(sqlTimestamp("1992-01-19 01:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-01-18 02:30:00' + interval (24) hours") + assertQuery("SELECT timestamp '1992-01-18 02:30:00' + interval (24) hours") .returns(sqlTimestamp("1992-01-19 02:30:00.000")).check(); // DST started 1992-03-29 02:00:00 -> 1992-03-29 03:00:00 - checkQuery("select date '1992-03-29'").returns(sqlDate("1992-03-29")).check(); - checkQuery("select date '1992-03-28' + interval (1) days").returns(sqlDate("1992-03-29")).check(); - checkQuery("select date '1992-03-28' + interval (24) hours").returns(sqlDate("1992-03-29")).check(); - checkQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (25) hours") + assertQuery("select date '1992-03-29'").returns(sqlDate("1992-03-29")).check(); + assertQuery("select date '1992-03-28' + interval (1) days").returns(sqlDate("1992-03-29")).check(); + assertQuery("select date '1992-03-28' + interval (24) hours").returns(sqlDate("1992-03-29")).check(); + assertQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (25) hours") .returns(sqlTimestamp("1992-03-29 03:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (23) hours") + assertQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (23) hours") .returns(sqlTimestamp("1992-03-29 01:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (24) hours") + assertQuery("SELECT timestamp '1992-03-28 02:30:00' + interval (24) hours") .returns(sqlTimestamp("1992-03-29 02:30:00.000")).check(); // DST ended 1992-09-27 03:00:00 -> 1992-09-27 02:00:00 - checkQuery("select date '1992-09-27'").returns(sqlDate("1992-09-27")).check(); - checkQuery("select date '1992-09-26' + interval (1) days").returns(sqlDate("1992-09-27")).check(); - checkQuery("select date '1992-09-26' + interval (24) hours").returns(sqlDate("1992-09-27")).check(); - checkQuery("SELECT timestamp '1992-09-26 02:30:00' + interval 
(25) hours") + assertQuery("select date '1992-09-27'").returns(sqlDate("1992-09-27")).check(); + assertQuery("select date '1992-09-26' + interval (1) days").returns(sqlDate("1992-09-27")).check(); + assertQuery("select date '1992-09-26' + interval (24) hours").returns(sqlDate("1992-09-27")).check(); + assertQuery("SELECT timestamp '1992-09-26 02:30:00' + interval (25) hours") .returns(sqlTimestamp("1992-09-27 03:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-09-26 02:30:00' + interval (23) hours") + assertQuery("SELECT timestamp '1992-09-26 02:30:00' + interval (23) hours") .returns(sqlTimestamp("1992-09-27 01:30:00.000")).check(); - checkQuery("SELECT timestamp '1992-09-26 02:30:00' + interval (24) hours") + assertQuery("SELECT timestamp '1992-09-26 02:30:00' + interval (24) hours") .returns(sqlTimestamp("1992-09-27 02:30:00.000")).check(); TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles")); // DST ended 2021-11-07 02:00:00 -> 2021-11-07 01:00:00 - checkQuery("select date '2021-11-07'").returns(sqlDate("2021-11-07")).check(); - checkQuery("select date '2021-11-06' + interval (1) days").returns(sqlDate("2021-11-07")).check(); - checkQuery("select date '2021-11-06' + interval (24) hours").returns(sqlDate("2021-11-07")).check(); - checkQuery("SELECT timestamp '2021-11-06 01:30:00' + interval (25) hours") + assertQuery("select date '2021-11-07'").returns(sqlDate("2021-11-07")).check(); + assertQuery("select date '2021-11-06' + interval (1) days").returns(sqlDate("2021-11-07")).check(); + assertQuery("select date '2021-11-06' + interval (24) hours").returns(sqlDate("2021-11-07")).check(); + assertQuery("SELECT timestamp '2021-11-06 01:30:00' + interval (25) hours") .returns(sqlTimestamp("2021-11-07 02:30:00.000")).check(); // Check string representation here, since after timestamp calculation we have '2021-11-07T01:30:00.000-0800' // but Timestamp.valueOf method converts '2021-11-07 01:30:00' in 'America/Los_Angeles' time zone to // 
'2021-11-07T01:30:00.000-0700' (we pass through '2021-11-07 01:30:00' twice after DST ended). - checkQuery("SELECT (timestamp '2021-11-06 02:30:00' + interval (23) hours)::varchar") + assertQuery("SELECT (timestamp '2021-11-06 02:30:00' + interval (23) hours)::varchar") .returns("2021-11-07 01:30:00").check(); - checkQuery("SELECT (timestamp '2021-11-06 01:30:00' + interval (24) hours)::varchar") + assertQuery("SELECT (timestamp '2021-11-06 01:30:00' + interval (24) hours)::varchar") .returns("2021-11-07 01:30:00").check(); } finally { @@ -210,6 +201,31 @@ public void testDstShift() throws Exception { } } + /** */ + @Test + public void testDateTimeCast() throws Exception { + assertQuery("SELECT CAST('2021-01-01 01:02:03.456' AS TIMESTAMP)") + .returns(sqlTimestamp("2021-01-01 01:02:03.456")).check(); + + assertQuery("SELECT CAST('2021-01-01 01:02:03.0' AS TIMESTAMP)") + .returns(sqlTimestamp("2021-01-01 01:02:03")).check(); + + assertQuery("SELECT CAST('2021-01-01 01:02:03' AS TIMESTAMP)") + .returns(sqlTimestamp("2021-01-01 01:02:03")).check(); + + assertQuery("SELECT CAST('2021-01-01 01:02:03.456' AS TIMESTAMP(0))") + .returns(sqlTimestamp("2021-01-01 01:02:03")).check(); + + assertQuery("SELECT CAST('2021-01-01 01:02:03.456' AS TIMESTAMP(2))") + .returns(sqlTimestamp("2021-01-01 01:02:03.45")).check(); + + assertQuery("SELECT CAST('2021-01-01' AS DATE)") + .returns(sqlDate("2021-01-01")).check(); + + assertQuery("SELECT CAST('01:02:03' AS TIME)") + .returns(sqlTime("01:02:03")).check(); + } + /** */ public static class DateTimeEntry { /** */ @@ -237,15 +253,6 @@ public DateTimeEntry(long id, Date javaDate, java.sql.Date sqlDate, Time sqlTime } } - /** */ - private QueryChecker checkQuery(String qry) { - return new QueryChecker(qry) { - @Override protected QueryEngine getEngine() { - return queryEngine; - } - }; - } - /** */ private Date javaDate(String str) throws Exception { return DATE_FORMAT.parse(str); diff --git 
a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DynamicParametersIntegrationTest.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DynamicParametersIntegrationTest.java index ae35a0d51f034..89fba933b5bdc 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DynamicParametersIntegrationTest.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/DynamicParametersIntegrationTest.java @@ -52,7 +52,7 @@ public void testMetadataTypesForDynamicParameters() { ); List types = F.asList("VARCHAR", "DECIMAL(32767, 0)", "INTEGER", "BIGINT", "REAL", "DOUBLE", - "UUID", "INTERVAL DAY TO SECOND", "DATE", "TIMESTAMP(0)", "TIME(0)", "INTERVAL YEAR TO MONTH"); + "UUID", "INTERVAL DAY TO SECOND", "DATE", "TIMESTAMP(3)", "TIME(0)", "INTERVAL YEAR TO MONTH"); for (int i = 0; i < values.size(); i++) { assertQuery("SELECT typeof(?)").withParams(values.get(i)).returns(types.get(i)).check(); diff --git a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/QueryMetadataIntegrationTest.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/QueryMetadataIntegrationTest.java index c3e5ebbc6c819..36ee4dc647a06 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/QueryMetadataIntegrationTest.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/QueryMetadataIntegrationTest.java @@ -52,16 +52,16 @@ public void testJoin() throws Exception { .add("PUBLIC", "TBL1", BigDecimal.class, "ID", 10, 2, true) .add("PUBLIC", "TBL1", String.class, "VAL", true) .add("PUBLIC", "TBL1", Long.class, "VAL2", 19, 0, true) - .add("PUBLIC", "TBL1", java.sql.Timestamp.class, "TS", 0, SCALE_NOT_SPECIFIED, true) + .add("PUBLIC", 
"TBL1", java.sql.Timestamp.class, "TS", 3, SCALE_NOT_SPECIFIED, true) .add("PUBLIC", "TBL2", BigDecimal.class, "ID", 10, 2, false) .add("PUBLIC", "TBL2", String.class, "VAL", true) .add("PUBLIC", "TBL2", Long.class, "VAL2", 19, 0, true) - .add("PUBLIC", "TBL2", java.sql.Timestamp.class, "TS", 0, SCALE_NOT_SPECIFIED, true), + .add("PUBLIC", "TBL2", java.sql.Timestamp.class, "TS", 3, SCALE_NOT_SPECIFIED, true), builder -> builder .add(BigDecimal.class, 10, 2) .add(BigDecimal.class, 10, 2) .add(String.class) - .add(java.sql.Timestamp.class, 0, SCALE_NOT_SPECIFIED) + .add(java.sql.Timestamp.class, 3, SCALE_NOT_SPECIFIED) ).check(); } diff --git a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/TableDdlIntegrationTest.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/TableDdlIntegrationTest.java index ccf7b0b5ae06c..98ce8275f2111 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/TableDdlIntegrationTest.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/integration/TableDdlIntegrationTest.java @@ -117,7 +117,7 @@ public void createTableDifferentDataTypes() { "'test', " + "date '2021-01-01', " + "time '12:34:56', " + - "timestamp '2021-01-01 12:34:56', " + + "timestamp '2021-01-01 12:34:56.789', " + "1, " + "9876543210, " + "3, " + @@ -138,7 +138,7 @@ public void createTableDifferentDataTypes() { assertEquals("test", row.get(1)); assertEquals(Date.valueOf("2021-01-01"), row.get(2)); assertEquals(Time.valueOf("12:34:56"), row.get(3)); - assertEquals(Timestamp.valueOf("2021-01-01 12:34:56"), row.get(4)); + assertEquals(Timestamp.valueOf("2021-01-01 12:34:56.789"), row.get(4)); assertEquals(1, row.get(5)); assertEquals(9876543210L, row.get(6)); assertEquals((short)3, row.get(7)); diff --git 
a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/planner/TestTable.java b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/planner/TestTable.java index f6632a35284ec..c0220b0802828 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/planner/TestTable.java +++ b/modules/calcite/src/test/java/org/apache/ignite/internal/processors/query/calcite/planner/TestTable.java @@ -49,8 +49,8 @@ import org.apache.ignite.internal.cache.query.index.SortOrder; import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyDefinition; import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyType; +import org.apache.ignite.internal.cache.query.index.sorted.client.ClientIndex; import org.apache.ignite.internal.cache.query.index.sorted.client.ClientIndexDefinition; -import org.apache.ignite.internal.cache.query.index.sorted.client.ClientInlineIndex; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.query.calcite.exec.ExecutionContext; import org.apache.ignite.internal.processors.query.calcite.metadata.ColocationGroup; @@ -246,12 +246,10 @@ public TestTable addIndex(RelCollation collation, String name) { IndexDefinition idxDef = new ClientIndexDefinition( new IndexName(QueryUtils.createTableCacheName(DEFAULT_SCHEMA, this.name), DEFAULT_SCHEMA, this.name, name), - keyDefs, - -1, - -1 + keyDefs ); - indexes.put(name, new CacheIndexImpl(collation, name, new ClientInlineIndex(idxDef, -1), this)); + indexes.put(name, new CacheIndexImpl(collation, name, new ClientIndex(idxDef), this)); return this; } diff --git a/modules/calcite/src/test/java/org/apache/ignite/testsuites/IntegrationTestSuite.java b/modules/calcite/src/test/java/org/apache/ignite/testsuites/IntegrationTestSuite.java index dd23c284ac138..84dcc6fdb6133 100644 --- a/modules/calcite/src/test/java/org/apache/ignite/testsuites/IntegrationTestSuite.java 
+++ b/modules/calcite/src/test/java/org/apache/ignite/testsuites/IntegrationTestSuite.java @@ -20,7 +20,6 @@ import org.apache.ignite.internal.processors.cache.DdlTransactionCalciteSelfTest; import org.apache.ignite.internal.processors.query.calcite.CalciteQueryProcessorTest; import org.apache.ignite.internal.processors.query.calcite.CancelTest; -import org.apache.ignite.internal.processors.query.calcite.DateTimeTest; import org.apache.ignite.internal.processors.query.calcite.IndexWithSameNameCalciteTest; import org.apache.ignite.internal.processors.query.calcite.SqlFieldsQueryUsageTest; import org.apache.ignite.internal.processors.query.calcite.UnstableTopologyTest; @@ -30,6 +29,7 @@ import org.apache.ignite.internal.processors.query.calcite.integration.CalciteErrorHandlilngIntegrationTest; import org.apache.ignite.internal.processors.query.calcite.integration.CorrelatesIntegrationTest; import org.apache.ignite.internal.processors.query.calcite.integration.DataTypesTest; +import org.apache.ignite.internal.processors.query.calcite.integration.DateTimeTest; import org.apache.ignite.internal.processors.query.calcite.integration.DynamicParametersIntegrationTest; import org.apache.ignite.internal.processors.query.calcite.integration.ExpiredEntriesIntegrationTest; import org.apache.ignite.internal.processors.query.calcite.integration.FunctionsTest; diff --git a/modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test_ignore b/modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test similarity index 69% rename from modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test_ignore rename to modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test index 17711eb7b0509..3e0f454054bc1 100644 --- a/modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test_ignore +++ b/modules/calcite/src/test/sql/types/timestamp/test_timestamp_ms.test @@ -1,12 +1,11 @@ # name: test/sql/types/timestamp/test_timestamp_ms.test # description: 
Test milliseconds with timestamp # group: [timestamp] -# Ignore https://issues.apache.org/jira/browse/IGNITE-15623 query TT -SELECT CAST('2001-04-20 14:42:11.123' AS TIMESTAMP) a, CAST('2001-04-20 14:42:11.0' AS TIMESTAMP) b; +SELECT CAST('2001-04-20 14:42:11.123' AS TIMESTAMP) a, CAST('2001-04-20 14:42:11.00' AS TIMESTAMP) b; ---- -2001-04-20 14:42:11.123 2001-04-20 14:42:11 +2001-04-20 14:42:11.123 2001-04-20 14:42:11.0 # many ms query I diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java index 7db76c1ddabd9..21251db8572fd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java @@ -97,7 +97,7 @@ public void setBinaryContext(BinaryContext ctx, IgniteConfiguration cfg) { } /** {@inheritDoc} */ - @Override protected T unmarshal0(byte[] bytes, @Nullable ClassLoader clsLdr) throws IgniteCheckedException { + @Override protected T unmarshal0(byte[] bytes, @Nullable ClassLoader clsLdr) { return impl.deserialize(bytes, clsLdr); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/Index.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/Index.java index 955bc03f651f3..45984b40c25f3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/Index.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/Index.java @@ -66,4 +66,9 @@ public void onUpdate(@Nullable CacheDataRow oldRow, @Nullable CacheDataRow newRo * @param softDelete if {@code true} then perform logical deletion. */ public void destroy(boolean softDelete); + + /** + * @return Index definition. 
+ */ + public IndexDefinition indexDefinition(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/SortedSegmentedIndex.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/SortedSegmentedIndex.java index 1db578257d306..92738a48b8ff8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/SortedSegmentedIndex.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/SortedSegmentedIndex.java @@ -123,4 +123,7 @@ public GridCursor findLast(int segment, IndexQueryContext qryCtx) * @return amount of index tree segments. */ public int segmentsCount(); + + /** {@inheritDoc} */ + @Override public SortedIndexDefinition indexDefinition(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndex.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndex.java new file mode 100644 index 0000000000000..db34e3ba9470c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndex.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.cache.query.index.sorted.client; + +import java.util.UUID; +import org.apache.ignite.internal.cache.query.index.Index; +import org.apache.ignite.internal.cache.query.index.IndexDefinition; + +/** + * We need indexes on non-affinity nodes. This index does not contain any data. + */ +public class ClientIndex extends AbstractClientIndex implements Index { + /** Index id. */ + private final UUID id = UUID.randomUUID(); + + /** Index definition. */ + private final IndexDefinition def; + + /** */ + public ClientIndex(IndexDefinition def) { + this.def = def; + } + + /** {@inheritDoc} */ + @Override public UUID id() { + return id; + } + + /** {@inheritDoc} */ + @Override public String name() { + return def.idxName().idxName(); + } + + /** {@inheritDoc} */ + @Override public IndexDefinition indexDefinition() { + return def; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexDefinition.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexDefinition.java index 36b8d4d0d7e92..e8fa6066ae019 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexDefinition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexDefinition.java @@ -26,12 +26,6 @@ * Define index for filtered or client node. 
*/ public class ClientIndexDefinition implements IndexDefinition { - /** */ - private final int cfgInlineSize; - - /** */ - private final int maxInlineSize; - /** */ private final IndexName idxName; @@ -39,28 +33,11 @@ public class ClientIndexDefinition implements IndexDefinition { private final LinkedHashMap keyDefs; /** */ - public ClientIndexDefinition( - IndexName idxName, - LinkedHashMap keyDefs, - int cfgInlineSize, - int maxInlineSize - ) { + public ClientIndexDefinition(IndexName idxName, LinkedHashMap keyDefs) { this.idxName = idxName; - this.cfgInlineSize = cfgInlineSize; - this.maxInlineSize = maxInlineSize; this.keyDefs = keyDefs; } - /** */ - public int getCfgInlineSize() { - return cfgInlineSize; - } - - /** */ - public int getMaxInlineSize() { - return maxInlineSize; - } - /** {@inheritDoc} */ @Override public IndexName idxName() { return idxName; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexFactory.java index 6f63445550b0d..9d9c8bbc09de6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientIndexFactory.java @@ -17,52 +17,17 @@ package org.apache.ignite.internal.cache.query.index.sorted.client; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.cache.query.index.Index; import org.apache.ignite.internal.cache.query.index.IndexDefinition; import org.apache.ignite.internal.cache.query.index.IndexFactory; -import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyDefinition; -import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyTypeSettings; -import 
org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexKeyType; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexKeyTypeRegistry; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexTree; import org.apache.ignite.internal.processors.cache.GridCacheContext; /** * Factory for client index. */ public class ClientIndexFactory implements IndexFactory { - /** Dummy key types. */ - private static final IndexKeyTypeSettings DUMMY_SETTINGS = new IndexKeyTypeSettings(); - - /** Logger. */ - private final IgniteLogger log; - - /** */ - public ClientIndexFactory(IgniteLogger log) { - this.log = log; - } - /** {@inheritDoc} */ - @Override public Index createIndex(GridCacheContext cctx, IndexDefinition definition) { - ClientIndexDefinition def = (ClientIndexDefinition)definition; - - LinkedHashMap keyDefs = definition.indexKeyDefinitions(); - - List keyTypes = InlineIndexKeyTypeRegistry.types(keyDefs.values(), DUMMY_SETTINGS); - - int inlineSize = InlineIndexTree.computeInlineSize( - definition.idxName().fullName(), - keyTypes, - new ArrayList<>(keyDefs.values()), - def.getCfgInlineSize(), - def.getMaxInlineSize(), - log - ); - - return new ClientInlineIndex(def, inlineSize); + @Override public Index createIndex(GridCacheContext cctx, IndexDefinition def) { + return new ClientIndex(def); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientInlineIndex.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientInlineIndex.java deleted file mode 100644 index 1f04dcab39781..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/client/ClientInlineIndex.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.cache.query.index.sorted.client; - -import java.util.UUID; -import org.apache.ignite.internal.cache.query.index.IndexDefinition; -import org.apache.ignite.internal.cache.query.index.sorted.IndexRow; -import org.apache.ignite.internal.cache.query.index.sorted.inline.IndexQueryContext; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndex; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexTree; -import org.apache.ignite.internal.util.lang.GridCursor; - -/** - * We need indexes on non-affinity nodes. This index does not contain any data. - */ -public class ClientInlineIndex extends AbstractClientIndex implements InlineIndex { - /** */ - private final int inlineSize; - - /** Index id. */ - private final UUID id = UUID.randomUUID(); - - /** Index definition. 
*/ - private final IndexDefinition def; - - /** */ - public ClientInlineIndex(IndexDefinition def, int inlineSize) { - this.def = def; - this.inlineSize = inlineSize; - } - - /** {@inheritDoc} */ - @Override public int inlineSize() { - return inlineSize; - } - - /** {@inheritDoc} */ - @Override public boolean created() { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public InlineIndexTree segment(int segment) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public GridCursor find( - IndexRow lower, - IndexRow upper, - boolean lowIncl, - boolean upIncl, - int segment, - IndexQueryContext qryCtx - ) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public GridCursor find( - IndexRow lower, - IndexRow upper, - boolean lowIncl, - boolean upIncl, - IndexQueryContext qryCtx - ) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public GridCursor findFirst(int segment, IndexQueryContext qryCtx) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public GridCursor findLast(int segment, IndexQueryContext qryCtx) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public GridCursor findFirstOrLast(IndexQueryContext qryCtx, boolean first) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public long count(int segment) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public long totalCount() { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public long count(int segment, IndexQueryContext qryCtx) { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public int segmentsCount() { - throw unsupported(); - } - - /** {@inheritDoc} */ - @Override public UUID id() { - return id; - } - - /** {@inheritDoc} */ - @Override public String name() { - return def.idxName().idxName(); - } - - /** {@inheritDoc} */ - @Override public IndexDefinition indexDefinition() { - return def; - } -} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/inline/InlineIndex.java b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/inline/InlineIndex.java index ffff30f57a89c..35a198e1de597 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/inline/InlineIndex.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cache/query/index/sorted/inline/InlineIndex.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.cache.query.index.sorted.inline; -import org.apache.ignite.internal.cache.query.index.IndexDefinition; import org.apache.ignite.internal.cache.query.index.sorted.SortedSegmentedIndex; /** @@ -39,9 +38,4 @@ public interface InlineIndex extends SortedSegmentedIndex { * @return Tree segment for specified number. */ public InlineIndexTree segment(int segment); - - /** - * @return Index definition. - */ - public IndexDefinition indexDefinition(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 5c2e84112bc4f..7c6c3246fd5ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -329,6 +329,26 @@ else if (log.isDebugEnabled()) if (isDeadClassLoader(meta)) return null; + boolean skipSearchDeployment = false; + + // Check already exist deployment. 
+ if (meta.deploymentMode() == SHARED) { + Collection created = getDeployments(); + + for (GridDeployment dep0 : created) { + // hot redeploy from same node + if (dep0.participants().containsKey(meta.senderNodeId()) || dep0.undeployed()) + continue; + + IgniteBiTuple, Throwable> cls = dep0.deployedClass(meta.className(), meta.alias()); + + if (cls.getKey() != null && cls.getValue() == null) { + ((SharedDeployment)dep0).addParticipant(meta.senderNodeId(), meta.classLoaderId()); + skipSearchDeployment = true; + } + } + } + if (!F.isEmpty(meta.participants())) { Map participants = new LinkedHashMap<>(); @@ -376,7 +396,8 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { return null; } - dep = (SharedDeployment)searchDeploymentCache(meta); + if (!skipSearchDeployment) + dep = (SharedDeployment)searchDeploymentCache(meta); if (dep == null) { List deps = cache.get(meta.userVersion()); @@ -1243,8 +1264,6 @@ void onRemoved() { /** {@inheritDoc} */ @Override public void onDeployed(Class cls) { - assert !Thread.holdsLock(mux); - boolean isTask = isTask(cls); String msg = (isTask ? "Task" : "Class") + " was deployed in SHARED or CONTINUOUS mode: " + cls; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/P2PClassLoadingIssues.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/P2PClassLoadingIssues.java index efdeb0ae2c1d3..ec9cc81d9cba2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/P2PClassLoadingIssues.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/P2PClassLoadingIssues.java @@ -38,6 +38,14 @@ public static T rethrowDisarmedP2PClassLoadingFailure(NoClassDefFoundError e throw error; } + /** Wraps specific exception. + * + * @param e Exception to be wrapped. 
+ */ + public static P2PClassNotFoundException wrapWithP2PFailure(NoClassDefFoundError e) { + return new P2PClassNotFoundException("P2P class loading failed", e); + } + /** * Returns @{code true} if the given Throwable is an error caused by a P2P class-loading failure. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerUpdateJob.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerUpdateJob.java index ffe5b944d6f8e..b4a9bd0133b8f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerUpdateJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerUpdateJob.java @@ -21,7 +21,6 @@ import java.util.Map; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridKernalContext; -import org.apache.ignite.internal.managers.deployment.P2PClassLoadingIssues; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.IgniteCacheProxy; @@ -33,6 +32,8 @@ import org.apache.ignite.stream.StreamReceiver; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.internal.managers.deployment.P2PClassLoadingIssues.wrapWithP2PFailure; + /** * Job to put entries to cache on affinity node. 
*/ @@ -146,7 +147,7 @@ class DataStreamerUpdateJob implements GridPlainCallable { return null; } catch (NoClassDefFoundError e) { - return P2PClassLoadingIssues.rethrowDisarmedP2PClassLoadingFailure(e); + throw wrapWithP2PFailure(e); } finally { if (ignoreDepOwnership) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheIndexQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheIndexQueryRequest.java index db86541fbb8ea..9d1a29cb7308f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheIndexQueryRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheIndexQueryRequest.java @@ -155,7 +155,8 @@ private IndexQueryCriterion readInCriterion(BinaryRawReaderEx reader) { * {@inheritDoc} */ @Override public ClientResponse process(ClientConnectionContext ctx) { - IgniteCache cache = !isKeepBinary() ? rawCache(ctx) : cache(ctx); + IgniteCache cache = qry.getFilter() != null && !isKeepBinary() ? 
+ rawCache(ctx) : cache(ctx); if (qry.getPartition() != null) updateAffinityMetrics(ctx, qry.getPartition()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 208ee4f9c67e3..9e8897f2ef010 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -2747,6 +2747,8 @@ public void store(GridCacheContext cctx, CacheDataRow newRow, @Nullable CacheDat if (idx != null) idx.store(cctx, desc, newRow, prevRow, prevRowAvailable); + + statsMgr.onRowUpdated(desc.schemaName(), desc.tableName(), newRow.partition(), key.valueBytes(coctx)); } /** @@ -3614,6 +3616,8 @@ public void remove(GridCacheContext cctx, CacheDataRow row) if (indexingEnabled()) idx.remove(cctx, desc, row); + + statsMgr.onRowUpdated(desc.schemaName(), desc.tableName(), row.partition(), row.key().valueBytes(cctx.cacheObjectContext())); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/running/HeavyQueriesTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/running/HeavyQueriesTracker.java index d7a0679334083..1a3654a415aa9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/running/HeavyQueriesTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/running/HeavyQueriesTracker.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.query.running; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridKernalContext; @@ -254,6 +255,11 @@ public void setResultSetSizeThresholdMultiplier(int rsSizeThresholdMult) { this.rsSizeThresholdMult = 
rsSizeThresholdMult <= 1 ? 1 : rsSizeThresholdMult; } + /** */ + public Set getQueries() { + return qrys.keySet(); + } + /** * Holds timeout settings for the specified query. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/management/SortedIndexDescriptorFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/management/SortedIndexDescriptorFactory.java index a5e9c83a5bfa8..0059f5e290f44 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/management/SortedIndexDescriptorFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/management/SortedIndexDescriptorFactory.java @@ -17,17 +17,21 @@ package org.apache.ignite.internal.processors.query.schema.management; +import java.util.ArrayList; import java.util.LinkedHashMap; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.cache.query.index.Index; import org.apache.ignite.internal.cache.query.index.IndexName; import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyDefinition; +import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyTypeSettings; import org.apache.ignite.internal.cache.query.index.sorted.QueryIndexDefinition; import org.apache.ignite.internal.cache.query.index.sorted.client.ClientIndexDefinition; import org.apache.ignite.internal.cache.query.index.sorted.client.ClientIndexFactory; import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndex; import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexFactory; +import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexKeyTypeRegistry; +import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexTree; import org.apache.ignite.internal.processors.cache.GridCacheContext; import 
org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; @@ -92,8 +96,10 @@ public SortedIndexDescriptorFactory(IgniteLogger log) { addAffinityColumn(unwrappedCols, tbl); LinkedHashMap idxCols = unwrappedCols; + IndexName idxFullName = new IndexName(cacheInfo.name(), typeDesc.schemaName(), typeDesc.tableName(), idxName); Index idx; + int inlineSize; if (cacheInfo.affinityNode()) { GridCacheContext cctx = cacheInfo.cacheContext(); @@ -108,7 +114,7 @@ public SortedIndexDescriptorFactory(IgniteLogger log) { QueryIndexDefinition idxDef = new QueryIndexDefinition( typeDesc, cacheInfo, - new IndexName(cacheInfo.name(), typeDesc.schemaName(), typeDesc.tableName(), idxName), + idxFullName, treeName, ctx.indexProcessor().rowCacheCleaner(cacheInfo.groupId()), isPk, @@ -122,21 +128,28 @@ public SortedIndexDescriptorFactory(IgniteLogger log) { idx = ctx.indexProcessor().createIndexDynamically(cctx, SORTED_IDX_FACTORY, idxDef, cacheVisitor); else idx = ctx.indexProcessor().createIndex(cctx, SORTED_IDX_FACTORY, idxDef); + + assert idx instanceof InlineIndex : idx; + + inlineSize = ((InlineIndex)idx).inlineSize(); } else { - ClientIndexDefinition d = new ClientIndexDefinition( - new IndexName(tbl.cacheInfo().name(), tbl.type().schemaName(), tbl.type().tableName(), idxName), - idxCols, - idxDesc.inlineSize(), - tbl.cacheInfo().config().getSqlIndexMaxInlineSize()); + ClientIndexDefinition def = new ClientIndexDefinition(idxFullName, idxCols); - idx = ctx.indexProcessor().createIndex(tbl.cacheInfo().cacheContext(), new ClientIndexFactory(log), d); - } + idx = ctx.indexProcessor().createIndex(cacheInfo.cacheContext(), new ClientIndexFactory(), def); - assert idx instanceof InlineIndex : idx; + // Here inline size is just for information (to be shown in system view). 
+ inlineSize = InlineIndexTree.computeInlineSize( + idxFullName.fullName(), + InlineIndexKeyTypeRegistry.types(idxCols.values(), new IndexKeyTypeSettings()), + new ArrayList<>(idxCols.values()), + idxDesc.inlineSize(), + tbl.cacheInfo().config().getSqlIndexMaxInlineSize(), + log + ); + } - return new IndexDescriptor(tbl, idxName, idxDesc.type(), idxCols, isPk, isAff, - ((InlineIndex)idx).inlineSize(), idx); + return new IndexDescriptor(tbl, idxName, idxDesc.type(), idxCols, isPk, isAff, inlineSize, idx); } /** Split key into simple components and add to columns list. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/stat/IgniteStatisticsManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/stat/IgniteStatisticsManagerImpl.java index 66f5af303b865..1e2ceb3c593e6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/stat/IgniteStatisticsManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/stat/IgniteStatisticsManagerImpl.java @@ -111,7 +111,7 @@ public class IgniteStatisticsManagerImpl implements IgniteStatisticsManager { private volatile boolean started; /** Schedule to process obsolescence statistics. */ - private GridTimeoutProcessor.CancelableTask obsolescenceSchedule; + private volatile GridTimeoutProcessor.CancelableTask obsolescenceSchedule; /** Exchange listener. */ private final PartitionsExchangeAware exchAwareLsnr = new PartitionsExchangeAware() { @@ -236,16 +236,23 @@ else if (db == null) tryStart(); - if (serverNode) { - // Use mgmt pool to work with statistics repository in busy lock to schedule some tasks. 
- obsolescenceSchedule = ctx.timeout().schedule(() -> { - obsolescenceBusyExecutor.execute(() -> processObsolescence()); - }, OBSOLESCENCE_INTERVAL * 1000, OBSOLESCENCE_INTERVAL * 1000); - } + if (serverNode) + scheduleObsolescence(OBSOLESCENCE_INTERVAL); ctx.cache().context().exchange().registerExchangeAwareComponent(exchAwareLsnr); } + /** */ + void scheduleObsolescence(int seconds) { + assert seconds >= 1; + + if (obsolescenceSchedule != null) + obsolescenceSchedule.close(); + + obsolescenceSchedule = ctx.timeout().schedule(() -> obsolescenceBusyExecutor.execute(this::processObsolescence), + seconds * 1000, seconds * 1000); + } + /** * Check all preconditions and stop if started and have reason to stop. */ diff --git a/modules/core/src/main/java/org/apache/ignite/marshaller/jdk/JdkMarshallerObjectInputStream.java b/modules/core/src/main/java/org/apache/ignite/marshaller/jdk/JdkMarshallerObjectInputStream.java index d9fdd3d2f1c3d..7e7c8f107e2c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/marshaller/jdk/JdkMarshallerObjectInputStream.java +++ b/modules/core/src/main/java/org/apache/ignite/marshaller/jdk/JdkMarshallerObjectInputStream.java @@ -51,7 +51,7 @@ class JdkMarshallerObjectInputStream extends ObjectInputStream { } /** {@inheritDoc} */ - @Override protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { + @Override protected Class resolveClass(ObjectStreamClass desc) throws ClassNotFoundException { // NOTE: DO NOT CHANGE TO 'clsLoader.loadClass()' // Must have 'Class.forName()' instead of clsLoader.loadClass() // due to weird ClassNotFoundExceptions for arrays of classes diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index 6ae86f6c07165..410611cc82feb 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ 
b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -1659,6 +1659,39 @@ protected Socket openSocket(Socket sock, InetSocketAddress remAddr, IgniteSpiOpe } } + /** + * Writing to a socket might fail due to a broken connection. It might happen due to a recipient has closed the connection + * before, on SSL handshake, and doesn't accept new messages. In a such case it's possible to check the original error + * by reading the socket input stream. + * + * @param sock Socket to check. + * @param writeErr Error on writing a message to the socket. + * @return {@code SSLException} in case of SSL error, or {@code null} otherwise. + */ + private @Nullable SSLException checkSslException(Socket sock, Exception writeErr) { + if (!sslEnable) + return null; + + SSLException sslEx = X.cause(writeErr, SSLException.class); + + if (sslEx != null) + return sslEx; + + try { + // Set timeout to 1ms, in this case of closed socket it should return fast. + if (X.hasCause(writeErr, SocketException.class)) + readReceipt(sock, 1); + } + catch (SSLException sslErr) { + return sslErr; + } + catch (Exception err) { + // Skip. + } + + return null; + } + /** * Creates socket binding it to a local host address. This operation is not blocking. * @@ -1716,7 +1749,9 @@ protected void writeToSocket(Socket sock, TcpDiscoveryAbstractMessage msg, byte[ out.flush(); } catch (IOException e) { - err = e; + SSLException sslEx = checkSslException(sock, e); + + err = sslEx == null ? e : sslEx; } finally { boolean cancelled = obj.cancel(); @@ -1824,7 +1859,9 @@ protected void writeToSocket(Socket sock, U.marshal(marshaller(), msg, out); } catch (IgniteCheckedException e) { - err = e; + SSLException sslEx = checkSslException(sock, e); + + err = sslEx == null ? 
e : new IgniteCheckedException(sslEx); } finally { boolean cancelled = obj.cancel(); @@ -1869,7 +1906,9 @@ protected void writeToSocket(TcpDiscoveryAbstractMessage msg, Socket sock, int r out.flush(); } catch (IOException e) { - err = e; + SSLException sslEx = checkSslException(sock, e); + + err = sslEx == null ? e : sslEx; } finally { boolean cancelled = obj.cancel(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAtomicEntryProcessorDeploymentSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAtomicEntryProcessorDeploymentSelfTest.java index f1128573c3e74..2fe83283a4567 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAtomicEntryProcessorDeploymentSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAtomicEntryProcessorDeploymentSelfTest.java @@ -17,8 +17,10 @@ package org.apache.ignite.internal.processors.cache; +import java.util.HashMap; import java.util.Map; import java.util.TreeSet; +import java.util.concurrent.TimeUnit; import javax.cache.processor.EntryProcessorResult; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheAtomicityMode; @@ -27,12 +29,16 @@ import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.config.GridTestProperties; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheMode.REPLICATED; import static 
org.apache.ignite.cache.CacheRebalanceMode.SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; @@ -116,6 +122,7 @@ public void testInvokeDeployment2() throws Exception { depMode = DeploymentMode.SHARED; doTestInvoke(); + doTestInvokeEx(); } /** @@ -168,6 +175,96 @@ private void doTestInvoke() throws Exception { } } + /** + * Scenario: 2 different client nodes invoke entry processors on intersected collection of keys. + * @throws Exception + */ + private void doTestInvokeEx() throws Exception { + String testCacheName = "dynamic_params"; + + String prcClsName = "org.apache.ignite.tests.p2p.CacheDeploymentEntryProcessorMultipleEnts"; + + String contClsName = "org.apache.ignite.tests.p2p.cache.Container"; + + try { + startGrid(0); + IgniteEx cli1 = startClientGrid(1); + IgniteEx cli2 = startClientGrid(2); + + Class procCls1 = cli1.configuration().getClassLoader().loadClass(prcClsName); + Class procCls2 = cli2.configuration().getClassLoader().loadClass(prcClsName); + + Class contCls1 = cli1.configuration().getClassLoader().loadClass(contClsName); + Class contCls2 = cli2.configuration().getClassLoader().loadClass(contClsName); + + // just one more additional class unavailability check. + try { + Class.forName(TEST_VALUE); + fail(); + } + catch (ClassNotFoundException e) { + // No op. 
+ } + + Class cacheValClazz = grid(2).configuration().getClassLoader().loadClass(TEST_VALUE); + Object cacheVal = cacheValClazz.newInstance(); + + CacheConfiguration ccfg = new CacheConfiguration<>(); + ccfg.setCacheMode(REPLICATED); + ccfg.setAtomicityMode(ATOMIC); + ccfg.setName(testCacheName); + + IgniteCache processedCache = cli1.createCache(ccfg); + + Map map = new HashMap<>(); + for (long i = 0; i < 100; i++) { + map.put(i, cacheVal); + } + + processedCache.putAll(map); + + IgniteCache cache1 = cli1.cache(testCacheName); + IgniteCache cache2 = cli2.cache(testCacheName); + + Object cont1 = contCls1.getDeclaredConstructor(Object.class).newInstance(map); + Object cont2 = contCls2.getDeclaredConstructor(Object.class).newInstance(map); + + for (int i = 0; i < 10; ++i) { + IgniteCache procCache1 = cache1; + IgniteInternalFuture f1 = GridTestUtils.runAsync(() -> { + for (long key = 0; key < 10; key++) { + procCache1.invoke(key, + (CacheEntryProcessor)procCls1.getDeclaredConstructor(Object.class).newInstance(cont1)); + } + }); + + IgniteCache procCache2 = cache2; + IgniteInternalFuture f2 = GridTestUtils.runAsync(() -> { + for (long key = 10; key > 0; key--) { + procCache2.invoke(key, + (CacheEntryProcessor)procCls2.getDeclaredConstructor(Object.class).newInstance(cont2)); + }; + }); + + long duration = TimeUnit.SECONDS.toMillis(30); + + f1.get(duration); + f2.get(duration); + + stopAllClients(true); + + cli1 = startClientGrid(1); + cli2 = startClientGrid(2); + + cache1 = cli1.cache(testCacheName); + cache2 = cli2.cache(testCacheName); + } + } + finally { + stopAllGrids(); + } + } + /** * @throws Exception In case of error. 
*/ diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PSameClassLoaderSelfTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PSameClassLoaderSelfTest.java index 504b4d6e2069d..5000fe323a056 100644 --- a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PSameClassLoaderSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PSameClassLoaderSelfTest.java @@ -20,6 +20,7 @@ import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; +import java.util.UUID; import org.apache.ignite.Ignite; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.IgniteConfiguration; @@ -93,16 +94,18 @@ private void processTest() throws Exception { Class task1 = CLASS_LOADER.loadClass(TEST_TASK1_NAME); Class task2 = CLASS_LOADER.loadClass(TEST_TASK2_NAME); + UUID id = ignite2.cluster().localNode().id(); + // Execute task1 and task2 from node1 on node2 and make sure that they reuse same class loader on node2. - Integer res1 = (Integer)ignite1.compute().execute(task1, ignite2.cluster().localNode().id()); - Integer res2 = (Integer)ignite1.compute().execute(task2, ignite2.cluster().localNode().id()); + Integer res1 = (Integer)ignite1.compute().execute(task1, id); + Integer res2 = (Integer)ignite1.compute().execute(task2, id); assert res1.equals(res2); // Class loaders are same - Integer res3 = (Integer)ignite3.compute().execute(task1, ignite2.cluster().localNode().id()); - Integer res4 = (Integer)ignite3.compute().execute(task2, ignite2.cluster().localNode().id()); + Integer res3 = (Integer)ignite3.compute().execute(task1, id); + Integer res4 = (Integer)ignite3.compute().execute(task2, id); - assert res3.equals(res4); + assertEquals(res3, res4); } finally { stopGrid(1); @@ -147,18 +150,6 @@ public void testContinuousMode() throws Exception { processTest(); } - /** - * Test GridDeploymentMode.SHARED mode. - * - * @throws Exception if error occur. 
- */ - @Test - public void testSharedMode() throws Exception { - depMode = DeploymentMode.SHARED; - - processTest(); - } - /** * Return true if and only if all elements of array are different. * diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java index 0d4b6ccb90372..51966fa93a97f 100644 --- a/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java +++ b/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java @@ -130,7 +130,7 @@ public void testLambdaDeploymentFromSecondAndThird() throws Exception { new URL(GridTestProperties.getProperty("p2p.uri.cls.second"))}), ignite3, 10_000); for (Object o: res) - assertEquals(o, 43); + assertEquals(43, o); } finally { stopAllGrids(); diff --git a/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/dns_failure_test/BlockingNameService.java b/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/dns_failure_test/BlockingNameService.java index 2c37056805d77..8fd16a2297e2a 100644 --- a/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/dns_failure_test/BlockingNameService.java +++ b/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/dns_failure_test/BlockingNameService.java @@ -91,7 +91,7 @@ public static void main(String[] args) throws Exception { if ("1.8".equals(jdkVer)) installJdk8(); - else if ("11".equals(jdkVer)) + else if ("11".equals(jdkVer) || "17".equals(jdkVer)) installJdk11(); else throw new IllegalArgumentException("Unsupported JDK version: " + jdkVer); diff --git a/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/thin_client_query_test/ThinClientQueryTestApplication.java b/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/thin_client_query_test/ThinClientQueryTestApplication.java new file mode 100644 index 
0000000000000..6581ceefc0f0e --- /dev/null +++ b/modules/ducktests/src/main/java/org/apache/ignite/internal/ducktest/tests/thin_client_query_test/ThinClientQueryTestApplication.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.ducktest.tests.thin_client_query_test; + +import java.util.LinkedHashMap; +import java.util.List; +import javax.cache.Cache; +import com.fasterxml.jackson.databind.JsonNode; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.QueryIndex; +import org.apache.ignite.cache.query.IndexQuery; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.ClientCacheConfiguration; +import org.apache.ignite.internal.ducktest.utils.IgniteAwareApplication; +import org.apache.ignite.internal.util.typedef.F; + +/** Tests cache queries for thin client. 
*/ +public class ThinClientQueryTestApplication extends IgniteAwareApplication { + /** */ + private static final int CNT = 100; + + /** */ + @Override protected void run(JsonNode jsonNode) throws Exception { + markInitialized(); + + QueryEntity qe = new QueryEntity(Integer.class, EntityValue.class) + .setValueType(EntityValue.class.getName()) + .setFields(new LinkedHashMap<>(F.asMap("val", Integer.class.getName()))) + .setIndexes(F.asList(new QueryIndex("val", false).setName("VAL_IDX"))); + + ClientCacheConfiguration clnCacheCfg = new ClientCacheConfiguration() + .setName("testCache") + .setQueryEntities(qe); + + ClientCache cache = client.createCache(clnCacheCfg); + + for (int i = 0; i < CNT; i++) + cache.put(i, new EntityValue(i)); + + boolean filter = jsonNode.get("filter").asBoolean(); + + testIndexQuery(cache, filter); + testBinaryIndexQuery(cache, filter); + testScanQuery(cache, filter); + testBinaryScanQuery(cache, filter); + + markFinished(); + } + + /** */ + private void testIndexQuery(ClientCache cache, boolean filter) { + IndexQuery idxQry = new IndexQuery<>(EntityValue.class.getName(), "VAL_IDX"); + + if (filter) + idxQry.setFilter((k, v) -> v.val < CNT / 2); + + List> result = cache.query(idxQry).getAll(); + + int cnt = filter ? CNT / 2 : CNT; + + assert result.size() == cnt; + + for (int i = 0; i < cnt; i++) { + Cache.Entry e = result.get(i); + + assert e.getKey() == i; + assert e.getValue().val == i; + } + } + + /** */ + private void testBinaryIndexQuery(ClientCache cache, boolean filter) { + IndexQuery idxQry = new IndexQuery<>(EntityValue.class.getName(), "VAL_IDX"); + + if (filter) + idxQry.setFilter((k, v) -> (int)v.field("val") < CNT / 2); + + List> result = cache.withKeepBinary().query(idxQry).getAll(); + + int cnt = filter ? 
CNT / 2 : CNT; + + assert result.size() == cnt; + + for (int i = 0; i < cnt; i++) { + Cache.Entry e = result.get(i); + + assert e.getKey() == i; + assert (int)e.getValue().field("val") == i; + } + } + + /** */ + private void testScanQuery(ClientCache cache, boolean filter) { + ScanQuery scanQry = new ScanQuery<>(); + + if (filter) + scanQry.setFilter((k, v) -> v.val < CNT / 2); + + List> result = cache.query(scanQry).getAll(); + + int cnt = filter ? CNT / 2 : CNT; + + assert result.size() == cnt; + + for (int i = 0; i < cnt; i++) { + Cache.Entry e = result.get(i); + + assert e.getKey() == e.getValue().val; + } + } + + /** */ + private void testBinaryScanQuery(ClientCache cache, boolean filter) { + ScanQuery scanQry = new ScanQuery<>(); + + if (filter) + scanQry.setFilter((k, v) -> (int)v.field("val") < CNT / 2); + + List> result = cache.withKeepBinary().query(scanQry).getAll(); + + int cnt = filter ? CNT / 2 : CNT; + + assert result.size() == cnt; + + for (int i = 0; i < cnt; i++) { + Cache.Entry e = result.get(i); + + assert e.getKey() == e.getValue().field("val"); + } + } + + /** */ + private static class EntityValue { + /** */ + private final int val; + + /** */ + public EntityValue(int val) { + this.val = val; + } + + /** */ + public String toString() { + return "EntityValue [val=" + val + "]"; + } + } +} diff --git a/modules/ducktests/tests/docker/Dockerfile b/modules/ducktests/tests/docker/Dockerfile index b09e8d00b70a5..0b5ae552b4b36 100644 --- a/modules/ducktests/tests/docker/Dockerfile +++ b/modules/ducktests/tests/docker/Dockerfile @@ -49,7 +49,7 @@ ARG APACHE_MIRROR="https://apache-mirror.rbc.ru/pub/apache/" ARG APACHE_ARCHIVE="https://archive.apache.org/dist/" # Install binary test dependencies. 
-RUN for v in "2.7.6" "2.15.0"; \ +RUN for v in "2.7.6" "2.16.0"; \ do cd /opt; \ curl -O $APACHE_ARCHIVE/ignite/$v/apache-ignite-$v-bin.zip;\ unzip apache-ignite-$v-bin.zip && mv /opt/apache-ignite-$v-bin /opt/ignite-$v;\ diff --git a/modules/ducktests/tests/ignitetest/tests/cdc/cdc_test.py b/modules/ducktests/tests/ignitetest/tests/cdc/cdc_test.py index 4a1cc9b738111..56a0fe4bbe319 100644 --- a/modules/ducktests/tests/ignitetest/tests/cdc/cdc_test.py +++ b/modules/ducktests/tests/ignitetest/tests/cdc/cdc_test.py @@ -25,7 +25,7 @@ from ignitetest.utils import cluster, ignite_versions, ignore_if from ignitetest.utils.bean import Bean from ignitetest.utils.ignite_test import IgniteTest -from ignitetest.utils.version import LATEST, IgniteVersion, DEV_BRANCH, V_2_14_0 +from ignitetest.utils.version import LATEST, IgniteVersion, DEV_BRANCH, V_2_16_0 class CdcTest(IgniteTest): @@ -37,7 +37,7 @@ class CdcTest(IgniteTest): @cluster(num_nodes=5) @ignite_versions(str(DEV_BRANCH), str(LATEST)) - @ignore_if(lambda version, _: version <= V_2_14_0) + @ignore_if(lambda version, _: version <= V_2_16_0) @parametrize(num_nodes=5, wal_force_archive_timeout=100, pacing=10, duration_sec=10) def test_cdc_start_stop(self, ignite_version, num_nodes, wal_force_archive_timeout, pacing, duration_sec): """ diff --git a/modules/ducktests/tests/ignitetest/tests/thin_client_query_test.py b/modules/ducktests/tests/ignitetest/tests/thin_client_query_test.py new file mode 100644 index 0000000000000..e008878159a68 --- /dev/null +++ b/modules/ducktests/tests/ignitetest/tests/thin_client_query_test.py @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains client queries tests. +""" +from ducktape.mark import matrix + +from ignitetest.services.ignite import IgniteService +from ignitetest.services.ignite_app import IgniteApplicationService +from ignitetest.services.utils.ignite_configuration import IgniteConfiguration, IgniteThinClientConfiguration +from ignitetest.services.utils.ignite_spec import IgniteNodeSpec +from ignitetest.services.utils.ssl.client_connector_configuration import ClientConnectorConfiguration +from ignitetest.utils import cluster, ignite_versions +from ignitetest.utils.ignite_test import IgniteTest +from ignitetest.utils.version import DEV_BRANCH, IgniteVersion + + +class ThinClientQueryTest(IgniteTest): + """ + cluster - cluster size. + JAVA_CLIENT_CLASS_NAME - running classname. + to use with ssl enabled: + export GLOBALS='{"ssl":{"enabled":true}}' . + """ + @cluster(num_nodes=3) + @ignite_versions(str(DEV_BRANCH), version_prefix="server_version") + @matrix(filter=[False, True]) + def test_thin_client_index_query(self, server_version, filter): + """ + Thin client IndexQuery test. + :param server_version Ignite node version. + :param filter Whether to use filter for queries. 
+ """ + + server_config = IgniteConfiguration(version=IgniteVersion(server_version), + client_connector_configuration=ClientConnectorConfiguration()) + + ignite = IgniteService(self.test_context, server_config, 2) + + if not filter: + ignite.spec = IgniteNodeSpecExcludeDucktests(service=ignite) + + addresses = [ignite.nodes[0].account.hostname + ":" + str(server_config.client_connector_configuration.port)] + + cls = "org.apache.ignite.internal.ducktest.tests.thin_client_query_test.ThinClientQueryTestApplication" + + thin_clients = IgniteApplicationService(self.test_context, + IgniteThinClientConfiguration( + addresses=addresses, + version=IgniteVersion(str(DEV_BRANCH))), + java_class_name=cls, + num_nodes=1, + params={"filter": filter}) + + ignite.start() + thin_clients.run() + ignite.stop() + + +class IgniteNodeSpecExcludeDucktests(IgniteNodeSpec): + """ + Ignite node specification that excludes module 'ducktests' from classpath. + """ + def modules(self): + """ + Exclude module from preparing USER_LIBS environment variable. + """ + modules = super().modules() + + modules.remove("ducktests") + + return modules + + def envs(self): + """ + Skip the module target directory while building classpath. 
+ """ + envs = super().envs() + + envs["EXCLUDE_MODULES"] = "ducktests" + + return envs diff --git a/modules/ducktests/tests/ignitetest/utils/version.py b/modules/ducktests/tests/ignitetest/utils/version.py index df25dbca5fd5f..929d53e3729b5 100644 --- a/modules/ducktests/tests/ignitetest/utils/version.py +++ b/modules/ducktests/tests/ignitetest/utils/version.py @@ -112,7 +112,11 @@ def __repr__(self): V_2_15_0 = IgniteVersion("2.15.0") LATEST_2_15 = V_2_15_0 +# 2.16.x versions +V_2_16_0 = IgniteVersion("2.16.0") +LATEST_2_16 = V_2_16_0 + # if you updated the LATEST version # please check DEV version in 'tests/ignitetest/__init__.py' -LATEST = LATEST_2_15 +LATEST = LATEST_2_16 OLDEST = V_2_7_6 diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/CacheDeploymentEntryProcessorMultipleEnts.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/CacheDeploymentEntryProcessorMultipleEnts.java new file mode 100644 index 0000000000000..bdb16113bb05f --- /dev/null +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/CacheDeploymentEntryProcessorMultipleEnts.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.tests.p2p; + +import java.util.Map; +import javax.cache.processor.EntryProcessorException; +import javax.cache.processor.MutableEntry; +import org.apache.ignite.cache.CacheEntryProcessor; +import org.apache.ignite.tests.p2p.cache.Container; + +/** + * Entry processor for p2p tests. + */ +public class CacheDeploymentEntryProcessorMultipleEnts implements CacheEntryProcessor { + /** */ + private Map entToProcess; + + /** */ + public CacheDeploymentEntryProcessorMultipleEnts(Object container) { + entToProcess = (Map)((Container)container).field; + } + + /** {@inheritDoc} */ + @Override public Boolean process(MutableEntry entry, + Object... arguments) throws EntryProcessorException { + boolean pr = false; + + for (CacheDeploymentTestValue ent : entToProcess.values()) { + CacheDeploymentTestValue key = ent; + pr = key != null; + } + CacheDeploymentTestValue val = entry.getValue(); + + return pr; + } +} diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/cache/Container.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/cache/Container.java index fff36123f27f7..625890dcc6ae3 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/cache/Container.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/cache/Container.java @@ -20,7 +20,7 @@ /** */ public class Container { /** */ - private Object field; + public Object field; /** */ public Container(Object field) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizer.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizer.java index 875284dd631ab..c3ca1376a9305 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizer.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizer.java @@ 
-412,8 +412,10 @@ private static boolean pullOutSubQryFromTableList( remapColumns( parent, subSel, - // reference equality used intentionally here - col -> wrappedSubQry == col.expressionInFrom(), + // In case of several nested subqueries, inner subqueries are wrapped into alias of outer subqueries, + // to check column belonging correctly we should unwrap aliases. + // Reference equality used intentionally here. + col -> GridSqlAlias.unwrap(wrappedSubQry) == GridSqlAlias.unwrap(col.expressionInFrom()), subTbl ); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2IndexFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2IndexFactory.java index 14250c8497231..379b3024a0592 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2IndexFactory.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2IndexFactory.java @@ -26,7 +26,6 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.cache.query.index.SortOrder; import org.apache.ignite.internal.cache.query.index.sorted.IndexKeyDefinition; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndex; import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexImpl; import org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.query.h2.database.H2TreeClientIndex; @@ -90,12 +89,10 @@ Index createIndex(GridH2Table tbl, IndexDescriptor idxDesc) { return new H2TreeIndex(qryIdx, tbl, idxColsArr, idxDesc.isPk(), log); } else { - InlineIndex qryIdx = idxDesc.index().unwrap(InlineIndex.class); - IndexType idxType = idxDesc.isPk() ? 
IndexType.createPrimaryKey(false, false) : IndexType.createNonUnique(false, false, false); - return new H2TreeClientIndex(qryIdx, tbl, idxDesc.name(), idxColsArr, idxType); + return new H2TreeClientIndex(idxDesc.index(), tbl, idxDesc.name(), idxColsArr, idxType); } } else if (idxDesc.type() == QueryIndexType.GEOSPATIAL) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2QueryInfo.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2QueryInfo.java index 7cdbed157fe33..95ce6c1bf7e64 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2QueryInfo.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2QueryInfo.java @@ -41,6 +41,15 @@ public class H2QueryInfo implements TrackableQuery { /** Begin timestamp. */ private final long beginTs; + /** The most recent point in time when the tracking of a long query was suspended. */ + private volatile long lastSuspendTs; + + /** External wait time. */ + private volatile long extWait; + + /** Long query time tracking suspension flag. */ + private volatile boolean isSuspended; + /** Query schema. */ private final String schema; @@ -112,6 +121,11 @@ public String plan() { return stmt.getPlanSQL(); } + /** */ + public long extWait() { + return extWait; + } + /** * Print info specified by children. * @@ -123,7 +137,25 @@ protected void printInfo(StringBuilder msg) { /** {@inheritDoc} */ @Override public long time() { - return U.currentTimeMillis() - beginTs; + return (isSuspended ? 
lastSuspendTs : U.currentTimeMillis()) - beginTs - extWait; + } + + /** */ + public synchronized void suspendTracking() { + if (!isSuspended) { + isSuspended = true; + + lastSuspendTs = U.currentTimeMillis(); + } + } + + /** */ + public synchronized void resumeTracking() { + if (isSuspended) { + isSuspended = false; + + extWait += U.currentTimeMillis() - lastSuspendTs; + } } /** @@ -156,6 +188,11 @@ protected void printInfo(StringBuilder msg) { return msgSb.toString(); } + /** */ + public boolean isSuspended() { + return isSuspended; + } + /** * Query type. */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java index 9529b4430b1b0..18cbac6b494af 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java @@ -120,6 +120,9 @@ public abstract class H2ResultSetIterator extends GridIteratorAdapter impl /** */ private final H2QueryInfo qryInfo; + /** */ + final IgniteH2Indexing h2; + /** * @param data Data array. * @param log Logger. 
@@ -141,6 +144,7 @@ protected H2ResultSetIterator( this.data = data; this.tracing = tracing; this.qryInfo = qryInfo; + this.h2 = h2; try { res = (ResultInterface)RESULT_FIELD.get(data); @@ -325,6 +329,9 @@ public void onClose() throws IgniteCheckedException { lockTables(); + if (qryInfo != null) + h2.heavyQueriesTracker().stopTracking(qryInfo, null); + try { resultSetChecker.checkOnClose(); @@ -391,7 +398,7 @@ private synchronized void closeInternal() throws IgniteCheckedException { if (closed) return false; - return hasRow || (hasRow = fetchNext()); + return hasRow || (hasRow = h2.executeWithResumableTimeTracking(this::fetchNext, qryInfo)); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 091a15c9230f6..8e91c71a71172 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -119,6 +119,7 @@ import org.apache.ignite.internal.util.lang.GridPlainRunnable; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.internal.util.lang.IgniteSingletonIterator; +import org.apache.ignite.internal.util.lang.IgniteThrowableSupplier; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; @@ -414,6 +415,8 @@ private GridQueryFieldsResult executeSelectLocal( @Override public GridCloseableIterator> iterator() throws IgniteCheckedException { H2PooledConnection conn = connections().connection(qryDesc.schemaName()); + H2QueryInfo qryInfo = null; + try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_ITER_OPEN, MTC.span()))) { H2Utils.setupConnection(conn, qctx, 
qryDesc.distributedJoins(), qryDesc.enforceJoinOrder(), qryParams.lazy()); @@ -436,9 +439,11 @@ private GridQueryFieldsResult executeSelectLocal( H2Utils.bindParameters(stmt, F.asList(params)); - H2QueryInfo qryInfo = new H2QueryInfo(H2QueryInfo.QueryType.LOCAL, stmt, qry, + qryInfo = new H2QueryInfo(H2QueryInfo.QueryType.LOCAL, stmt, qry, ctx.localNodeId(), qryId); + heavyQryTracker.startTracking(qryInfo); + if (ctx.performanceStatistics().enabled()) { ctx.performanceStatistics().queryProperty( GridCacheQueryType.SQL_FIELDS, @@ -449,13 +454,16 @@ private GridQueryFieldsResult executeSelectLocal( ); } - ResultSet rs = executeSqlQueryWithTimer( - stmt, - conn, - qry, - timeout, - cancel, - qryParams.dataPageScanEnabled(), + ResultSet rs = executeWithResumableTimeTracking( + () -> executeSqlQueryWithTimer( + stmt, + conn, + qry, + timeout, + cancel, + qryParams.dataPageScanEnabled(), + null + ), qryInfo ); @@ -472,6 +480,9 @@ private GridQueryFieldsResult executeSelectLocal( catch (IgniteCheckedException | RuntimeException | Error e) { conn.close(); + if (qryInfo != null) + heavyQryTracker.stopTracking(qryInfo, e); + throw e; } } @@ -2259,4 +2270,25 @@ public HeavyQueriesTracker heavyQueriesTracker() { public DistributedIndexingConfiguration distributedConfiguration() { return distrCfg; } + + /** + * Resumes time tracking before the task (if needed) and suspends time tracking after the task is finished. + * + * @param task Query/fetch to execute. + * @param qryInfo Query info. + * @throws IgniteCheckedException If failed. 
+ */ + public T executeWithResumableTimeTracking( + IgniteThrowableSupplier task, + final H2QueryInfo qryInfo + ) throws IgniteCheckedException { + qryInfo.resumeTracking(); + + try { + return task.get(); + } + finally { + qryInfo.suspendTracking(); + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeClientIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeClientIndex.java index 9d85d710f2b1d..8048485c24e4f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeClientIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeClientIndex.java @@ -19,7 +19,6 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.cache.query.index.Index; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndex; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow; @@ -35,7 +34,7 @@ */ public class H2TreeClientIndex extends H2TreeIndexBase { /** */ - private final InlineIndex clientIdx; + private final Index clientIdx; /** * @param tbl Table. @@ -43,17 +42,12 @@ public class H2TreeClientIndex extends H2TreeIndexBase { * @param cols Index columns. * @param idxType Index type. */ - public H2TreeClientIndex(InlineIndex idx, GridH2Table tbl, String name, IndexColumn[] cols, IndexType idxType) { + public H2TreeClientIndex(Index idx, GridH2Table tbl, String name, IndexColumn[] cols, IndexType idxType) { super(tbl, name, cols, idxType); clientIdx = idx; } - /** {@inheritDoc} */ - @Override public int inlineSize() { - return clientIdx.inlineSize(); - } - /** {@inheritDoc} */ @Override public void refreshColumnIds() { // Do nothing. 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 12a71441de728..1df2e5cad553e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -186,8 +186,10 @@ public H2TreeIndex(InlineIndexImpl queryIndex, GridH2Table tbl, IndexColumn[] co ctx.io().addMessageListener(msgTopic, msgLsnr); } - /** {@inheritDoc} */ - @Override public int inlineSize() { + /** + * @return Inline size. + */ + public int inlineSize() { return queryIndex.inlineSize(); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndexBase.java index 2ca9f68cbba17..0d4ad3d798b59 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndexBase.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndexBase.java @@ -41,11 +41,6 @@ protected H2TreeIndexBase(GridH2Table tbl, String name, IndexColumn[] cols, Inde super(tbl, name, cols, type); } - /** - * @return Inline size. 
- */ - public abstract int inlineSize(); - /** {@inheritDoc} */ @Override public double getCost(Session ses, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, HashSet allColumnsSet) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java index 818d5e009a1ba..0bc44b5935b75 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java @@ -48,6 +48,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.h2.command.Prepared; import org.h2.command.dml.Query; +import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.query.h2.opt.join.CollocationModel.isCollocated; import static org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst.TRUE; @@ -1271,6 +1272,12 @@ private void splitSelect(GridSqlAst parent, int childIdx) throws IgniteCheckedEx SqlAstTraverser traverser = new SqlAstTraverser(mapQry, distributedJoins, log); traverser.traverse(); + @Nullable SqlAstTraverser.MixedModeCachesJoinIssue mixedJoinIssue = traverser.hasOuterJoinMixedCacheModeIssue(); + + if (mixedJoinIssue != null && mixedJoinIssue.error()) { + throw new CacheException(mixedJoinIssue.errorMessage()); + } + map.columns(collectColumns(mapExps)); map.sortColumns(mapQry.sort()); map.partitioned(traverser.hasPartitionedTables()); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/SqlAstTraverser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/SqlAstTraverser.java index f854c3f356583..764069b6ffcc1 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/SqlAstTraverser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/SqlAstTraverser.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal.processors.query.h2.sql; import java.util.HashSet; +import java.util.Objects; import java.util.Set; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; +import org.jetbrains.annotations.Nullable; /** * Traverse over query AST to find info about partitioned table usage. @@ -44,6 +46,9 @@ class SqlAstTraverser { /** Whether query has joins between replicated and partitioned tables. */ private boolean hasOuterJoinReplicatedPartitioned; + /** */ + private @Nullable MixedModeCachesJoinIssue hasOuterJoinMixedCacheModeIssue; + /** Whether top-level table is replicated. */ private boolean isRootTableReplicated; @@ -86,6 +91,11 @@ public boolean hasReplicatedWithPartitionedAndSubQuery() { return (isRootTableReplicated && hasSubQueries && hasPartitionedTables); } + /** */ + public @Nullable MixedModeCachesJoinIssue hasOuterJoinMixedCacheModeIssue() { + return hasOuterJoinMixedCacheModeIssue; + } + /** * Traverse AST while join operation isn't found. Check it if found. 
* @@ -168,8 +178,24 @@ else if (ast instanceof GridSqlTable) if (left == null || right == null) return; - if (join.isLeftOuter() && !left.isPartitioned() && right.isPartitioned()) - hasOuterJoinReplicatedPartitioned = true; + if (join.isLeftOuter() && !left.isPartitioned() && right.isPartitioned()) { + if (left.cacheContext().affinity().partitions() != right.cacheContext().affinity().partitions()) { + hasOuterJoinMixedCacheModeIssue = new MixedModeCachesJoinIssue("Cache [cacheName=" + left.cacheName() + + ", partitionsCount=" + left.cacheContext().affinity().partitions() + + "] can`t be joined with [cacheName=" + right.cacheName() + + ", partitionsCount=" + right.cacheContext().affinity().partitions() + + "] due to different affinity configuration. Join between PARTITIONED and REPLICATED caches is possible " + + "only with the same partitions number configuration."); + } + // the only way to compare predicate classes, not work for different class loaders. + else if (!Objects.equals(className(left.cacheInfo().config().getNodeFilter()), className(right.cacheInfo().config() + .getNodeFilter()))) { + hasOuterJoinMixedCacheModeIssue = new MixedModeCachesJoinIssue("Cache [cacheName=" + left.cacheName() + "] " + + "can`t be joined with [cacheName=" + right.cacheName() + "] due to different node filters configuration."); + } + else + hasOuterJoinReplicatedPartitioned = true; + } // Skip check if at least one of tables isn't partitioned. if (!(left.isPartitioned() && right.isPartitioned())) @@ -179,6 +205,11 @@ else if (ast instanceof GridSqlTable) checkPartitionedJoin(join, where, left, right, log); } + /** Object class name. */ + @Nullable private static String className(@Nullable Object obj) { + return obj != null ? obj.getClass().getName() : null; + } + /** * Checks whether an AST contains valid join operation between partitioned tables. * Join condition should be an equality operation of affinity keys of tables. 
Conditions can be splitted between @@ -242,7 +273,7 @@ private String getAlias(GridSqlElement el) { private Set affKeys(boolean pk, GridH2Table tbl) { Set affKeys = new HashSet<>(); - // User explicitly specify an affinity key. Otherwise use primary key. + // User explicitly specify an affinity key. Otherwise, use primary key. if (!pk) affKeys.add(tbl.getAffinityKeyColumn().columnName); else { @@ -279,7 +310,7 @@ private boolean checkPartitionedCondition(GridSqlElement condition, if (GridSqlOperationType.EQUAL == op.operationType()) checkEqualityOperation(op, leftTbl, leftAffKeys, pkLeft, rightTbl, rightAffKeys, pkRight); - // Check affinity condition is covered fully. If true then return. Otherwise go deeper. + // Check affinity condition is covered fully. If true then return. Otherwise, go deeper. if (affinityCondIsCovered(leftAffKeys, rightAffKeys)) return true; @@ -342,4 +373,29 @@ private void checkEqualityOperation(GridSqlOperation equalOp, private boolean affinityCondIsCovered(Set leftAffKeys, Set rightAffKeys) { return leftAffKeys.isEmpty() && rightAffKeys.isEmpty(); } + + /** Mixed cache mode join issues. */ + static class MixedModeCachesJoinIssue { + /** */ + private final boolean err; + + /** */ + private final String msg; + + /** Constructor. */ + MixedModeCachesJoinIssue(String errMsg) { + err = true; + msg = errMsg; + } + + /** Return {@code true} if error present. */ + boolean error() { + return err; + } + + /** Return appropriate error message. 
*/ + String errorMessage() { + return msg; + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java index 3d388561840f2..78a0e0aeb9657 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java @@ -446,6 +446,8 @@ private void onQueryRequest0( qryResults.addResult(qryIdx, res); + MapH2QueryInfo qryInfo = null; + try { res.lock(); @@ -460,7 +462,9 @@ private void onQueryRequest0( H2Utils.bindParameters(stmt, params0); - MapH2QueryInfo qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId); + qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId); + + h2.heavyQueriesTracker().startTracking(qryInfo); if (performanceStatsEnabled) { ctx.performanceStatistics().queryProperty( @@ -472,14 +476,20 @@ private void onQueryRequest0( ); } - ResultSet rs = h2.executeSqlQueryWithTimer( - stmt, - conn, - sql, - timeout, - qryResults.queryCancel(qryIdx), - dataPageScanEnabled, - qryInfo); + GridQueryCancel qryCancel = qryResults.queryCancel(qryIdx); + + ResultSet rs = h2.executeWithResumableTimeTracking( + () -> h2.executeSqlQueryWithTimer( + stmt, + conn, + sql, + timeout, + qryCancel, + dataPageScanEnabled, + null + ), + qryInfo + ); if (evt) { ctx.event().record(new CacheQueryExecutedEvent<>( @@ -507,14 +517,21 @@ private void onQueryRequest0( res.openResult(rs, qryInfo); - final GridQueryNextPageResponse msg = prepareNextPage( - nodeRess, - node, - qryResults, - qryIdx, - segmentId, - pageSize, - dataPageScanEnabled + MapQueryResults qryResults0 = qryResults; + + int qryIdx0 = qryIdx; + + final GridQueryNextPageResponse msg = 
h2.executeWithResumableTimeTracking( + () -> prepareNextPage( + nodeRess, + node, + qryResults0, + qryIdx0, + segmentId, + pageSize, + dataPageScanEnabled + ), + qryInfo ); if (msg != null) @@ -528,6 +545,12 @@ private void onQueryRequest0( qryIdx++; } + catch (Throwable e) { + if (qryInfo != null) + h2.heavyQueriesTracker().stopTracking(qryInfo, e); + + throw e; + } finally { try { res.unlockTables(); @@ -843,13 +866,15 @@ else if (nodeRess.cancelled(reqId)) { final MapQueryResults qryResults = nodeRess.get(reqId, req.segmentId()); + MapQueryResult res = null; + if (qryResults == null) sendError(node, reqId, new CacheException("No query result found for request: " + req)); else if (qryResults.cancelled()) sendQueryCancel(node, reqId); else { try { - MapQueryResult res = qryResults.result(req.query()); + res = qryResults.result(req.query()); assert res != null; @@ -862,14 +887,18 @@ else if (qryResults.cancelled()) Boolean dataPageScanEnabled = isDataPageScanEnabled(req.getFlags()); - GridQueryNextPageResponse msg = prepareNextPage( - nodeRess, - node, - qryResults, - req.query(), - req.segmentId(), - req.pageSize(), - dataPageScanEnabled); + GridQueryNextPageResponse msg = h2.executeWithResumableTimeTracking( + () -> prepareNextPage( + nodeRess, + node, + qryResults, + req.query(), + req.segmentId(), + req.pageSize(), + dataPageScanEnabled + ), + res.qryInfo() + ); if (msg != null) sendNextPage(node, msg); @@ -884,6 +913,9 @@ else if (qryResults.cancelled()) } } catch (Exception e) { + if (res.qryInfo() != null) + h2.heavyQueriesTracker().stopTracking(res.qryInfo(), e); + QueryRetryException retryEx = X.cause(e, QueryRetryException.class); if (retryEx != null) @@ -939,6 +971,9 @@ private GridQueryNextPageResponse prepareNextPage( if (last) { qr.closeResult(qry); + if (res.qryInfo() != null) + h2.heavyQueriesTracker().stopTracking(res.qryInfo(), null); + if (qr.isAllClosed()) { nodeRess.remove(qr.queryRequestId(), segmentId, qr); diff --git 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java index 3fdadabe361e5..cffa0dfd9b8e6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java @@ -419,6 +419,8 @@ public Iterator> query( runs.put(qryReqId, r); + ReduceH2QueryInfo qryInfo = null; + try { cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true)); @@ -509,9 +511,11 @@ else if (QueryUtils.wasCancelled(err)) H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params))); - ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(), + qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(), ctx.localNodeId(), qryId, qryReqId); + h2.heavyQueriesTracker().startTracking(qryInfo); + if (ctx.performanceStatistics().enabled()) { ctx.performanceStatistics().queryProperty( GridCacheQueryType.SQL_FIELDS, @@ -522,12 +526,18 @@ else if (QueryUtils.wasCancelled(err)) ); } - ResultSet res = h2.executeSqlQueryWithTimer(stmt, - conn, - rdc.query(), - timeoutMillis, - cancel, - dataPageScanEnabled, + H2PooledConnection conn0 = conn; + + ResultSet res = h2.executeWithResumableTimeTracking( + () -> h2.executeSqlQueryWithTimer( + stmt, + conn0, + rdc.query(), + timeoutMillis, + cancel, + dataPageScanEnabled, + null + ), qryInfo ); @@ -549,6 +559,9 @@ else if (QueryUtils.wasCancelled(err)) catch (IgniteCheckedException | RuntimeException e) { release = true; + if (qryInfo != null) + h2.heavyQueriesTracker().stopTracking(qryInfo, e); + if (e instanceof CacheException) { if (QueryUtils.wasCancelled(e)) throw new CacheException("Failed to run reduce query locally.", diff --git 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java index d7d8736e43e44..f644a7805e1cc 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java @@ -324,6 +324,11 @@ public void checkTablesVersions() { GridH2Table.checkTablesVersions(ses); } + /** */ + public MapH2QueryInfo qryInfo() { + return res.qryInfo; + } + /** */ private class Result { /** */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/cache/query/IndexQueryInlineSizesTest.java b/modules/indexing/src/test/java/org/apache/ignite/cache/query/IndexQueryInlineSizesTest.java index 32de88d75b83b..31f4d01b27a0d 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/cache/query/IndexQueryInlineSizesTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/cache/query/IndexQueryInlineSizesTest.java @@ -32,7 +32,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; -import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndexBase; +import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -272,7 +272,7 @@ private Index(int inlineSize, String flds, int expInlineSize) { GridH2Table tbl = ((IgniteH2Indexing)crd.context().query().getIndexing()).schemaManager() .dataTable("PUBLIC", TABLE); - assertEquals(expInlineSize, ((H2TreeIndexBase)tbl.getIndex(idxName)).inlineSize()); + assertEquals(expInlineSize, 
((H2TreeIndex)tbl.getIndex(idxName)).inlineSize()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingAbstractTest.java index 220c1129f3dd5..b14aeb31ff403 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingAbstractTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingAbstractTest.java @@ -39,8 +39,7 @@ import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndex; -import org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexImpl; +import org.apache.ignite.internal.cache.query.index.sorted.SortedSegmentedIndex; import org.apache.ignite.internal.processors.query.schema.management.IndexDescriptor; import org.apache.ignite.internal.processors.query.schema.management.SchemaManager; import org.apache.ignite.lang.IgnitePredicate; @@ -277,12 +276,8 @@ protected void checkQueryParallelism(IgniteEx ig, CacheMode cacheMode) { assertNotNull(idxDesc); - InlineIndex idx = idxDesc.index().unwrap(InlineIndex.class); - - assertNotNull(idx); - - if (idx instanceof InlineIndexImpl) // Check segments count only on affinity nodes (skip client indexes). - assertEquals(expectedParallelism, idx.segmentsCount()); + if (idxDesc.index() instanceof SortedSegmentedIndex) // Check segments count only on affinity nodes (skip client indexes). 
+ assertEquals(expectedParallelism, ((SortedSegmentedIndex)idxDesc.index()).segmentsCount()); CacheConfiguration cfg = ig.context().cache().cacheConfiguration(POI_CACHE_NAME); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LongRunningQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LongRunningQueryTest.java index 8ba7223c99809..a7faf240c2da8 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LongRunningQueryTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LongRunningQueryTest.java @@ -22,17 +22,21 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; +import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.QueryCancelledException; import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.cache.query.annotations.QuerySqlFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest; +import org.apache.ignite.internal.processors.query.h2.H2QueryInfo; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.running.HeavyQueriesTracker; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.ListeningTestLogger; @@ -41,6 +45,7 @@ import static java.lang.Thread.currentThread; import static 
org.apache.ignite.internal.processors.query.running.HeavyQueriesTracker.LONG_QUERY_EXEC_MSG; +import static org.h2.engine.Constants.DEFAULT_PAGE_SIZE; /** * Tests for log print for long-running query. @@ -49,21 +54,35 @@ public class LongRunningQueryTest extends AbstractIndexingCommonTest { /** Keys count. */ private static final int KEY_CNT = 1000; + /** External wait time. */ + private static final int EXT_WAIT_TIME = 2000; + + /** Page size. */ + private int pageSize = DEFAULT_PAGE_SIZE; + /** Local query mode. */ private boolean local; /** Lazy query mode. */ private boolean lazy; + /** Merge table usage flag. */ + private boolean withMergeTable; + + /** Distributed joins flag. */ + private boolean distributedJoins; + + /** Ignite instance. */ + private Ignite ignite; + /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { super.beforeTest(); - startGrid(); + ignite = startGrid(); IgniteCache c = grid().createCache(new CacheConfiguration() .setName("test") - .setSqlSchema("TEST") .setQueryEntities(Collections.singleton(new QueryEntity(Long.class, Long.class) .setTableName("test") .addQueryField("id", Long.class.getName(), null) @@ -85,6 +104,18 @@ public class LongRunningQueryTest extends AbstractIndexingCommonTest { super.afterTest(); } + /** + * @param name Name. + * @param idxTypes Index types. + */ + @SuppressWarnings("unchecked") + private static CacheConfiguration cacheConfig(String name, Class... 
idxTypes) { + return new CacheConfiguration() + .setName(name) + .setIndexedTypes(idxTypes) + .setSqlFunctionClasses(TestSQLFunctions.class); + } + /** * */ @@ -109,6 +140,72 @@ public void testLongLocal() { checkFastQueries(); } + /** + * + */ + @Test + public void testLongDistributedLazy() { + local = false; + lazy = true; + + checkLongRunning(); + checkFastQueries(); + } + + /** + * + */ + @Test + public void testLongDistributedLazyWithMergeTable() { + local = false; + lazy = true; + + withMergeTable = true; + + try { + checkLongRunning(); + } + finally { + withMergeTable = false; + } + } + + /** + * + */ + @Test + public void testLongLocalLazy() { + local = true; + lazy = true; + + checkLongRunning(); + checkFastQueries(); + } + + /** + * Test checks that no long-running queries warnings are printed in case of external waits during + * the execution of distributed queries. + */ + @Test + public void testDistributedLazyWithExternalWait() { + local = false; + lazy = true; + + checkLazyWithExternalWait(); + } + + /** + * Test checks that no long-running queries warnings are printed in case of external waits during + * the execution of local queries. + */ + @Test + public void testlocalLazyWithExternalWait() { + local = true; + lazy = true; + + checkLazyWithExternalWait(); + } + /** * Test checks the correctness of thread name when displaying errors * about long queries. @@ -166,7 +263,7 @@ private void checkFastQueries() { // Several fast queries. 
for (int i = 0; i < 10; ++i) - sql("SELECT * FROM test").getAll(); + sql("test", "SELECT * FROM test").getAll(); assertFalse(lsnr.check()); } @@ -200,7 +297,7 @@ private void checkBigResultSet() throws Exception { testLog.registerListener(lsnr); - try (FieldsQueryCursor cur = sql("SELECT T0.id FROM test AS T0, test AS T1")) { + try (FieldsQueryCursor cur = sql("test", "SELECT T0.id FROM test AS T0, test AS T1")) { Iterator it = cur.iterator(); while (it.hasNext()) @@ -215,28 +312,116 @@ private void checkBigResultSet() throws Exception { * @param args Query parameters. */ private void sqlCheckLongRunning(String sql, Object... args) { - GridTestUtils.assertThrowsAnyCause(log, () -> sql(sql, args).getAll(), QueryCancelledException.class, ""); + GridTestUtils.assertThrowsAnyCause(log, () -> sql("test", sql, args).getAll(), QueryCancelledException.class, ""); + } + + /** + * @param sql SQL query. + * @param args Query parameters. + */ + private void sqlCheckLongRunningLazy(String sql, Object... args) { + pageSize = 1; + + try { + assertFalse(sql("test", sql, args).iterator().next().isEmpty()); + } + finally { + pageSize = DEFAULT_PAGE_SIZE; + } + } + + /** + * @param sql SQL query. + * @param args Query parameters. + */ + private void sqlCheckLongRunningLazyWithMergeTable(String sql, Object... args) { + distributedJoins = true; + + try { + CacheConfiguration ccfg1 = cacheConfig("pers", Integer.class, Person.class); + CacheConfiguration ccfg2 = cacheConfig("org", Integer.class, Organization.class); + + IgniteCache cache1 = ignite.getOrCreateCache(ccfg1); + IgniteCache cache2 = ignite.getOrCreateCache(ccfg2); + + cache2.put(1, new Organization("o1")); + cache2.put(2, new Organization("o2")); + cache1.put(3, new Person(1, "p1")); + cache1.put(4, new Person(2, "p2")); + cache1.put(5, new Person(3, "p3")); + + assertFalse(sql("pers", sql, args).getAll().isEmpty()); + } + finally { + distributedJoins = false; + } } /** * Execute long-running sql with a check for errors. 
*/ private void sqlCheckLongRunning() { - sqlCheckLongRunning("SELECT T0.id FROM test AS T0, test AS T1, test AS T2 where T0.id > ?", 0); + if (lazy && withMergeTable) { + String select = "select o.name n1, p.name n2 from Person p, \"org\".Organization o" + + " where p.orgId = o._key and o._key=1 and o._key < sleep_func(?)" + + " union select o.name n1, p.name n2 from Person p, \"org\".Organization o" + + " where p.orgId = o._key and o._key=2"; + + sqlCheckLongRunningLazyWithMergeTable(select, 2000); + } + else if (lazy && !withMergeTable) + sqlCheckLongRunningLazy("SELECT * FROM test WHERE _key < sleep_func(?)", 2000); + else + sqlCheckLongRunning("SELECT T0.id FROM test AS T0, test AS T1, test AS T2 where T0.id > ?", 0); } /** + * @param cacheName Cache name. * @param sql SQL query. * @param args Query parameters. * @return Results cursor. */ - private FieldsQueryCursor> sql(String sql, Object... args) { - return grid().context().query().querySqlFields(new SqlFieldsQuery(sql) + private FieldsQueryCursor> sql(String cacheName, String sql, Object... 
args) { + return ignite.cache(cacheName).query(new SqlFieldsQuery(sql) .setTimeout(10, TimeUnit.SECONDS) .setLocal(local) .setLazy(lazy) - .setSchema("TEST") - .setArgs(args), false); + .setPageSize(pageSize) + .setDistributedJoins(distributedJoins) + .setArgs(args)); + } + + /** */ + public void checkLazyWithExternalWait() { + pageSize = 1; + + LogListener lsnr = LogListener + .matches(LONG_QUERY_EXEC_MSG) + .build(); + + testLog().registerListener(lsnr); + + try { + Iterator> it = sql("test", "select * from test").iterator(); + + it.next(); + + long sleepStartTs = U.currentTimeMillis(); + + while (U.currentTimeMillis() - sleepStartTs <= EXT_WAIT_TIME) + doSleep(100L); + + it.next(); + + H2QueryInfo qry = (H2QueryInfo)heavyQueriesTracker().getQueries().iterator().next(); + + assertTrue(qry.extWait() >= EXT_WAIT_TIME); + + assertFalse(lsnr.check()); + } + finally { + pageSize = DEFAULT_PAGE_SIZE; + } } /** @@ -287,4 +472,38 @@ private ListeningTestLogger testLog() { private HeavyQueriesTracker heavyQueriesTracker() { return ((IgniteH2Indexing)grid().context().query().getIndexing()).heavyQueriesTracker(); } + + /** */ + private static class Person { + /** */ + @QuerySqlField(index = true) + int orgId; + + /** */ + @QuerySqlField(index = true) + String name; + + /** + * @param orgId Organization ID. + * @param name Name. + */ + public Person(int orgId, String name) { + this.orgId = orgId; + this.name = name; + } + } + + /** */ + private static class Organization { + /** */ + @QuerySqlField + String name; + + /** + * @param name Organization name. 
+ */ + public Organization(String name) { + this.name = name; + } + } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizerSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizerSelfTest.java index 5849d1b8aea25..9cd984af0c1cf 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizerSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridSubqueryJoinOptimizerSelfTest.java @@ -953,6 +953,22 @@ public void testDifferentOperatorsWithSubqueryUnderSelect() { } } + /** + * Case when select subquery from another subquery, which is select from another subquery. + */ + @Test + public void testSeveralNestedSubqueries() { + String innerSql = "SELECT id as id0, name as name0 FROM dep"; + String outerSqlTemplate = "SELECT id%d as id%d, name%d as name%d FROM %s WHERE id%d > %d"; + + String curSql = innerSql; + + for (int i = 0; i < 5; i++) { + curSql = String.format(outerSqlTemplate, i, i + 1, i, i + 1, '(' + curSql + ')', i, i); + check(curSql, 1); + } + } + /** * @param sql Sql. * @param expSelectClauses Expected select clauses. diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsAbstractTest.java index 8c87170a466be..a631e99723730 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsAbstractTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsAbstractTest.java @@ -261,12 +261,23 @@ protected List> sql(String sql) { } /** - * Create SQL table with the given index. + * Creates SQL table with the given index and fills with small amount of data. 
* * @param suffix Table idx, if {@code null} - name "SMALL" without index will be used. * @return Table name. */ protected String createSmallTable(String suffix) { + return createSmallTable(SMALL_SIZE, suffix); + } + + /** + * Creates SQL table with the given index and fills with data of the passed amount. + * + * @param preloadCnt Records cnt to load after creation. + * @param suffix Table idx, if {@code null} - name "SMALL" without index will be used. + * @return Table name. + */ + protected String createSmallTable(int preloadCnt, String suffix) { String tblName = "small" + ((suffix != null) ? suffix : ""); sql("DROP TABLE IF EXISTS " + tblName); @@ -279,7 +290,7 @@ protected String createSmallTable(String suffix) { sql(String.format("CREATE INDEX %s_c ON %s(c)", tblName, tblName)); - for (int i = 0; i < SMALL_SIZE; i++) + for (int i = 0; i < preloadCnt; i++) sql(String.format("INSERT INTO %s(a, b, c) VALUES(%d, %d, %d)", tblName, i, i, i % 10)); return tblName; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsObsolescenceTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsObsolescenceTest.java index 1baa806d1a202..417f837431770 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsObsolescenceTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/stat/StatisticsObsolescenceTest.java @@ -18,22 +18,147 @@ package org.apache.ignite.internal.processors.query.stat; import java.util.Map; - +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import org.apache.ignite.Ignite; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cluster.ClusterState; import 
org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.util.collection.IntMap; +import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.internal.processors.query.stat.IgniteStatisticsHelper.buildDefaultConfigurations; +import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; /** * Test for statistics obsolescence. */ public class StatisticsObsolescenceTest extends StatisticsAbstractTest { + /** */ + @Test + public void testObsolescenceWithInsert() throws Exception { + doTestObsolescenceUnderLoad(false, 1, + key -> sql(String.format("insert into SMALL(A, B, C) values(%d, %d, %d)", key, key, key))); + } + + /** */ + @Test + public void testObsolescenceWithUpdate() throws Exception { + doTestObsolescenceUnderLoad(true, 0, key -> sql("update SMALL set B=B+1 where A=" + key)); + } + + /** */ + @Test + public void testObsolescenceWithDelete() throws Exception { + doTestObsolescenceUnderLoad(true, -1, key -> sql("delete from SMALL where A=" + key)); + } + + /** */ + private void doTestObsolescenceUnderLoad(boolean preload, int rowCntCmp, Consumer op) throws Exception { + // Keep enough data to touch every partition. The statistics collection is sensitive to a partition's empty rows num + // and is able to reassemble in this case. This would give false-positive result. + int workingRowsNum = RendezvousAffinityFunction.DFLT_PARTITION_COUNT * 10; + int preloadCnt = preload ? 
workingRowsNum : 0; + + int osbInterval = 7; + + CyclicBarrier barrier = new CyclicBarrier(2); + + try { + startGridsMultiThreaded(2); + + for (Ignite ig : G.allGrids()) + ((IgniteStatisticsManagerImpl)((IgniteEx)ig).context().query().statsManager()).scheduleObsolescence(osbInterval); + + createSmallTable(preloadCnt, null); + + statisticsMgr(0).usageState(StatisticsUsageState.ON); + statisticsMgr(0).collectStatistics(buildDefaultConfigurations(SMALL_TARGET)); + + // Initialized statistics. + assertTrue(waitForCondition(() -> statisticsMgr(0).getLocalStatistics(SMALL_KEY) != null, osbInterval * 1000)); + assertTrue(waitForCondition(() -> statisticsMgr(1).getLocalStatistics(SMALL_KEY) != null, osbInterval * 1000)); + + ObjectStatisticsImpl initStat1 = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); + ObjectStatisticsImpl initStat2 = (ObjectStatisticsImpl)statisticsMgr(1).getLocalStatistics(SMALL_KEY); + + assertEquals(preloadCnt, initStat1.rowCount() + initStat2.rowCount()); + + GridTestUtils.runAsync(() -> { + AtomicLong key = new AtomicLong(1L); + + long opCnt = 0; + + while (!barrier.isBroken()) { + op.accept(key.getAndIncrement()); + + // Enough updates to trigger the statistics. + if (++opCnt == workingRowsNum / 3) { + opCnt = 0; + + barrier.await(); + barrier.await(); + } + } + }); + + barrier.await(); + + waitForStatsUpdates(initStat1, osbInterval * 2); + + ObjectStatisticsImpl updatedStat = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); + + assertTrue(rowCntCmp > 0 ? updatedStat.rowCount() > initStat1.rowCount() : + (rowCntCmp < 0 ? updatedStat.rowCount() < initStat1.rowCount() : updatedStat.rowCount() == initStat1.rowCount())); + + barrier.await(); + barrier.await(); + + // Continuing data loading, the table is being updated. Since the row count is increasing, we must obtain a + new statistics, greater than {@code firstNotEmpty}. 
+ waitForStatsUpdates(updatedStat, osbInterval * 2); + + ObjectStatisticsImpl finalStat = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); + + assertTrue(rowCntCmp > 0 ? finalStat.rowCount() > updatedStat.rowCount() : + (rowCntCmp < 0 ? finalStat.rowCount() < updatedStat.rowCount() : finalStat.rowCount() == updatedStat.rowCount())); + } + finally { + barrier.reset(); + } + } + + /** */ + private void waitForStatsUpdates(ObjectStatisticsImpl compareTo, long timeoutSec) throws IgniteInterruptedCheckedException { + assertTrue(waitForCondition(() -> { + ObjectStatisticsImpl updatedStat = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); + + if (updatedStat == null) + return false; + + AtomicBoolean passed = new AtomicBoolean(true); + + updatedStat.columnsStatistics().forEach((col, stat) -> { + ColumnStatistics compared = compareTo.columnStatistics(col); + + assert compared != null; + + if (compared.createdAt() >= stat.createdAt()) + passed.set(false); + }); + + return passed.get(); + }, timeoutSec * 1000)); + } + /** * Test statistics refreshing after significant changes of base table: * 1) Create and populate small table @@ -51,7 +176,7 @@ public void testObsolescence() throws Exception { statisticsMgr(0).collectStatistics(buildDefaultConfigurations(SMALL_TARGET)); - assertTrue(GridTestUtils.waitForCondition(() -> statisticsMgr(0).getLocalStatistics(SMALL_KEY) != null, TIMEOUT)); + assertTrue(waitForCondition(() -> statisticsMgr(0).getLocalStatistics(SMALL_KEY) != null, TIMEOUT)); ObjectStatisticsImpl stat1 = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); @@ -62,7 +187,7 @@ public void testObsolescence() throws Exception { statisticsMgr(0).processObsolescence(); - assertTrue(GridTestUtils.waitForCondition(() -> { + assertTrue(waitForCondition(() -> { ObjectStatisticsImpl stat2 = (ObjectStatisticsImpl)statisticsMgr(0).getLocalStatistics(SMALL_KEY); return stat2 != null && stat2.rowCount() > 
stat1.rowCount(); @@ -103,7 +228,7 @@ public void testInactiveLoad() throws Exception { ignite.cluster().state(ClusterState.ACTIVE); - assertTrue(GridTestUtils.waitForCondition(() -> statObs.get(SMALL_KEY).size() > oldSize, TIMEOUT)); + assertTrue(waitForCondition(() -> statObs.get(SMALL_KEY).size() > oldSize, TIMEOUT)); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/sqltests/BaseSqlTest.java b/modules/indexing/src/test/java/org/apache/ignite/sqltests/BaseSqlTest.java index 61e48c6c64e93..51a5efafdcbcd 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/sqltests/BaseSqlTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/sqltests/BaseSqlTest.java @@ -1224,7 +1224,6 @@ public void testRightJoin() { /** * Check that FULL OUTER JOIN (which is currently unsupported) causes valid error message. */ - @SuppressWarnings("ThrowableNotThrown") @Test public void testFullOuterJoinIsNotSupported() { testAllNodes(node -> { @@ -1245,7 +1244,6 @@ public void testFullOuterJoinIsNotSupported() { /** * Check that distributed FULL OUTER JOIN (which is currently unsupported) causes valid error message. 
*/ - @SuppressWarnings("ThrowableNotThrown") @Test public void testFullOuterDistributedJoinIsNotSupported() { testAllNodes(node -> { diff --git a/modules/indexing/src/test/java/org/apache/ignite/sqltests/ReplicatedSqlCustomPartitionsTest.java b/modules/indexing/src/test/java/org/apache/ignite/sqltests/ReplicatedSqlCustomPartitionsTest.java index 0c8ab2baf0c01..2c6bf601313b6 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/sqltests/ReplicatedSqlCustomPartitionsTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/sqltests/ReplicatedSqlCustomPartitionsTest.java @@ -17,10 +17,12 @@ package org.apache.ignite.sqltests; +import org.apache.ignite.IgniteException; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; /** @@ -28,7 +30,13 @@ */ public class ReplicatedSqlCustomPartitionsTest extends ReplicatedSqlTest { /** Test partitions count. 
*/ - private static final int NUM_OF_PARTITIONS = 509; + static final int NUM_OF_PARTITIONS = 509; + + /** */ + static final String DEP_PART_TAB_DIFF = "DepartmentPartDiff"; + + /** */ + static final String DEP_PART_TAB_DIFF_NF = "DepartmentPartDiffNf"; /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { @@ -36,6 +44,11 @@ public class ReplicatedSqlCustomPartitionsTest extends ReplicatedSqlTest { .setCacheConfiguration( new CacheConfiguration("partitioned" + NUM_OF_PARTITIONS + "*") .setAffinity(new RendezvousAffinityFunction(false, NUM_OF_PARTITIONS)), + new CacheConfiguration("partitioned" + NUM_OF_PARTITIONS + "_DIFF*") + .setAffinity(new RendezvousAffinityFunction(false, NUM_OF_PARTITIONS + 1)), + new CacheConfiguration("partitioned" + NUM_OF_PARTITIONS + "_DIFF_NF*") + .setAffinity(new RendezvousAffinityFunction(false, NUM_OF_PARTITIONS)) + .setNodeFilter(clusterNode -> true), new CacheConfiguration("replicated" + NUM_OF_PARTITIONS + "*") .setCacheMode(CacheMode.REPLICATED) .setAffinity(new RendezvousAffinityFunction(false, NUM_OF_PARTITIONS)) @@ -55,6 +68,14 @@ public class ReplicatedSqlCustomPartitionsTest extends ReplicatedSqlTest { createDepartmentTable(DEP_PART_TAB, "template=partitioned" + NUM_OF_PARTITIONS); fillDepartmentTable(DEP_PART_TAB); + + createDepartmentTable(DEP_PART_TAB_DIFF, "template=partitioned" + NUM_OF_PARTITIONS + "_DIFF"); + + fillDepartmentTable(DEP_PART_TAB_DIFF); + + createDepartmentTable(DEP_PART_TAB_DIFF_NF, "template=partitioned" + NUM_OF_PARTITIONS + "_DIFF_NF"); + + fillDepartmentTable(DEP_PART_TAB_DIFF_NF); } /** @@ -73,4 +94,42 @@ public void testLeftJoinReplicatedPartitioned() { public void testRightJoinPartitionedReplicated() { checkRightJoinDepartmentEmployee(DEP_PART_TAB); } + + /** + * Check LEFT JOIN with collocated data of replicated and partitioned tables with different affinity. 
+ * This test relies on having the same number of partitions in replicated and partitioned caches + */ + @Test + public void testLeftJoinReplicatedPartitionedDiffPartitionsErr() { + GridTestUtils.assertThrows(log, () -> checkLeftJoinEmployeeDepartment(DEP_PART_TAB_DIFF), IgniteException.class, + "only with the same partitions number configuration"); + } + + /** + * Check RIGHT JOIN with collocated data of partitioned and replicated tables with different affinity. + */ + @Test + public void testRightJoinPartitionedReplicatedDiffPartitionsErr() { + GridTestUtils.assertThrows(log, () -> checkRightJoinDepartmentEmployee(DEP_PART_TAB_DIFF), IgniteException.class, + "only with the same partitions number configuration"); + } + + /** + * Check LEFT JOIN with collocated data of replicated and partitioned tables with different node filter. + * This test relies on having the same number of partitions in replicated and partitioned caches + */ + @Test + public void testLeftJoinReplicatedPartitionedDiffNodeFilterErr() { + GridTestUtils.assertThrows(log, () -> checkLeftJoinEmployeeDepartment(DEP_PART_TAB_DIFF_NF), IgniteException.class, + "due to different node filters configuration"); + } + + /** + * Check RIGHT JOIN with collocated data of partitioned and replicated tables with different node filter. 
+ */ + @Test + public void testRightJoinPartitionedReplicatedDiffNodeFilterErr() { + GridTestUtils.assertThrows(log, () -> checkRightJoinDepartmentEmployee(DEP_PART_TAB_DIFF_NF), IgniteException.class, + "due to different node filters configuration"); + } } diff --git a/modules/opencensus/pom.xml b/modules/opencensus/pom.xml index 0fd8897744820..185444c6fc332 100644 --- a/modules/opencensus/pom.xml +++ b/modules/opencensus/pom.xml @@ -100,14 +100,14 @@ io.opencensus opencensus-exporter-stats-prometheus ${opencensus.version} - test + runtime io.prometheus simpleclient_httpserver 0.3.0 - test + runtime diff --git a/modules/rest-http/README.txt b/modules/rest-http/README.txt index e7a855edb6796..9128a7caac390 100644 --- a/modules/rest-http/README.txt +++ b/modules/rest-http/README.txt @@ -4,8 +4,8 @@ Apache Ignite REST-HTTP Module Apache Ignite REST-HTTP module provides Jetty-based server which can be used to execute tasks and/or cache commands in grid using REST approach via HTTP protocol. -To enable REST-HTTP module when starting a standalone node, move 'optional/ignite-rest-http' folder to -'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will +To enable REST-HTTP module when starting a standalone node, move 'optional/ignite-rest-http' and 'optional/ignite-json' +folders to 'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will be added to classpath in this case. The module depends on third-party libraries that use the slf4j facade for logging. diff --git a/parent/pom.xml b/parent/pom.xml index d04beac9d297e..b044593560052 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -84,7 +84,7 @@ 1.0.0 1.0.6.Final 16.0.3 - 9.4.53.v20231009 + 9.4.55.v20240627 1.13 4.5.2 3.1.15 @@ -109,7 +109,7 @@ 3.1.2 9.0.63 0.8.3 - 3.8.3 + 3.8.4 1.5.5-5 0.31.1 3.9