diff --git a/database/src/main/postgres/runs/V1.9.12__get_partitioning.sql b/database/src/main/postgres/runs/V1.9.12__get_partitioning.sql new file mode 100644 index 000000000..3851b750a --- /dev/null +++ b/database/src/main/postgres/runs/V1.9.12__get_partitioning.sql @@ -0,0 +1,82 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE OR REPLACE FUNCTION runs.get_partitioning( + IN i_partitioning JSONB, + OUT status INTEGER, + OUT status_text TEXT, + OUT id BIGINT, + OUT o_partitioning JSONB, + OUT author TEXT +) RETURNS record AS +$$ +------------------------------------------------------------------------------- +-- +-- Function: runs.get_partitioning(1) +-- Retrieves a partitioning by its JSONB representation. +-- +-- Parameters: +-- i_partitioning - partitioning to search for, a valid example: +-- { +-- "keys": ["one", "two", "three"], +-- "version": 1, +-- "keysToValues": { +-- "one": "DatasetA", +-- "two": "Version1", +-- "three": "2022-12-20" +-- } +-- } +-- +-- Returns: +-- status - status of the operation: +-- status_text - textual representation of the status +-- id - ID of the partitioning +-- o_partitioning - partitioning data +-- author - author of the partitioning +-- +-- Status codes: +-- 11 - OK +-- 41 - Partitioning not found +-- +------------------------------------------------------------------------------- +BEGIN + -- Initialize status and status_text + status := 41; + status_text := 'Partitioning not found'; + + -- Retrieve partitioning ID + id := runs._get_id_partitioning(i_partitioning); + + -- If ID is found, retrieve partitioning details + IF id IS NOT NULL THEN + SELECT GPBI.id, GPBI.partitioning, GPBI.author + INTO get_partitioning.id, get_partitioning.o_partitioning, get_partitioning.author + FROM runs.get_partitioning_by_id(id) AS GPBI; + + -- Update status if partitioning is found + IF FOUND THEN + status := 11; + status_text := 'OK'; + END IF; + END IF; + + RETURN; +END; +$$ +LANGUAGE plpgsql IMMUTABLE SECURITY DEFINER; + +ALTER FUNCTION runs.get_partitioning(i_partitioning JSONB) OWNER TO atum_owner; +GRANT EXECUTE ON FUNCTION runs.get_partitioning(i_partitioning JSONB) TO atum_user; diff --git a/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningIntegrationTests.scala b/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningIntegrationTests.scala new file mode 100644 index 000000000..6d2ac96ca --- /dev/null +++ b/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningIntegrationTests.scala @@ -0,0 +1,117 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.database.runs + +import io.circe.Json +import za.co.absa.balta.DBTestSuite +import za.co.absa.balta.classes.JsonBString + +class GetPartitioningIntegrationTests extends DBTestSuite { + + private val fnCreatePartitioning = "runs.create_partitioning" + private val fnGetPartitioning = "runs.get_partitioning" + + private val partitioning1Value = + """ + |{ + | "version":1, + | "keys":["key1","key2","key3","key4"], + | "keysToValues":{ + | "key1":"valueX", + | "key2":"valueY", + | "key3":"valueZ", + | "key4":"valueA" + | } + |} + |""".stripMargin + + private val partitioning1 = JsonBString(partitioning1Value) + + private val partitioning2 = JsonBString( + """ + |{ + | "version":1, + | "keys":["key1","key2","key3","key4"], + | "keysToValues":{ + | "key1":"valueX", + | "key2":"valueX", + | "key3":"valueX", + | "key4":"valueX" + | } + |} + |""".stripMargin + ) + + test("Existing (correct) partitioning is returned") { + val partitioning1ID = function(fnCreatePartitioning) + .setParam("i_partitioning", partitioning1) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + row.getLong("id_partitioning").get + } + + function(fnCreatePartitioning) + .setParam("i_partitioning", partitioning2) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + } + + function(fnGetPartitioning) + .setParam("i_partitioning", partitioning1) + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("OK")) + assert(row.getLong("id").contains(partitioning1ID)) + assert { + val retrievedPartitioningAsJson = Json.fromString(row.getJsonB("o_partitioning").get.value) + val expectedPartitioningAsJson = Json.fromString(partitioning1Value) + retrievedPartitioningAsJson \\ "keysToValues" == expectedPartitioningAsJson \\ "keysToValues" && + retrievedPartitioningAsJson \\ "keys" == expectedPartitioningAsJson \\ "keys" + } + assert(row.getString("author").contains("Fantômas")) + assert(!queryResult.hasNext) + } + } + + test("Non-existent partitioning is not returned") { + function(fnGetPartitioning) + .setParam("i_partitioning", partitioning1) + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(41)) + assert(row.getString("status_text").contains("Partitioning not found")) + assert(row.getLong("id").isEmpty) + assert(row.getJsonB("o_partitioning").isEmpty) + assert(row.getString("author").isEmpty) + assert(!queryResult.hasNext) + } + } + +} diff --git a/server/src/main/scala/za/co/absa/atum/server/Constants.scala 
b/server/src/main/scala/za/co/absa/atum/server/Constants.scala deleted file mode 100644 index a2e6aa42c..000000000 --- a/server/src/main/scala/za/co/absa/atum/server/Constants.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2021 ABSA Group Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package za.co.absa.atum.server - -// TODO: to be removed when v2 endpoints are implemented, replaced by ApiPaths object -object Constants { - - object Endpoints { - - final val Api = "api" - final val V1 = "v1" - final val V2 = "v2" - - // todo to uppercase no hyphen for v1, backward compatibility - // todo - better API paths?? - final val CreatePartitioning = "create-partitioning" - final val CreateCheckpoint = "create-checkpoint" - - final val CreateOrUpdateAdditionalData = "upsert-additional-data" - - final val GetPartitioningCheckpoints = "get-partitioning-checkpoints" - final val GetFlowCheckpoints = "get-flow-checkpoints" - - final val Health = "health" - final val ZioMetrics = "zio-metrics" - } - - final val SwaggerApiName = "Atum API" - final val SwaggerApiVersion = "1.0" - -} diff --git a/server/src/main/scala/za/co/absa/atum/server/Main.scala b/server/src/main/scala/za/co/absa/atum/server/Main.scala index be3d7bc5a..b5157a48a 100644 --- a/server/src/main/scala/za/co/absa/atum/server/Main.scala +++ b/server/src/main/scala/za/co/absa/atum/server/Main.scala @@ -17,12 +17,12 @@ package za.co.absa.atum.server import za.co.absa.atum.server.api.controller._ -import za.co.absa.atum.server.api.database.flows.functions.{GetFlowCheckpoints, GetFlowPartitionings} +import za.co.absa.atum.server.api.database.flows.functions._ import za.co.absa.atum.server.api.database.{PostgresDatabaseProvider, TransactorProvider} import za.co.absa.atum.server.api.database.runs.functions._ import za.co.absa.atum.server.api.http.Server -import za.co.absa.atum.server.api.repository.{CheckpointRepositoryImpl, FlowRepositoryImpl, PartitioningRepositoryImpl} -import za.co.absa.atum.server.api.service.{CheckpointServiceImpl, FlowServiceImpl, PartitioningServiceImpl} +import za.co.absa.atum.server.api.repository._ +import za.co.absa.atum.server.api.service._ import za.co.absa.atum.server.aws.AwsSecretsProviderImpl import za.co.absa.atum.server.config.JvmMonitoringConfig import zio._ @@ -63,6 +63,7 @@ object Main extends ZIOAppDefault with Server { GetPartitioningCheckpointV2.layer, GetFlowCheckpoints.layer, GetPartitioningById.layer, + GetPartitioning.layer, GetFlowPartitionings.layer, PostgresDatabaseProvider.layer, TransactorProvider.layer, diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala index fd0e9720f..73669e78a 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala @@ -16,6 +16,7 @@ package za.co.absa.atum.server.api.controller +import 
io.circe.{Decoder, parser} import za.co.absa.atum.server.api.exception.ServiceError import za.co.absa.atum.server.api.exception.ServiceError._ import za.co.absa.atum.server.api.http.ApiPaths @@ -24,6 +25,8 @@ import za.co.absa.atum.server.model.SuccessResponse._ import za.co.absa.atum.server.model._ import zio._ +import java.util.Base64 + trait BaseController { def serviceCall[A, B]( @@ -71,4 +74,10 @@ trait BaseController { protected def createV2RootAnchoredResourcePath(parts: Seq[String]): IO[ErrorResponse, String] = { ZIO.succeed(s"/${ApiPaths.Api}/${ApiPaths.V2}/${parts.mkString("/")}") } + + protected def base64Decode[T: Decoder](base64EncodedString: String): Either[io.circe.Error, T] = { + val decodedBytes = Base64.getDecoder.decode(base64EncodedString) + val decodedString = new String(decodedBytes, "UTF-8") + parser.decode[T](decodedString) + } } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/FlowControllerImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/FlowControllerImpl.scala index a677232ea..2683b529a 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/FlowControllerImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/FlowControllerImpl.scala @@ -24,6 +24,7 @@ import zio._ class FlowControllerImpl(flowService: FlowService) extends FlowController with BaseController { + // to be replaced (and moved to checkpointcontroller) with new implementation in #233 override def getFlowCheckpointsV2( checkpointQueryDTO: CheckpointQueryDTO ): IO[ErrorResponse, MultiSuccessResponse[CheckpointDTO]] = { diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala index 605e06140..f48991112 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala @@ -18,7 +18,7 @@ package za.co.absa.atum.server.api.controller import za.co.absa.atum.model.dto._ import za.co.absa.atum.server.model.ErrorResponse -import za.co.absa.atum.server.model.SuccessResponse.{MultiSuccessResponse, PaginatedResponse, SingleSuccessResponse} +import za.co.absa.atum.server.model.SuccessResponse._ import zio.IO import zio.macros.accessible @@ -41,12 +41,16 @@ trait PartitioningController { additionalDataPatchDTO: AdditionalDataPatchDTO ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataDTO]] - def getPartitioningV2(partitioningId: Long): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] + def getPartitioningByIdV2(partitioningId: Long): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] def getPartitioningMeasuresV2( partitioningId: Long ): IO[ErrorResponse, MultiSuccessResponse[MeasureDTO]] + def getPartitioning( + partitioning: String + ): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] + def getFlowPartitionings( flowId: Long, limit: Option[Int], diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala index c450405e8..ec84e0890 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala @@ -20,8 +20,8 @@ import 
za.co.absa.atum.model.dto._ import za.co.absa.atum.server.api.exception.ServiceError import za.co.absa.atum.server.api.http.ApiPaths.V2Paths import za.co.absa.atum.server.api.service.PartitioningService -import za.co.absa.atum.server.model.{ErrorResponse, InternalServerErrorResponse, PaginatedResult} -import za.co.absa.atum.server.model.SuccessResponse.{MultiSuccessResponse, PaginatedResponse, SingleSuccessResponse} +import za.co.absa.atum.server.model.SuccessResponse._ +import za.co.absa.atum.server.model.{ErrorResponse, GeneralErrorResponse, InternalServerErrorResponse, PaginatedResult} import zio._ class PartitioningControllerImpl(partitioningService: PartitioningService) @@ -59,12 +59,12 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) ) ) } - override def getPartitioningV2( + override def getPartitioningByIdV2( partitioningId: Long ): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] = { mapToSingleSuccessResponse( serviceCall[PartitioningWithIdDTO, PartitioningWithIdDTO]( - partitioningService.getPartitioning(partitioningId) + partitioningService.getPartitioningById(partitioningId) ) ) } @@ -119,6 +119,21 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) ) } + override def getPartitioning( + partitioning: String + ): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] = { + for { + decodedPartitions <- ZIO + .fromEither(base64Decode[PartitioningDTO](partitioning)) + .mapError(error => GeneralErrorResponse(error.getMessage)) + response <- + mapToSingleSuccessResponse[PartitioningWithIdDTO]( + serviceCall[PartitioningWithIdDTO, PartitioningWithIdDTO]( + partitioningService.getPartitioning(decodedPartitions) + ) + ) + } yield response + } } object PartitioningControllerImpl { diff --git a/server/src/main/scala/za/co/absa/atum/server/api/database/flows/functions/GetFlowPartitionings.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/flows/functions/GetFlowPartitionings.scala index be56853dc..7e3555c9c 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/database/flows/functions/GetFlowPartitionings.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/flows/functions/GetFlowPartitionings.scala @@ -18,16 +18,18 @@ package za.co.absa.atum.server.api.database.flows.functions import doobie.implicits.toSqlInterpolator import io.circe.{DecodingFailure, Json} -import za.co.absa.atum.model.dto.{PartitioningDTO, PartitioningWithIdDTO} +import za.co.absa.atum.model.dto.{PartitionDTO, PartitioningWithIdDTO} import za.co.absa.atum.server.api.database.PostgresDatabaseProvider import za.co.absa.atum.server.api.database.flows.Flows import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings._ +import za.co.absa.atum.server.model.PartitioningForDB import za.co.absa.db.fadb.DBSchema import za.co.absa.db.fadb.doobie.DoobieEngine import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieMultipleResultFunctionWithAggStatus import za.co.absa.db.fadb.status.aggregation.implementations.ByFirstErrorStatusAggregator import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling import zio.{Task, URLayer, ZIO, ZLayer} + import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbGet import scala.annotation.tailrec @@ -62,10 +64,13 @@ object GetFlowPartitionings { else { val head = results.head val tail = results.tail - val decodingResult = head.partitioningJson.as[PartitioningDTO] + val decodingResult = head.partitioningJson.as[PartitioningForDB] 
decodingResult match { case Left(decodingFailure) => Left(decodingFailure) - case Right(partitioningDTO) => + case Right(partitioningForDB) => + val partitioningDTO = partitioningForDB.keys.map { key => + PartitionDTO(key, partitioningForDB.keysToValues(key)) + } resultsToPartitioningWithIdDTOs(tail, acc :+ PartitioningWithIdDTO(head.id, partitioningDTO, head.author)) } } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioning.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioning.scala new file mode 100644 index 000000000..c18ec5f86 --- /dev/null +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioning.scala @@ -0,0 +1,46 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.server.api.database.runs.functions + +import doobie.implicits.toSqlInterpolator +import io.circe.syntax.EncoderOps +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.api.database.runs.Runs +import za.co.absa.atum.server.model.{PartitioningForDB, PartitioningFromDB} +import za.co.absa.db.fadb.DBSchema +import za.co.absa.db.fadb.doobie.DoobieEngine +import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieSingleResultFunctionWithStatus +import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling +import zio._ + +import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbPut +import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbGet + +class GetPartitioning(implicit schema: DBSchema, dbEngine: DoobieEngine[Task]) + extends DoobieSingleResultFunctionWithStatus[PartitioningForDB, Option[PartitioningFromDB], Task](partitioningForDB => + Seq(fr"${partitioningForDB.asJson}") + ) with StandardStatusHandling { + override def fieldsToSelect: Seq[String] = super.fieldsToSelect ++ Seq("id", "o_partitioning", "author") + } + +object GetPartitioning { + val layer: URLayer[PostgresDatabaseProvider, GetPartitioning] = ZLayer { + for { + dbProvider <- ZIO.service[PostgresDatabaseProvider] + } yield new GetPartitioning()(Runs, dbProvider.dbEngine) + } +} diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/BaseEndpoints.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/BaseEndpoints.scala index 15919c240..06dc64fdd 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/BaseEndpoints.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/BaseEndpoints.scala @@ -23,7 +23,7 @@ import za.co.absa.atum.server.model._ import sttp.tapir.typelevel.MatchType import sttp.tapir.ztapir._ import sttp.tapir.{EndpointOutput, PublicEndpoint} -import za.co.absa.atum.server.Constants.Endpoints.{Api, V1, V2} +import za.co.absa.atum.server.api.http.ApiPaths._ import java.util.UUID diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala 
b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala index 0f4febcc5..5740c0a5f 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala @@ -18,14 +18,13 @@ package za.co.absa.atum.server.api.http import sttp.model.StatusCode import sttp.tapir.generic.auto.schemaForCaseClass -import sttp.tapir.ztapir._ import sttp.tapir.json.circe.jsonBody +import sttp.tapir.ztapir._ import za.co.absa.atum.model.dto._ -import za.co.absa.atum.server.Constants.Endpoints._ import za.co.absa.atum.server.model.ErrorResponse -import za.co.absa.atum.server.model.SuccessResponse.{MultiSuccessResponse, PaginatedResponse, SingleSuccessResponse} +import za.co.absa.atum.server.model.SuccessResponse._ import sttp.tapir.{PublicEndpoint, Validator, endpoint} -import za.co.absa.atum.server.api.http.ApiPaths.{V1Paths, V2Paths} +import za.co.absa.atum.server.api.http.ApiPaths.{Health, ZioMetrics, _} import java.util.UUID @@ -78,7 +77,7 @@ trait Endpoints extends BaseEndpoints { } protected val getPartitioningAdditionalDataEndpointV2 - : PublicEndpoint[Long, ErrorResponse, SingleSuccessResponse[AdditionalDataDTO], Any] = { + : PublicEndpoint[Long, ErrorResponse, SingleSuccessResponse[AdditionalDataDTO], Any] = { apiV2.get .in(V2Paths.Partitionings / path[Long]("partitioningId") / V2Paths.AdditionalData) .out(statusCode(StatusCode.Ok)) @@ -87,9 +86,9 @@ trait Endpoints extends BaseEndpoints { } protected val patchPartitioningAdditionalDataEndpointV2 - : PublicEndpoint[(Long, AdditionalDataPatchDTO), ErrorResponse, SingleSuccessResponse[ - AdditionalDataDTO - ], Any] = { + : PublicEndpoint[(Long, AdditionalDataPatchDTO), ErrorResponse, SingleSuccessResponse[ + AdditionalDataDTO + ], Any] = { apiV2.patch .in(V2Paths.Partitionings / path[Long]("partitioningId") / V2Paths.AdditionalData) .in(jsonBody[AdditionalDataPatchDTO]) @@ -98,6 +97,18 @@ trait Endpoints extends BaseEndpoints { .errorOutVariantPrepend(notFoundErrorOneOfVariant) } + protected val getPartitioningEndpointV2 + : PublicEndpoint[String, ErrorResponse, SingleSuccessResponse[ + PartitioningWithIdDTO + ], Any] = { + apiV2.get + .in(V2Paths.Partitionings) + .in(query[String]("partitioning").description("base64 encoded json representation of partitioning")) + .out(statusCode(StatusCode.Ok)) + .out(jsonBody[SingleSuccessResponse[PartitioningWithIdDTO]]) + .errorOutVariantPrepend(notFoundErrorOneOfVariant) + } + protected val getPartitioningCheckpointEndpointV2 : PublicEndpoint[(Long, UUID), ErrorResponse, SingleSuccessResponse[CheckpointV2DTO], Any] = { apiV2.get @@ -121,16 +132,7 @@ trait Endpoints extends BaseEndpoints { .errorOutVariantPrepend(notFoundErrorOneOfVariant) } - protected val getFlowCheckpointsEndpointV2 - : PublicEndpoint[CheckpointQueryDTO, ErrorResponse, MultiSuccessResponse[CheckpointDTO], Any] = { - apiV2.post - .in(GetFlowCheckpoints) - .in(jsonBody[CheckpointQueryDTO]) - .out(statusCode(StatusCode.Ok)) - .out(jsonBody[MultiSuccessResponse[CheckpointDTO]]) - } - - protected val getPartitioningEndpointV2 + protected val getPartitioningByIdEndpointV2 : PublicEndpoint[Long, ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO], Any] = { apiV2.get .in(V2Paths.Partitionings / path[Long]("partitioningId")) @@ -140,7 +142,7 @@ trait Endpoints extends BaseEndpoints { } protected val getPartitioningMeasuresEndpointV2 - : PublicEndpoint[Long, ErrorResponse, MultiSuccessResponse[MeasureDTO], Any] = { + : PublicEndpoint[Long, 
ErrorResponse, MultiSuccessResponse[MeasureDTO], Any] = { apiV2.get .in(V2Paths.Partitionings / path[Long]("partitioningId") / V2Paths.Measures) .out(statusCode(StatusCode.Ok)) @@ -167,4 +169,5 @@ trait Endpoints extends BaseEndpoints { protected val healthEndpoint: PublicEndpoint[Unit, Unit, Unit, Any] = endpoint.get.in(Health) + } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala index a2456c697..e1605868b 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala @@ -25,11 +25,10 @@ import sttp.tapir.server.interceptor.metrics.MetricsRequestInterceptor import sttp.tapir.swagger.bundle.SwaggerInterpreter import sttp.tapir.ztapir._ import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO, CheckpointV2DTO, PartitioningWithIdDTO} -import za.co.absa.atum.server.Constants.{SwaggerApiName, SwaggerApiVersion} import za.co.absa.atum.server.api.controller.{CheckpointController, FlowController, PartitioningController} import za.co.absa.atum.server.config.{HttpMonitoringConfig, JvmMonitoringConfig} import za.co.absa.atum.server.model.ErrorResponse -import za.co.absa.atum.server.model.SuccessResponse.{PaginatedResponse, SingleSuccessResponse} +import za.co.absa.atum.server.model.SuccessResponse._ import zio._ import zio.interop.catz._ import zio.metrics.connectors.prometheus.PrometheusPublisher @@ -70,6 +69,7 @@ trait Routes extends Endpoints with ServerOptions { PartitioningController.patchPartitioningAdditionalDataV2(partitioningId, additionalDataPatchDTO) } ), + createServerEndpoint(getPartitioningEndpointV2, PartitioningController.getPartitioning), createServerEndpoint[ (Long, UUID), ErrorResponse, @@ -90,8 +90,7 @@ trait Routes extends Endpoints with ServerOptions { CheckpointController.getPartitioningCheckpoints(partitioningId, limit, offset, checkpointName) } ), - createServerEndpoint(getFlowCheckpointsEndpointV2, FlowController.getFlowCheckpointsV2), - createServerEndpoint(getPartitioningEndpointV2, PartitioningController.getPartitioningV2), + createServerEndpoint(getPartitioningByIdEndpointV2, PartitioningController.getPartitioningByIdV2), createServerEndpoint(getPartitioningMeasuresEndpointV2, PartitioningController.getPartitioningMeasuresV2), createServerEndpoint[ (Long, Option[Int], Option[Long]), @@ -120,12 +119,13 @@ trait Routes extends Endpoints with ServerOptions { patchPartitioningAdditionalDataEndpointV2, getPartitioningCheckpointsEndpointV2, getPartitioningCheckpointEndpointV2, - getFlowCheckpointsEndpointV2, + getPartitioningMeasuresEndpointV2, + getPartitioningEndpointV2, getPartitioningMeasuresEndpointV2, getFlowPartitioningsEndpointV2 ) ZHttp4sServerInterpreter[HttpEnv.Env](http4sServerOptions(None)) - .from(SwaggerInterpreter().fromEndpoints[HttpEnv.F](endpoints, SwaggerApiName, SwaggerApiVersion)) + .from(SwaggerInterpreter().fromEndpoints[HttpEnv.F](endpoints, "Atum API", "1.0")) .toRoutes } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala index 54ca4f700..cb4e42ad4 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala @@ -43,10 +43,14 @@ trait PartitioningRepository { 
additionalData: AdditionalDataPatchDTO ): IO[DatabaseError, AdditionalDataDTO] - def getPartitioning(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] + def getPartitioningById(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] def getPartitioningMeasuresById(partitioningId: Long): IO[DatabaseError, Seq[MeasureDTO]] + def getPartitioning( + partitioning: PartitioningDTO + ): IO[DatabaseError, PartitioningWithIdDTO] + def getFlowPartitionings( flowId: Long, limit: Option[Int], diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala index 37e6f81ef..ea8d7bbfc 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala @@ -17,13 +17,10 @@ package za.co.absa.atum.server.api.repository import za.co.absa.atum.model.dto._ -import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings -import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings.{ - GetFlowPartitioningsArgs, - GetFlowPartitioningsResult -} +import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings._ import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs import za.co.absa.atum.server.api.database.runs.functions._ +import za.co.absa.atum.server.api.database.flows.functions._ import za.co.absa.atum.server.api.exception.DatabaseError import za.co.absa.atum.server.model._ import zio._ @@ -31,15 +28,16 @@ import zio.interop.catz.asyncInstance import za.co.absa.atum.server.api.exception.DatabaseError.GeneralDatabaseError import za.co.absa.atum.server.model.PaginatedResult.{ResultHasMore, ResultNoMore} -class PartitioningRepositoryImpl( +class PartitioningRepositoryImpl ( createPartitioningIfNotExistsFn: CreatePartitioningIfNotExists, createPartitioningFn: CreatePartitioning, getPartitioningMeasuresFn: GetPartitioningMeasures, getPartitioningAdditionalDataFn: GetPartitioningAdditionalData, createOrUpdateAdditionalDataFn: CreateOrUpdateAdditionalData, - getPartitioningByIdFn: GetPartitioningById, getPartitioningAdditionalDataV2Fn: GetPartitioningAdditionalDataV2, + getPartitioningByIdFn: GetPartitioningById, getPartitioningMeasuresByIdFn: GetPartitioningMeasuresById, + getPartitioningFn: GetPartitioning, getFlowPartitioningsFn: GetFlowPartitionings ) extends PartitioningRepository with BaseRepository { @@ -95,17 +93,10 @@ class PartitioningRepositoryImpl( ).map(AdditionalDataItemFromDB.additionalDataFromDBItems) } - override def getPartitioning(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] = { - dbSingleResultCallWithStatus(getPartitioningByIdFn(partitioningId), "getPartitioningById") - .flatMap { - case Some(PartitioningFromDB(id, partitioning, author)) => - val decodingResult = partitioning.as[PartitioningDTO] - decodingResult.fold( - error => ZIO.fail(GeneralDatabaseError(s"Failed to decode JSON: $error")), - partitioningDTO => ZIO.succeed(PartitioningWithIdDTO(id, partitioningDTO, author)) - ) - case None => ZIO.fail(GeneralDatabaseError("Unexpected error.")) - } + override def getPartitioningById(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] = { + processPartitioningFromDBOptionIO( + dbSingleResultCallWithStatus(getPartitioningByIdFn(partitioningId), 
"getPartitioningById") + ) } override def getPartitioningMeasuresById(partitioningId: Long): IO[DatabaseError, Seq[MeasureDTO]] = { @@ -115,6 +106,35 @@ class PartitioningRepositoryImpl( }) } + override def getPartitioning( + partitioning: PartitioningDTO + ): IO[DatabaseError, PartitioningWithIdDTO] = { + processPartitioningFromDBOptionIO( + dbSingleResultCallWithStatus( + getPartitioningFn(PartitioningForDB.fromSeqPartitionDTO(partitioning)), + "getPartitioning" + ) + ) + } + + private def processPartitioningFromDBOptionIO( + partitioningFromDBOptionIO: IO[DatabaseError, Option[PartitioningFromDB]] + ): IO[DatabaseError, PartitioningWithIdDTO] = { + partitioningFromDBOptionIO.flatMap { + case Some(PartitioningFromDB(id, partitioning, author)) => + val decodingResult = partitioning.as[PartitioningForDB] + decodingResult.fold( + error => ZIO.fail(GeneralDatabaseError(s"Failed to decode JSON: $error")), + partitioningForDB => { + val partitioningDTO: PartitioningDTO = partitioningForDB.keys.map { key => + PartitionDTO(key, partitioningForDB.keysToValues(key)) + } + ZIO.succeed(PartitioningWithIdDTO(id, partitioningDTO, author)) + } + ) + case None => ZIO.fail(GeneralDatabaseError("Unexpected error.")) + } + } override def getFlowPartitionings( flowId: Long, limit: Option[Int], @@ -148,6 +168,7 @@ object PartitioningRepositoryImpl { with GetPartitioningAdditionalDataV2 with GetPartitioningById with GetPartitioningMeasuresById + with GetPartitioning with GetFlowPartitionings, PartitioningRepository ] = ZLayer { @@ -157,9 +178,10 @@ object PartitioningRepositoryImpl { getPartitioningMeasures <- ZIO.service[GetPartitioningMeasures] getPartitioningAdditionalData <- ZIO.service[GetPartitioningAdditionalData] createOrUpdateAdditionalData <- ZIO.service[CreateOrUpdateAdditionalData] - getPartitioningById <- ZIO.service[GetPartitioningById] getPartitioningAdditionalDataV2 <- ZIO.service[GetPartitioningAdditionalDataV2] - getPartitioningMeasuresV2 <- ZIO.service[GetPartitioningMeasuresById] + getPartitioningById <- ZIO.service[GetPartitioningById] + getPartitioningMeasuresById <- ZIO.service[GetPartitioningMeasuresById] + getPartitioning <- ZIO.service[GetPartitioning] getFlowPartitionings <- ZIO.service[GetFlowPartitionings] } yield new PartitioningRepositoryImpl( createPartitioningIfNotExists, @@ -167,10 +189,12 @@ object PartitioningRepositoryImpl { getPartitioningMeasures, getPartitioningAdditionalData, createOrUpdateAdditionalData, - getPartitioningById, getPartitioningAdditionalDataV2, - getPartitioningMeasuresV2, + getPartitioningById, + getPartitioningMeasuresById, + getPartitioning, getFlowPartitionings ) } + } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala index a9d03b798..f83d709e5 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala @@ -41,10 +41,14 @@ trait PartitioningService { additionalData: AdditionalDataPatchDTO ): IO[ServiceError, AdditionalDataDTO] - def getPartitioning(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] + def getPartitioningById(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] def getPartitioningMeasuresById(partitioningId: Long): IO[ServiceError, Seq[MeasureDTO]] + def getPartitioning( + partitioning: PartitioningDTO + ): IO[ServiceError, PartitioningWithIdDTO] + def 
getFlowPartitionings( flowId: Long, limit: Option[Int], diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala index 6d678e89d..4b34b9104 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala @@ -75,8 +75,8 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) ) } - override def getPartitioning(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] = { - repositoryCall(partitioningRepository.getPartitioning(partitioningId), "getPartitioning") + override def getPartitioningById(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] = { + repositoryCall(partitioningRepository.getPartitioningById(partitioningId), "getPartitioning") } override def getPartitioningMeasuresById(partitioningId: Long): IO[ServiceError, Seq[MeasureDTO]] = { @@ -96,6 +96,14 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) "getFlowPartitionings" ) } + override def getPartitioning( + partitioning: PartitioningDTO + ): IO[ServiceError, PartitioningWithIdDTO] = { + repositoryCall( + partitioningRepository.getPartitioning(partitioning), + "getPartitioning" + ) + } } object PartitioningServiceImpl { diff --git a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala index db8c3c390..5305a83f4 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala @@ -16,16 +16,16 @@ package za.co.absa.atum.server.api -import io.circe.{Json, parser} +import io.circe.parser +import io.circe.syntax.EncoderOps +import za.co.absa.atum.model.dto.MeasureResultDTO.TypedValue import za.co.absa.atum.model.dto._ +import za.co.absa.atum.model.{ResultValueType, dto} +import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings.GetFlowPartitioningsResult import za.co.absa.atum.server.model.{CheckpointFromDB, CheckpointItemFromDB, MeasureFromDB, PartitioningFromDB} import java.time.ZonedDateTime -import java.util.UUID -import MeasureResultDTO.TypedValue -import io.circe.syntax.EncoderOps -import za.co.absa.atum.model.ResultValueType -import za.co.absa.atum.server.api.database.flows.functions.GetFlowPartitionings.GetFlowPartitioningsResult +import java.util.{Base64, UUID} trait TestData { @@ -50,24 +50,19 @@ trait TestData { authorIfNew = "" ) - private val partitioningAsJson: Json = parser + private val partitioningAsJson = parser .parse( """ - |[ - | { - | "key": "key1", - | "value": "val1" - | }, - | { - | "key": "key2", - | "value": "val2" + |{ + | "version": 1, + | "keys": ["key1", "key2"], + | "keysToValues": { + | "key1": "val1", + | "key2": "val2" | } - |] + |} |""".stripMargin - ) - .getOrElse { - throw new Exception("Failed to parse JSON") - } + ).getOrElse(throw new Exception("Failed to parse JSON")) // Partitioning from the DB protected val partitioningFromDB1: PartitioningFromDB = PartitioningFromDB( @@ -378,4 +373,8 @@ trait TestData { AtumContextDTO(partitioningSubmitDTO.partitioning, measures, additionalData) } + protected def encodePartitioningDTO(partitioningDTO: PartitioningDTO): String = { + Base64.getUrlEncoder.encodeToString(partitioningDTO.asJson(dto.encodePartitioningDTO).noSpaces.getBytes("UTF-8")) + } + } 
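For context, a minimal client-side sketch (not part of the patch) of how the partitioning query parameter for the new GET /api/v2/partitionings endpoint can be produced, mirroring the encodePartitioningDTO helper added to TestData above. It assumes PartitioningDTO is a Seq[PartitionDTO] and reuses the dto.encodePartitioningDTO circe encoder referenced in the test code; the object and method names here are purely illustrative.

import java.util.Base64

import io.circe.syntax.EncoderOps
import za.co.absa.atum.model.dto
import za.co.absa.atum.model.dto.{PartitionDTO, PartitioningDTO}

object PartitioningQueryParamExample {
  // Illustrative helper (not in the patch): encodes a partitioning as base64 of its
  // JSON form, which is the shape BaseController.base64Decode parses on the server side.
  def encodePartitioning(partitioning: PartitioningDTO): String =
    Base64.getUrlEncoder.encodeToString(
      partitioning.asJson(dto.encodePartitioningDTO).noSpaces.getBytes("UTF-8")
    )

  // Example usage: GET /api/v2/partitionings?partitioning=<encoded>
  val encoded: String =
    encodePartitioning(Seq(PartitionDTO("key1", "val1"), PartitionDTO("key2", "val2")))
}

Note that this sketch (like the test helper) uses Base64.getUrlEncoder, while BaseController.base64Decode uses Base64.getDecoder; the basic and URL-safe alphabets differ only in '+' vs '-' and '/' vs '_', so payloads containing those characters would decode differently.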
diff --git a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala index 315c21e76..69e3a9f02 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala @@ -62,11 +62,18 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { when(partitioningServiceMock.getPartitioningAdditionalDataV2(3L)) .thenReturn(ZIO.fail(NotFoundServiceError("not found"))) - when(partitioningServiceMock.getPartitioning(11L)) + when(partitioningServiceMock.getPartitioningById(11L)) .thenReturn(ZIO.succeed(partitioningWithIdDTO1)) - when(partitioningServiceMock.getPartitioning(22L)) + when(partitioningServiceMock.getPartitioningById(22L)) .thenReturn(ZIO.fail(NotFoundServiceError("not found"))) - when(partitioningServiceMock.getPartitioning(99L)) + when(partitioningServiceMock.getPartitioningById(99L)) + .thenReturn(ZIO.fail(GeneralServiceError("boom!"))) + + when(partitioningServiceMock.getPartitioning(partitioningDTO1)) + .thenReturn(ZIO.succeed(partitioningWithIdDTO1)) + when(partitioningServiceMock.getPartitioning(partitioningDTO2)) + .thenReturn(ZIO.fail(NotFoundServiceError("Partitioning not found"))) + when(partitioningServiceMock.getPartitioning(partitioningDTO3)) .thenReturn(ZIO.fail(GeneralServiceError("boom!"))) when(partitioningServiceMock.getFlowPartitionings(1L, Some(1), Some(0))) @@ -134,21 +141,21 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { ) } ), - suite("GetPartitioningSuite")( + suite("GetPartitioningByIdSuite")( test("Returns expected PartitioningWithIdDTO") { for { - result <- PartitioningController.getPartitioningV2(11L) + result <- PartitioningController.getPartitioningByIdV2(11L) expected = SingleSuccessResponse(partitioningWithIdDTO1, uuid1) actual = result.copy(requestId = uuid1) } yield assertTrue(actual == expected) }, test("Returns expected NotFoundErrorResponse") { - assertZIO(PartitioningController.getPartitioningV2(22L).exit)( + assertZIO(PartitioningController.getPartitioningByIdV2(22L).exit)( failsWithA[NotFoundErrorResponse] ) }, test("Returns expected InternalServerErrorResponse") { - assertZIO(PartitioningController.getPartitioningV2(99L).exit)( + assertZIO(PartitioningController.getPartitioningByIdV2(99L).exit)( failsWithA[InternalServerErrorResponse] ) } @@ -197,10 +204,30 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { failsWithA[NotFoundErrorResponse] ) } + ), + suite("GetPartitioningSuite")( + test("GetPartitioning - Returns expected PartitioningWithIdDTO") { + for { + result <- PartitioningController.getPartitioning(encodePartitioningDTO(partitioningDTO1)) + expected = SingleSuccessResponse(partitioningWithIdDTO1, uuid1) + actual = result.copy(requestId = uuid1) + } yield assertTrue(actual == expected) + }, + test("GetPartitioning - Returns expected NotFoundErrorResponse") { + assertZIO(PartitioningController.getPartitioning(encodePartitioningDTO(partitioningDTO2)).exit)( + failsWithA[NotFoundErrorResponse] + ) + }, + test("GetPartitioning - Returns expected InternalServerErrorResponse") { + assertZIO(PartitioningController.getPartitioning(encodePartitioningDTO(partitioningDTO3)).exit)( + failsWithA[InternalServerErrorResponse] + ) + } ) ).provide( PartitioningControllerImpl.layer, 
partitioningServiceMockLayer ) } + } diff --git a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningIntegrationTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningIntegrationTests.scala new file mode 100644 index 000000000..1998fc66b --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningIntegrationTests.scala @@ -0,0 +1,46 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.server.api.database.runs.functions + +import za.co.absa.atum.model.dto.PartitionDTO +import za.co.absa.atum.server.ConfigProviderTest +import za.co.absa.atum.server.api.TestTransactorProvider +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.model.PartitioningForDB +import za.co.absa.db.fadb.exceptions.DataNotFoundException +import za.co.absa.db.fadb.status.FunctionStatus +import zio.{Scope, ZIO} +import zio.test.{Spec, TestEnvironment, assertTrue} +import zio.interop.catz.asyncInstance + +object GetPartitioningIntegrationTests extends ConfigProviderTest { + + override def spec: Spec[Unit with TestEnvironment with Scope, Any] = { + suite("GetPartitioningIntegrationTests")( + test("GetPartitioning returns DataNotFoundException when partitioning not found") { + for { + getPartitioningFn <- ZIO.service[GetPartitioning] + result <- getPartitioningFn(PartitioningForDB.fromSeqPartitionDTO(Seq.empty[PartitionDTO])) + } yield assertTrue(result == Left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) + } + ) + }.provide( + GetPartitioning.layer, + PostgresDatabaseProvider.layer, + TestTransactorProvider.layerWithRollback + ) +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala index b6f415332..88d749e33 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package za.co.absa.atum.server.api.database.runs.functions import za.co.absa.atum.server.ConfigProviderTest diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/GetFlowCheckpointsEndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/GetFlowCheckpointsEndpointUnitTests.scala deleted file mode 100644 index bb11a896e..000000000 --- a/server/src/test/scala/za/co/absa/atum/server/api/http/GetFlowCheckpointsEndpointUnitTests.scala +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2021 ABSA Group Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package za.co.absa.atum.server.api.http - -import org.mockito.Mockito.{mock, when} -import sttp.client3.testing.SttpBackendStub -import sttp.client3.{UriContext, basicRequest} -import sttp.client3.circe._ -import sttp.model.StatusCode -import sttp.tapir.server.stub.TapirStubInterpreter -import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} -import za.co.absa.atum.model.dto.CheckpointDTO -import za.co.absa.atum.server.api.TestData -import za.co.absa.atum.server.api.controller.FlowController -import za.co.absa.atum.server.model.{GeneralErrorResponse, InternalServerErrorResponse} -import za.co.absa.atum.server.model.SuccessResponse.MultiSuccessResponse -import zio._ -import zio.test.Assertion.equalTo -import zio.test._ - -object GetFlowCheckpointsEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { - - private val flowControllerMock = mock(classOf[FlowController]) - - when(flowControllerMock.getFlowCheckpointsV2(checkpointQueryDTO1)) - .thenReturn(ZIO.succeed(MultiSuccessResponse(Seq(checkpointDTO1, checkpointDTO2), uuid1))) - when(flowControllerMock.getFlowCheckpointsV2(checkpointQueryDTO2)) - .thenReturn(ZIO.fail(GeneralErrorResponse("error"))) - when(flowControllerMock.getFlowCheckpointsV2(checkpointQueryDTO3)) - .thenReturn(ZIO.fail(InternalServerErrorResponse("error"))) - - private val flowControllerMockLayer = ZLayer.succeed(flowControllerMock) - - private val getFlowCheckpointsServerEndpoint = - getFlowCheckpointsEndpointV2.zServerLogic(FlowController.getFlowCheckpointsV2) - - def spec: Spec[TestEnvironment with Scope, Any] = { - val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[FlowController])) - .whenServerEndpoint(getFlowCheckpointsServerEndpoint) - .thenRunLogic() - .backend() - - val request = basicRequest - .post(uri"https://test.com/api/v2/get-flow-checkpoints") - .response(asJson[MultiSuccessResponse[CheckpointDTO]]) - - suite("GetFlowCheckpointsEndpointSuite")( - test("Returns expected CheckpointDTO") { - val response = request - .body(checkpointQueryDTO1) - .send(backendStub) - - val body = response.map(_.body) - val statusCode = response.map(_.code) - - val expectedResult = MultiSuccessResponse(Seq(checkpointDTO1, checkpointDTO2), uuid1) - - assertZIO(body <&> statusCode)(equalTo(Right(expectedResult), StatusCode.Ok)) - }, - test("Returns expected BadRequest") { - val response = request - .body(checkpointQueryDTO2) - 
.send(backendStub) - - val statusCode = response.map(_.code) - - assertZIO(statusCode)(equalTo(StatusCode.BadRequest)) - }, - test("Returns expected InternalServerError") { - val response = request - .body(checkpointQueryDTO3) - .send(backendStub) - - val statusCode = response.map(_.code) - - assertZIO(statusCode)(equalTo(StatusCode.InternalServerError)) - } - ) - }.provide( - flowControllerMockLayer - ) - -} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningByIdEndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningByIdEndpointUnitTests.scala new file mode 100644 index 000000000..275c13e17 --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningByIdEndpointUnitTests.scala @@ -0,0 +1,90 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.server.api.http + +import org.mockito.Mockito.{mock, when} +import sttp.client3.testing.SttpBackendStub +import sttp.client3._ +import sttp.tapir.server.stub.TapirStubInterpreter +import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} +import sttp.client3.circe._ +import sttp.model.StatusCode +import za.co.absa.atum.model.dto.PartitioningWithIdDTO +import za.co.absa.atum.server.api.TestData +import za.co.absa.atum.server.api.controller.PartitioningController +import za.co.absa.atum.server.model.{InternalServerErrorResponse, NotFoundErrorResponse} +import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse +import zio.{Scope, ZIO, ZLayer} +import zio.test.{Spec, TestEnvironment, ZIOSpecDefault, assertTrue} + +object GetPartitioningByIdEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { + + private val partitioningControllerMock = mock(classOf[PartitioningController]) + + when(partitioningControllerMock.getPartitioningByIdV2(1L)) + .thenReturn(ZIO.succeed(SingleSuccessResponse(partitioningWithIdDTO1))) + when(partitioningControllerMock.getPartitioningByIdV2(2L)) + .thenReturn(ZIO.fail(InternalServerErrorResponse("error"))) + when(partitioningControllerMock.getPartitioningByIdV2(3L)) + .thenReturn(ZIO.fail(NotFoundErrorResponse("boom!"))) + + private val partitioningControllerMockLayer = ZLayer.succeed(partitioningControllerMock) + + private val getPartitioningServerEndpoint = + getPartitioningByIdEndpointV2.zServerLogic(PartitioningController.getPartitioningByIdV2) + + def spec: Spec[TestEnvironment with Scope, Any] = { + val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[PartitioningController])) + .whenServerEndpoint(getPartitioningServerEndpoint) + .thenRunLogic() + .backend() + + def createBasicRequest(id: Long): RequestT[Identity, Either[ResponseException[String, io.circe.Error], SingleSuccessResponse[PartitioningWithIdDTO]], Any] = { + basicRequest + .get(uri"https://test.com/api/v2/partitionings/$id") + .response(asJson[SingleSuccessResponse[PartitioningWithIdDTO]]) + } + + 
suite("GetPartitioningEndpointSuite")( + test("Returns expected PartitioningWithIdDTO") { + for { + response <- createBasicRequest(1L).send(backendStub) + body <- ZIO.fromEither(response.body) + statusCode = response.code + } yield { + assertTrue(body.data == SingleSuccessResponse(partitioningWithIdDTO1).data, statusCode == StatusCode.Ok) + } + }, + test("Returns expected general error") { + for { + response <- createBasicRequest(2L).send(backendStub) + statusCode = response.code + } yield { + assertTrue(statusCode == StatusCode.InternalServerError) + } + }, + test("Returns expected not found error") { + for { + response <- createBasicRequest(3L).send(backendStub) + statusCode = response.code + } yield { + assertTrue(statusCode == StatusCode.NotFound) + } + } + ) + }.provide(partitioningControllerMockLayer) +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningEndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningEndpointUnitTests.scala index 2fa7b0325..b6916f923 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningEndpointUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningEndpointUnitTests.scala @@ -16,75 +16,75 @@ package za.co.absa.atum.server.api.http +import io.circe import org.mockito.Mockito.{mock, when} +import sttp.client3.circe.asJson +import sttp.client3.{Identity, RequestT, ResponseException, UriContext, basicRequest} import sttp.client3.testing.SttpBackendStub -import sttp.client3._ +import sttp.model.StatusCode import sttp.tapir.server.stub.TapirStubInterpreter import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} -import sttp.client3.circe._ -import sttp.model.StatusCode -import za.co.absa.atum.model.dto.PartitioningWithIdDTO +import za.co.absa.atum.model.dto.{PartitioningDTO, PartitioningWithIdDTO} import za.co.absa.atum.server.api.TestData import za.co.absa.atum.server.api.controller.PartitioningController -import za.co.absa.atum.server.model.{InternalServerErrorResponse, NotFoundErrorResponse} +import za.co.absa.atum.server.model.NotFoundErrorResponse import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse +import zio.test.Assertion.equalTo import zio.{Scope, ZIO, ZLayer} -import zio.test.{Spec, TestEnvironment, ZIOSpecDefault, assertTrue} +import zio.test.{Spec, TestEnvironment, ZIOSpecDefault, assertZIO} object GetPartitioningEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { private val partitioningControllerMock = mock(classOf[PartitioningController]) - when(partitioningControllerMock.getPartitioningV2(1L)) - .thenReturn(ZIO.succeed(SingleSuccessResponse(partitioningWithIdDTO1))) - when(partitioningControllerMock.getPartitioningV2(2L)) - .thenReturn(ZIO.fail(InternalServerErrorResponse("error"))) - when(partitioningControllerMock.getPartitioningV2(3L)) - .thenReturn(ZIO.fail(NotFoundErrorResponse("boom!"))) + when(partitioningControllerMock.getPartitioning(encodePartitioningDTO(partitioningDTO1))) + .thenReturn(ZIO.succeed(SingleSuccessResponse(partitioningWithIdDTO1, uuid1))) + when(partitioningControllerMock.getPartitioning(encodePartitioningDTO(partitioningDTO2))) + .thenReturn(ZIO.fail(NotFoundErrorResponse("Partitioning not found"))) private val partitioningControllerMockLayer = ZLayer.succeed(partitioningControllerMock) private val getPartitioningServerEndpoint = - getPartitioningEndpointV2.zServerLogic(PartitioningController.getPartitioningV2) + 
getPartitioningEndpointV2.zServerLogic(PartitioningController.getPartitioning) - def spec: Spec[TestEnvironment with Scope, Any] = { + override def spec: Spec[TestEnvironment with Scope, Any] = { val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[PartitioningController])) .whenServerEndpoint(getPartitioningServerEndpoint) .thenRunLogic() .backend() - def createBasicRequest(id: Long): RequestT[Identity, Either[ResponseException[String, io.circe.Error], SingleSuccessResponse[PartitioningWithIdDTO]], Any] = { - basicRequest - .get(uri"https://test.com/api/v2/partitionings/$id") - .response(asJson[SingleSuccessResponse[PartitioningWithIdDTO]]) - } - suite("GetPartitioningEndpointSuite")( - test("Returns expected PartitioningWithIdDTO") { - for { - response <- createBasicRequest(1L).send(backendStub) - body <- ZIO.fromEither(response.body) - statusCode = response.code - } yield { - assertTrue(body.data == SingleSuccessResponse(partitioningWithIdDTO1).data, statusCode == StatusCode.Ok) - } - }, - test("Returns expected general error") { - for { - response <- createBasicRequest(2L).send(backendStub) - statusCode = response.code - } yield { - assertTrue(statusCode == StatusCode.InternalServerError) - } + test("Returns expected SingleSuccessResponse[PartitioningWithIdDTO]") { + val response = getRequestForPartitioningDTO(partitioningDTO1).send(backendStub) + + val body = response.map(_.body) + val statusCode = response.map(_.code) + + val expectedResult = SingleSuccessResponse(partitioningWithIdDTO1, uuid1) + + assertZIO(body <&> statusCode)(equalTo(Right(expectedResult), StatusCode.Ok)) }, - test("Returns expected not found error") { - for { - response <- createBasicRequest(3L).send(backendStub) - statusCode = response.code - } yield { - assertTrue(statusCode == StatusCode.NotFound) - } + test("Returns NotFoundErrorResponse") { + val response = getRequestForPartitioningDTO(partitioningDTO2).send(backendStub) + + val statusCode = response.map(_.code) + + assertZIO(statusCode)(equalTo(StatusCode.NotFound)) } ) + }.provide(partitioningControllerMockLayer) + + private def getRequestForPartitioningDTO(partitioningDTO: PartitioningDTO): RequestT[Identity, Either[ + ResponseException[String, circe.Error], + SingleSuccessResponse[PartitioningWithIdDTO] + ], Any] = { + val baseUrl = uri"https://test.com/api/v2/partitionings" + val encodedPartitioning = encodePartitioningDTO(partitioningDTO) + + basicRequest + .get(baseUrl.addParam("partitioning", encodedPartitioning)) + .response(asJson[SingleSuccessResponse[PartitioningWithIdDTO]]) + } + } diff --git a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala index 23e147aaa..455dd42d3 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala @@ -32,7 +32,7 @@ import zio._ import zio.interop.catz.asyncInstance import zio.test.Assertion.failsWithA import zio.test._ -import za.co.absa.atum.server.model.{AdditionalDataFromDB, AdditionalDataItemFromDB, PaginatedResult} +import za.co.absa.atum.server.model.{AdditionalDataFromDB, AdditionalDataItemFromDB, PartitioningForDB} object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { @@ -90,7 +90,7 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData 
       Seq(Row(FunctionStatus(0, "success"), measureFromDB1), Row(FunctionStatus(0, "success"), measureFromDB2))
     )
   )
-  when(getPartitioningMeasuresMock.apply(partitioningDTO2)).thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+  when(getPartitioningMeasuresMock.apply(partitioningDTO2)).thenReturn(ZIO.fail(new Exception("boom!")))
 
   private val getPartitioningMeasuresMockLayer = ZLayer.succeed(getPartitioningMeasuresMock)
 
@@ -99,21 +99,21 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData {
   when(getPartitioningAdditionalDataMock.apply(partitioningDTO1))
     .thenReturn(ZIO.right(Seq(Row(FunctionStatus(0, "success"), AdditionalDataFromDB(Some("key"), Some("value"))))))
-  when(getPartitioningAdditionalDataMock.apply(partitioningDTO2)).thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+  when(getPartitioningAdditionalDataMock.apply(partitioningDTO2)).thenReturn(ZIO.fail(new Exception("boom!")))
 
   private val getPartitioningAdditionalDataMockLayer = ZLayer.succeed(getPartitioningAdditionalDataMock)
 
   // Get Partitioning By Id Mocks
-  private val getPartitioningByIdMock = mock(classOf[GetPartitioningById])
+  private val getPartitioningByIdByIdMock = mock(classOf[GetPartitioningById])
 
-  when(getPartitioningByIdMock.apply(1111L))
+  when(getPartitioningByIdByIdMock.apply(1111L))
     .thenReturn(ZIO.right(Row(FunctionStatus(11, "OK"), Some(partitioningFromDB1))))
-  when(getPartitioningByIdMock.apply(9999L))
+  when(getPartitioningByIdByIdMock.apply(9999L))
     .thenReturn(ZIO.fail(DataNotFoundException(FunctionStatus(41, "Partitioning not found"))))
-  when(getPartitioningByIdMock.apply(8888L))
-    .thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+  when(getPartitioningByIdByIdMock.apply(8888L))
+    .thenReturn(ZIO.fail(new Exception("boom!")))
 
-  private val getPartitioningByIdMockLayer = ZLayer.succeed(getPartitioningByIdMock)
+  private val getPartitioningByIdByIdMockLayer = ZLayer.succeed(getPartitioningByIdByIdMock)
 
   // GetPartitioningAdditionalDataV2
   private val getPartitioningAdditionalDataV2Mock = mock(classOf[GetPartitioningAdditionalDataV2])
 
@@ -121,7 +121,7 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData {
   when(getPartitioningAdditionalDataV2Mock.apply(1L)).thenReturn(
     ZIO.right(Seq(Row(FunctionStatus(0, "success"), Some(AdditionalDataItemFromDB("key", Some("value"), "author")))))
   )
-  when(getPartitioningAdditionalDataV2Mock.apply(2L)).thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+  when(getPartitioningAdditionalDataV2Mock.apply(2L)).thenReturn(ZIO.fail(new Exception("boom!")))
   when(getPartitioningAdditionalDataV2Mock.apply(3L)).thenReturn(
     ZIO.left(DataNotFoundException(FunctionStatus(41, "not found")))
   )
@@ -137,10 +137,21 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData {
     .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found"))))
   when(getPartitioningMeasuresV2Mock.apply(3L))
     .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(42, "Measures not found"))))
-  when(getPartitioningMeasuresV2Mock.apply(4L)).thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+  when(getPartitioningMeasuresV2Mock.apply(4L)).thenReturn(ZIO.fail(new Exception("boom!")))
 
   private val getPartitioningMeasuresV2MockLayer = ZLayer.succeed(getPartitioningMeasuresV2Mock)
 
+  private val getPartitioningMock = mock(classOf[GetPartitioning])
+
+  when(getPartitioningMock.apply(PartitioningForDB.fromSeqPartitionDTO(partitioningDTO1)))
+    .thenReturn(ZIO.right(Row(FunctionStatus(11, "OK"), Some(partitioningFromDB1))))
+  when(getPartitioningMock.apply(PartitioningForDB.fromSeqPartitionDTO(partitioningDTO2)))
+    .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found"))))
+  when(getPartitioningMock.apply(PartitioningForDB.fromSeqPartitionDTO(partitioningDTO3)))
+    .thenReturn(ZIO.fail(new Exception("boom!")))
+
+  private val getPartitioningMockLayer = ZLayer.succeed(getPartitioningMock)
+
   private val getFlowPartitioningsMock = mock(classOf[GetFlowPartitionings])
 
   when(getFlowPartitioningsMock.apply(GetFlowPartitioningsArgs(1L, Some(10), Some(0)))
@@ -271,22 +282,22 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData {
       }
     ),
     suite("GetPartitioningByIdSuite")(
-      test("Returns expected PartitioningWithIdDTO") {
+      test("GetPartitioningById - Returns expected PartitioningWithIdDTO") {
         for {
-          result <- PartitioningRepository.getPartitioning(1111L)
+          result <- PartitioningRepository.getPartitioningById(1111L)
         } yield assertTrue(result == partitioningWithIdDTO1)
       },
-      test("Returns expected DataNotFoundException") {
+      test("GetPartitioningById - Returns expected DataNotFoundException") {
         for {
-          result <- PartitioningRepository.getPartitioning(9999L).exit
+          result <- PartitioningRepository.getPartitioningById(9999L).exit
         } yield assertTrue(
           result == Exit.fail(
             NotFoundDatabaseError("Exception caused by operation: 'getPartitioningById': (41) Partitioning not found")
           )
         )
       },
-      test("Returns expected GeneralDatabaseError") {
-        assertZIO(PartitioningRepository.getPartitioning(8888L).exit)(
+      test("GetPartitioningById - Returns expected GeneralDatabaseError") {
+        assertZIO(PartitioningRepository.getPartitioningById(8888L).exit)(
           failsWithA[GeneralDatabaseError]
         )
       }
@@ -334,19 +345,37 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData {
          failsWithA[GeneralDatabaseError]
        )
      }
+    ),
+    suite("GetPartitioningSuite")(
+      test("GetPartitioning - Returns expected PartitioningWithIdDTO") {
+        for {
+          result <- PartitioningRepository.getPartitioning(partitioningDTO1)
+        } yield assertTrue(result == partitioningWithIdDTO1)
+      },
+      test("GetPartitioning - Returns expected NotFoundDatabaseError") {
+        assertZIO(PartitioningRepository.getPartitioning(partitioningDTO2).exit)(
+          failsWithA[NotFoundDatabaseError]
+        )
+      },
+      test("GetPartitioning - Returns expected GeneralDatabaseError") {
+        assertZIO(PartitioningRepository.getPartitioning(partitioningDTO3).exit)(
+          failsWithA[GeneralDatabaseError]
+        )
+      }
     )
-  ).provide(
-    PartitioningRepositoryImpl.layer,
-    createPartitioningIfNotExistsMockLayer,
-    createPartitioningMockLayer,
-    getPartitioningMeasuresMockLayer,
-    getPartitioningAdditionalDataMockLayer,
-    createOrUpdateAdditionalDataMockLayer,
-    getPartitioningByIdMockLayer,
-    getPartitioningAdditionalDataV2MockLayer,
-    getPartitioningMeasuresV2MockLayer,
-    getFlowPartitioningsMockLayer
   )
-  }
+  }.provide(
+    PartitioningRepositoryImpl.layer,
+    createPartitioningIfNotExistsMockLayer,
+    createPartitioningMockLayer,
+    getPartitioningMeasuresMockLayer,
+    getPartitioningAdditionalDataMockLayer,
+    createOrUpdateAdditionalDataMockLayer,
+    getPartitioningByIdByIdMockLayer,
+    getPartitioningAdditionalDataV2MockLayer,
+    getPartitioningMockLayer,
+    getPartitioningMeasuresV2MockLayer,
+    getFlowPartitioningsMockLayer
+  )
 }
diff --git a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala
index 3e647d5a1..405a9749d 100644
--- a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala
+++ b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala
@@ -67,10 +67,10 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData {
   when(partitioningRepositoryMock.getPartitioningAdditionalDataV2(2L))
     .thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
 
-  when(partitioningRepositoryMock.getPartitioning(1111L)).thenReturn(ZIO.succeed(partitioningWithIdDTO1))
-  when(partitioningRepositoryMock.getPartitioning(9999L))
+  when(partitioningRepositoryMock.getPartitioningById(1111L)).thenReturn(ZIO.succeed(partitioningWithIdDTO1))
+  when(partitioningRepositoryMock.getPartitioningById(9999L))
     .thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
-  when(partitioningRepositoryMock.getPartitioning(8888L))
+  when(partitioningRepositoryMock.getPartitioningById(8888L))
     .thenReturn(ZIO.fail(NotFoundDatabaseError("Partitioning not found")))
 
   when(partitioningRepositoryMock.getPartitioningMeasuresById(1L))
@@ -80,6 +80,13 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData {
   when(partitioningRepositoryMock.getPartitioningMeasuresById(3L))
     .thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
 
+  when(partitioningRepositoryMock.getPartitioning(partitioningDTO1))
+    .thenReturn(ZIO.succeed(partitioningWithIdDTO1))
+  when(partitioningRepositoryMock.getPartitioning(partitioningDTO2))
+    .thenReturn(ZIO.fail(NotFoundDatabaseError("Partitioning not found")))
+  when(partitioningRepositoryMock.getPartitioning(partitioningDTO3))
+    .thenReturn(ZIO.fail(GeneralDatabaseError("boom!")))
+
   when(partitioningRepositoryMock.getFlowPartitionings(1L, Some(1), Some(1L)))
     .thenReturn(ZIO.succeed(ResultHasMore(Seq(partitioningWithIdDTO1))))
   when(partitioningRepositoryMock.getFlowPartitionings(2L, Some(1), Some(1L)))
@@ -196,17 +203,17 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData {
     suite("GetPartitioningByIDSuite")(
       test("Returns expected Right with PartitioningWithIdDTO") {
         for {
-          result <- PartitioningService.getPartitioning(1111L)
+          result <- PartitioningService.getPartitioningById(1111L)
         } yield assertTrue(result == partitioningWithIdDTO1)
       },
       test("Returns expected GeneralDatabaseError") {
         for {
-          result <- PartitioningService.getPartitioning(9999L).exit
+          result <- PartitioningService.getPartitioningById(9999L).exit
         } yield assertTrue(result == Exit.fail(GeneralServiceError("Failed to perform 'getPartitioning': boom!")))
       },
       test("Returns expected NotFoundDatabaseError") {
         for {
-          result <- PartitioningService.getPartitioning(8888L).exit
+          result <- PartitioningService.getPartitioningById(8888L).exit
         } yield assertTrue(
           result == Exit.fail(NotFoundServiceError("Failed to perform 'getPartitioning': Partitioning not found"))
         )
@@ -229,6 +236,25 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData {
         )
       }
     ),
+    suite("GetPartitioningSuite")(
+      test("GetPartitioning - Returns expected Right with PartitioningWithIdDTO") {
+        for {
+          result <- PartitioningService.getPartitioning(partitioningDTO1)
+        } yield assertTrue(result == partitioningWithIdDTO1)
+      },
+      test("GetPartitioning - Returns expected NotFoundServiceError") {
+        for {
+          result <- PartitioningService.getPartitioning(partitioningDTO2).exit
+        } yield assertTrue(
+          result == Exit.fail(NotFoundServiceError("Failed to perform 'getPartitioning': Partitioning not found"))
+        )
+      },
+      test("GetPartitioning - Returns expected GeneralServiceError") {
+        assertZIO(PartitioningService.getPartitioning(partitioningDTO3).exit)(
+          failsWithA[GeneralServiceError]
+        )
+      }
+    ),
     suite("GetFlowPartitioningsSuite")(
       test("Returns expected Right with ResultHasMore[PartitioningWithIdDTO]") {
         for {