diff --git a/database/src/main/postgres/runs/V1.9.7__create_partitioning.sql b/database/src/main/postgres/runs/V1.9.7__create_partitioning.sql new file mode 100644 index 000000000..0b5ae74c9 --- /dev/null +++ b/database/src/main/postgres/runs/V1.9.7__create_partitioning.sql @@ -0,0 +1,93 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE OR REPLACE FUNCTION runs.create_partitioning( + IN i_partitioning JSONB, + IN i_by_user TEXT, + IN i_parent_partitioning_id BIGINT = NULL, + OUT status INTEGER, + OUT status_text TEXT, + OUT id_partitioning BIGINT +) RETURNS record AS +$$ +------------------------------------------------------------------------------- +-- +-- Function: runs.create_partitioning(3) +-- Creates a partitioning entry +-- +-- Parameters: +-- i_partitioning - partitioning to create or which existence to check +-- i_by_user - user behind the change +-- i_parent_partitioning_id - (optional) parent partitioning id +-- +-- Returns: +-- status - Status code +-- status_text - Status text +-- id_partitioning - id of the partitioning +-- +-- Status codes: +-- 11 - Partitioning created +-- 12 - Partitioning created with parent partitioning +-- 31 - Partitioning already exists +-- 41 - Parent partitioning not found +-- +------------------------------------------------------------------------------- +BEGIN + id_partitioning := runs._get_id_partitioning(i_partitioning, true); + + IF id_partitioning IS 
NOT NULL THEN + status := 31; + status_text := 'Partitioning already exists'; + RETURN; + END IF; + + IF i_parent_partitioning_id IS NOT NULL THEN + PERFORM 1 FROM runs.partitionings P WHERE P.id_partitioning = i_parent_partitioning_id; + IF NOT FOUND THEN + status := 41; + status_text := 'Parent partitioning not found'; + RETURN; + END IF; + END IF; + + INSERT INTO runs.partitionings (partitioning, created_by) + VALUES (i_partitioning, i_by_user) + RETURNING partitionings.id_partitioning INTO create_partitioning.id_partitioning; + + PERFORM 1 FROM flows._create_flow(id_partitioning, i_by_user); + status := 11; + status_text := 'Partitioning created'; + + IF i_parent_partitioning_id IS NOT NULL THEN + PERFORM 1 FROM flows._add_to_parent_flows(i_parent_partitioning_id, id_partitioning, i_by_user); + + -- copying measure definitions to establish continuity + INSERT INTO runs.measure_definitions(fk_partitioning, measure_name, measured_columns, created_by, created_at) + SELECT id_partitioning, CMD.measure_name, CMD.measured_columns, CMD.created_by, CMD.created_at + FROM runs.measure_definitions CMD + WHERE CMD.fk_partitioning = i_parent_partitioning_id; + + status := 12; + status_text := 'Partitioning created with parent partitioning'; + END IF; + + RETURN; +END; +$$ +LANGUAGE plpgsql VOLATILE SECURITY DEFINER; + +ALTER FUNCTION runs.create_partitioning(JSONB, TEXT, BIGINT) OWNER TO atum_owner; +GRANT EXECUTE ON FUNCTION runs.create_partitioning(JSONB, TEXT, BIGINT) TO atum_user; diff --git a/database/src/test/scala/za/co/absa/atum/database/runs/CreatePartitioningIntegrationTests.scala b/database/src/test/scala/za/co/absa/atum/database/runs/CreatePartitioningIntegrationTests.scala new file mode 100644 index 000000000..b9297d4a2 --- /dev/null +++ b/database/src/test/scala/za/co/absa/atum/database/runs/CreatePartitioningIntegrationTests.scala @@ -0,0 +1,191 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.database.runs + +import za.co.absa.balta.DBTestSuite +import za.co.absa.balta.classes.JsonBString + +class CreatePartitioningIntegrationTests extends DBTestSuite{ + + private val fncCreatePartitioning = "runs.create_partitioning" + + private val partitioning = JsonBString( + """ + |{ + | "version": 1, + | "keys": ["key1", "key3", "key2", "key4"], + | "keysToValues": { + | "key1": "valueX", + | "key2": "valueY", + | "key3": "valueZ", + | "key4": "valueA" + | } + |} + |""".stripMargin + ) + + private val parentPartitioning = JsonBString( + """ + |{ + | "version": 1, + | "keys": ["key1", "key3"], + | "keysToValues": { + | "key1": "valueX", + | "key3": "valueZ" + | } + |} + |""".stripMargin + ) + + test("Partitioning created") { + val partitioningID = function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + row.getLong("id_partitioning").get + } + + table("runs.partitionings").where(add("id_partitioning", partitioningID)) {partitioningResult => + val row = partitioningResult.next() + assert(row.getString("created_by").contains("Fantômas")) + assert(row.getOffsetDateTime("created_at").contains(now())) + } + + val idFlow = 
table("flows.partitioning_to_flow").where(add("fk_partitioning", partitioningID)) { partToFlowResult => + assert(partToFlowResult.hasNext) + val partToFlowRow = partToFlowResult.next() + val result = partToFlowRow.getLong("fk_flow") + assert(partToFlowRow.getString("created_by").contains("Fantômas")) + assert(!partToFlowResult.hasNext) + result.get + } + + table("flows.flows").where(add("id_flow", idFlow)) {flowsResult => + assert(flowsResult.hasNext) + val flowRow = flowsResult.next() + assert(flowRow.getString("flow_name").exists(_.startsWith("Custom flow #"))) + assert(flowRow.getString("flow_description").contains("")) + assert(flowRow.getBoolean("from_pattern").contains(false)) + assert(flowRow.getString("created_by").contains("Fantômas")) + assert(flowRow.getOffsetDateTime("created_at").contains(now())) + assert(!flowsResult.hasNext) + } + } + + test("Partitioning created with parent partitioning that already exists") { + val parentPartitioningID = function(fncCreatePartitioning) + .setParam("i_partitioning", parentPartitioning) + .setParam("i_by_user", "Albert Einstein") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + row.getLong("id_partitioning").get + } + + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", parentPartitioningID)) == 1 + ) + val partitioningID = function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParam("i_parent_partitioning_id", parentPartitioningID) + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(12)) + assert(row.getString("status_text").contains("Partitioning created with parent partitioning")) + row.getLong("id_partitioning").get + } + + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", 
parentPartitioningID)) == 1 + ) + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", partitioningID)) == 2 + ) + } + + test("Partitioning already exists") { + val partitioningID = function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + row.getLong("id_partitioning").get + } + + function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(31)) + assert(row.getString("status_text").contains("Partitioning already exists")) + assert(row.getLong("id_partitioning").contains(partitioningID)) + } + + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", partitioningID)) == 1 + ) + } + + test("Partitioning exists, parent is not added") { + val partitioningID = function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParamNull("i_parent_partitioning_id") + .execute { queryResult => + assert(queryResult.hasNext) + val row = queryResult.next() + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Partitioning created")) + row.getLong("id_partitioning").get + } + + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", partitioningID)) == 1 + ) + + function(fncCreatePartitioning) + .setParam("i_partitioning", partitioning) + .setParam("i_by_user", "Fantômas") + .setParam("i_parent_partitioning_id", 123456789L) + .execute { queryResult => + assert(queryResult.hasNext) + val row = 
queryResult.next() + assert(row.getInt("status").contains(31)) + assert(row.getString("status_text").contains("Partitioning already exists")) + } + + assert( + table("flows.partitioning_to_flow").count(add("fk_partitioning", partitioningID)) == 1 + ) + } +} diff --git a/model/src/main/scala/za/co/absa/atum/model/dto/PartitioningSubmitV2DTO.scala b/model/src/main/scala/za/co/absa/atum/model/dto/PartitioningSubmitV2DTO.scala new file mode 100644 index 000000000..0b48b53a2 --- /dev/null +++ b/model/src/main/scala/za/co/absa/atum/model/dto/PartitioningSubmitV2DTO.scala @@ -0,0 +1,31 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package za.co.absa.atum.model.dto + +import io.circe.generic.semiauto._ +import io.circe._ + +case class PartitioningSubmitV2DTO( + partitioning: PartitioningDTO, + parentPartitioningId: Option[Long], + author: String +) + +object PartitioningSubmitV2DTO { + implicit val decodePartitioningSubmitV2DTO: Decoder[PartitioningSubmitV2DTO] = deriveDecoder + implicit val encodePartitioningSubmitV2DTO: Encoder[PartitioningSubmitV2DTO] = deriveEncoder +} diff --git a/server/src/main/scala/za/co/absa/atum/server/Main.scala b/server/src/main/scala/za/co/absa/atum/server/Main.scala index 01360ea69..cd60a204d 100644 --- a/server/src/main/scala/za/co/absa/atum/server/Main.scala +++ b/server/src/main/scala/za/co/absa/atum/server/Main.scala @@ -51,6 +51,7 @@ object Main extends ZIOAppDefault with Server { CheckpointRepositoryImpl.layer, FlowRepositoryImpl.layer, CreatePartitioningIfNotExists.layer, + CreatePartitioning.layer, GetPartitioningMeasures.layer, GetPartitioningMeasuresById.layer, GetPartitioningAdditionalData.layer, diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala index 12f505c35..ac8d057f1 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala @@ -28,9 +28,9 @@ trait PartitioningController { partitioningSubmitDTO: PartitioningSubmitDTO ): IO[ErrorResponse, AtumContextDTO] - def createPartitioningIfNotExistsV2( - partitioningSubmitDTO: PartitioningSubmitDTO - ): IO[ErrorResponse, SingleSuccessResponse[AtumContextDTO]] + def postPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[ErrorResponse, (SingleSuccessResponse[PartitioningWithIdDTO], String)] def getPartitioningAdditionalDataV2( partitioningId: Long diff --git 
a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala index 998d422b3..cdb457e8c 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala @@ -18,6 +18,7 @@ package za.co.absa.atum.server.api.controller import za.co.absa.atum.model.dto._ import za.co.absa.atum.server.api.exception.ServiceError +import za.co.absa.atum.server.api.http.ApiPaths.V2Paths import za.co.absa.atum.server.api.service.PartitioningService import za.co.absa.atum.server.model.{ErrorResponse, InternalServerErrorResponse} import za.co.absa.atum.server.model.SuccessResponse.{MultiSuccessResponse, SingleSuccessResponse} @@ -49,12 +50,6 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) atumContextDTOEffect } - override def createPartitioningIfNotExistsV2( - partitioningSubmitDTO: PartitioningSubmitDTO - ): IO[ErrorResponse, SingleSuccessResponse[AtumContextDTO]] = { - mapToSingleSuccessResponse(createPartitioningIfNotExistsV1(partitioningSubmitDTO)) - } - override def getPartitioningCheckpointsV2( checkpointQueryDTO: CheckpointQueryDTO ): IO[ErrorResponse, MultiSuccessResponse[CheckpointDTO]] = { @@ -84,6 +79,21 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) ) } + override def postPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[ErrorResponse, (SingleSuccessResponse[PartitioningWithIdDTO], String)] = { + for { + response <- mapToSingleSuccessResponse( + serviceCall[PartitioningWithIdDTO, PartitioningWithIdDTO]( + partitioningService.createPartitioning(partitioningSubmitDTO) + ) + ) + uri <- createV2RootAnchoredResourcePath( + Seq(V2Paths.Partitionings, response.data.id.toString) + ) + } yield (response, uri) + } + override def 
patchPartitioningAdditionalDataV2( partitioningId: Long, additionalDataPatchDTO: AdditionalDataPatchDTO diff --git a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioning.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioning.scala new file mode 100644 index 000000000..471693768 --- /dev/null +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioning.scala @@ -0,0 +1,50 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package za.co.absa.atum.server.api.database.runs.functions + +import doobie.implicits.toSqlInterpolator +import io.circe.syntax.EncoderOps +import za.co.absa.atum.model.dto.PartitioningSubmitV2DTO +import za.co.absa.db.fadb.DBSchema +import za.co.absa.db.fadb.doobie.DoobieEngine +import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieSingleResultFunctionWithStatus +import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.api.database.runs.Runs +import za.co.absa.atum.server.model.PartitioningForDB +import zio._ +import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbPut + +class CreatePartitioning(implicit schema: DBSchema, dbEngine: DoobieEngine[Task]) + extends DoobieSingleResultFunctionWithStatus[PartitioningSubmitV2DTO, Long, Task](partitioningSubmit => + Seq( + fr"${PartitioningForDB.fromSeqPartitionDTO(partitioningSubmit.partitioning).asJson}", + fr"${partitioningSubmit.author}", + fr"${partitioningSubmit.parentPartitioningId}" + ) + ) + with StandardStatusHandling { + override def fieldsToSelect: Seq[String] = super.fieldsToSelect ++ Seq("id_partitioning") +} + +object CreatePartitioning { + val layer: URLayer[PostgresDatabaseProvider, CreatePartitioning] = ZLayer { + for { + dbProvider <- ZIO.service[PostgresDatabaseProvider] + } yield new CreatePartitioning()(Runs, dbProvider.dbEngine) + } +} diff --git a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIfNotExists.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIfNotExists.scala index a9f1e8b13..d39fa6c16 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIfNotExists.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIfNotExists.scala @@ -17,27 +17,25 @@ package 
za.co.absa.atum.server.api.database.runs.functions import doobie.implicits.toSqlInterpolator +import io.circe.syntax.EncoderOps import za.co.absa.atum.model.dto.PartitioningSubmitDTO -import za.co.absa.atum.server.model.PartitioningForDB import za.co.absa.db.fadb.DBSchema import za.co.absa.db.fadb.doobie.DoobieEngine import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieSingleResultFunctionWithStatus import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling import za.co.absa.atum.server.api.database.PostgresDatabaseProvider import za.co.absa.atum.server.api.database.runs.Runs +import za.co.absa.atum.server.model.PartitioningForDB import zio._ -import io.circe.syntax._ - import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbPut class CreatePartitioningIfNotExists(implicit schema: DBSchema, dbEngine: DoobieEngine[Task]) - extends DoobieSingleResultFunctionWithStatus[PartitioningSubmitDTO, Unit, Task](values => + extends DoobieSingleResultFunctionWithStatus[PartitioningSubmitDTO, Unit, Task](partitioningSubmit => Seq( - fr"${PartitioningForDB.fromSeqPartitionDTO(values.partitioning).asJson}", - fr"${values.authorIfNew}", - fr"${values.parentPartitioning.map(PartitioningForDB.fromSeqPartitionDTO).map(_.asJson)}" - ) - ) + fr"${PartitioningForDB.fromSeqPartitionDTO(partitioningSubmit.partitioning).asJson}", + fr"${partitioningSubmit.authorIfNew}", + fr"${partitioningSubmit.parentPartitioning.map(PartitioningForDB.fromSeqPartitionDTO).map(_.asJson)}" + )) with StandardStatusHandling object CreatePartitioningIfNotExists { diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala index b0f7e507e..d8c3d729f 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala @@ -62,13 +62,19 @@ trait Endpoints extends BaseEndpoints { 
.out(jsonBody[AtumContextDTO]) } - protected val createPartitioningEndpointV2 - : PublicEndpoint[PartitioningSubmitDTO, ErrorResponse, SingleSuccessResponse[AtumContextDTO], Any] = { + protected val postPartitioningEndpointV2: PublicEndpoint[ + PartitioningSubmitV2DTO, + ErrorResponse, + (SingleSuccessResponse[PartitioningWithIdDTO], String), + Any + ] = { apiV2.post - .in(CreatePartitioning) - .in(jsonBody[PartitioningSubmitDTO]) - .out(statusCode(StatusCode.Ok)) - .out(jsonBody[SingleSuccessResponse[AtumContextDTO]]) + .in(V2Paths.Partitionings) + .in(jsonBody[PartitioningSubmitV2DTO]) + .out(statusCode(StatusCode.Created)) + .out(jsonBody[SingleSuccessResponse[PartitioningWithIdDTO]]) + .out(header[String]("Location")) + .errorOutVariantPrepend(conflictErrorOneOfVariant) } protected val getPartitioningAdditionalDataEndpointV2 diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala index 01e1ed6ec..ea8d7d87b 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala @@ -19,7 +19,6 @@ package za.co.absa.atum.server.api.http import cats.syntax.semigroupk._ import org.http4s.HttpRoutes import sttp.tapir.PublicEndpoint -import sttp.tapir.model.ServerRequest import sttp.tapir.server.http4s.Http4sServerInterpreter import sttp.tapir.server.http4s.ztapir.ZHttp4sServerInterpreter import sttp.tapir.server.interceptor.metrics.MetricsRequestInterceptor @@ -28,7 +27,6 @@ import sttp.tapir.ztapir._ import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO, CheckpointV2DTO} import za.co.absa.atum.server.Constants.{SwaggerApiName, SwaggerApiVersion} import za.co.absa.atum.server.api.controller.{CheckpointController, FlowController, PartitioningController} -import za.co.absa.atum.server.api.http.ApiPaths.V2Paths import za.co.absa.atum.server.config.{HttpMonitoringConfig, 
JvmMonitoringConfig} import za.co.absa.atum.server.model.ErrorResponse import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse @@ -57,7 +55,7 @@ trait Routes extends Endpoints with ServerOptions { } ), createServerEndpoint(createPartitioningEndpointV1, PartitioningController.createPartitioningIfNotExistsV1), - createServerEndpoint(createPartitioningEndpointV2, PartitioningController.createPartitioningIfNotExistsV2), + createServerEndpoint(postPartitioningEndpointV2, PartitioningController.postPartitioning), createServerEndpoint( getPartitioningAdditionalDataEndpointV2, PartitioningController.getPartitioningAdditionalDataV2 @@ -99,7 +97,7 @@ trait Routes extends Endpoints with ServerOptions { createCheckpointEndpointV1, postCheckpointEndpointV2, createPartitioningEndpointV1, - createPartitioningEndpointV2, + postPartitioningEndpointV2, patchPartitioningAdditionalDataEndpointV2, getPartitioningCheckpointsEndpointV2, getPartitioningCheckpointEndpointV2, diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala index 14dbfc387..b35f908da 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala @@ -26,6 +26,10 @@ import zio.macros.accessible trait PartitioningRepository { def createPartitioningIfNotExists(partitioningSubmitDTO: PartitioningSubmitDTO): IO[DatabaseError, Unit] + def createPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[DatabaseError, PartitioningWithIdDTO] + def getPartitioningMeasures(partitioning: PartitioningDTO): IO[DatabaseError, Seq[MeasureDTO]] def getPartitioningAdditionalData(partitioning: PartitioningDTO): IO[DatabaseError, InitialAdditionalDataDTO] diff --git 
a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala index 3a275a363..83fa2d92f 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala @@ -20,13 +20,14 @@ import za.co.absa.atum.model.dto._ import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs import za.co.absa.atum.server.api.database.runs.functions._ import za.co.absa.atum.server.api.exception.DatabaseError -import za.co.absa.atum.server.model.{AdditionalDataFromDB, AdditionalDataItemFromDB, CheckpointFromDB, MeasureFromDB, PartitioningFromDB} +import za.co.absa.atum.server.model._ import zio._ import zio.interop.catz.asyncInstance import za.co.absa.atum.server.api.exception.DatabaseError.GeneralDatabaseError class PartitioningRepositoryImpl( createPartitioningIfNotExistsFn: CreatePartitioningIfNotExists, + createPartitioningFn: CreatePartitioning, getPartitioningMeasuresFn: GetPartitioningMeasures, getPartitioningAdditionalDataFn: GetPartitioningAdditionalData, createOrUpdateAdditionalDataFn: CreateOrUpdateAdditionalData, @@ -44,6 +45,17 @@ class PartitioningRepositoryImpl( ) } + override def createPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[DatabaseError, PartitioningWithIdDTO] = { + for { + result <- dbSingleResultCallWithStatus( + createPartitioningFn(partitioningSubmitDTO), + "createPartitioning" + ) + } yield PartitioningWithIdDTO(result, partitioningSubmitDTO.partitioning, partitioningSubmitDTO.author) + } + override def createOrUpdateAdditionalData( partitioningId: Long, additionalData: AdditionalDataPatchDTO @@ -99,7 +111,6 @@ class PartitioningRepositoryImpl( } } - override def getPartitioningMeasuresById(partitioningId: Long): 
IO[DatabaseError, Seq[MeasureDTO]] = { dbMultipleResultCallWithAggregatedStatus(getPartitioningMeasuresByIdFn(partitioningId), "getPartitioningMeasures") .map(_.map { case MeasureFromDB(measureName, measuredColumns) => @@ -112,6 +123,7 @@ class PartitioningRepositoryImpl( object PartitioningRepositoryImpl { val layer: URLayer[ CreatePartitioningIfNotExists + with CreatePartitioning with GetPartitioningMeasures with GetPartitioningAdditionalData with CreateOrUpdateAdditionalData @@ -123,6 +135,7 @@ object PartitioningRepositoryImpl { ] = ZLayer { for { createPartitioningIfNotExists <- ZIO.service[CreatePartitioningIfNotExists] + createPartitioning <- ZIO.service[CreatePartitioning] getPartitioningMeasures <- ZIO.service[GetPartitioningMeasures] getPartitioningAdditionalData <- ZIO.service[GetPartitioningAdditionalData] createOrUpdateAdditionalData <- ZIO.service[CreateOrUpdateAdditionalData] @@ -132,6 +145,7 @@ object PartitioningRepositoryImpl { getPartitioningMeasuresV2 <- ZIO.service[GetPartitioningMeasuresById] } yield new PartitioningRepositoryImpl( createPartitioningIfNotExists, + createPartitioning, getPartitioningMeasures, getPartitioningAdditionalData, createOrUpdateAdditionalData, diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala index 212bdeb89..b47bd9652 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala @@ -25,6 +25,10 @@ import zio.macros.accessible trait PartitioningService { def createPartitioningIfNotExists(partitioningSubmitDTO: PartitioningSubmitDTO): IO[ServiceError, Unit] + def createPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[ServiceError, PartitioningWithIdDTO] + def getPartitioningMeasures(partitioning: PartitioningDTO): IO[ServiceError, Seq[MeasureDTO]] 
def getPartitioningAdditionalData(partitioning: PartitioningDTO): IO[ServiceError, InitialAdditionalDataDTO] diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala index fc3d55e81..6fcd7c966 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala @@ -34,6 +34,15 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) ) } + override def createPartitioning( + partitioningSubmitDTO: PartitioningSubmitV2DTO + ): IO[ServiceError, PartitioningWithIdDTO] = { + repositoryCall( + partitioningRepository.createPartitioning(partitioningSubmitDTO), + "createPartitioning" + ) + } + override def getPartitioningMeasures(partitioning: PartitioningDTO): IO[ServiceError, Seq[MeasureDTO]] = { repositoryCall( partitioningRepository.getPartitioningMeasures(partitioning), diff --git a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala index d6c3995ad..61d0df6b0 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala @@ -89,6 +89,18 @@ trait TestData { protected val partitioningSubmitDTO3: PartitioningSubmitDTO = partitioningSubmitDTO1.copy(authorIfNew = "yetAnotherAuthor") + protected val partitioningSubmitV2DTO1: PartitioningSubmitV2DTO = PartitioningSubmitV2DTO( + partitioning = partitioningDTO1, + parentPartitioningId = None, + author = "" + ) + + protected val partitioningSubmitV2DTO2: PartitioningSubmitV2DTO = + partitioningSubmitV2DTO1.copy(author = "differentAuthor") + + protected val partitioningSubmitV2DTO3: PartitioningSubmitV2DTO = + partitioningSubmitV2DTO1.copy(author = "yetAnotherAuthor") + // Measure protected val 
measureDTO1: MeasureDTO = MeasureDTO("count1", Seq("col_A1", "col_B1")) protected val measureDTO2: MeasureDTO = MeasureDTO("count2", Seq("col_A2", "col_B2")) diff --git a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala index 3baa18639..2473bf490 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala @@ -19,9 +19,9 @@ package za.co.absa.atum.server.api.controller import org.mockito.Mockito.{mock, when} import za.co.absa.atum.model.dto.CheckpointDTO import za.co.absa.atum.server.api.TestData -import za.co.absa.atum.server.api.exception.ServiceError.{GeneralServiceError, NotFoundServiceError} +import za.co.absa.atum.server.api.exception.ServiceError.{ConflictServiceError, GeneralServiceError, NotFoundServiceError} import za.co.absa.atum.server.api.service.PartitioningService -import za.co.absa.atum.server.model.{InternalServerErrorResponse, NotFoundErrorResponse} +import za.co.absa.atum.server.model.{ConflictErrorResponse, InternalServerErrorResponse, NotFoundErrorResponse} import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse import zio._ import zio.test.Assertion.failsWithA @@ -35,6 +35,13 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { when(partitioningServiceMock.createPartitioningIfNotExists(partitioningSubmitDTO2)) .thenReturn(ZIO.fail(GeneralServiceError("boom!"))) + when(partitioningServiceMock.createPartitioning(partitioningSubmitV2DTO1)) + .thenReturn(ZIO.succeed(partitioningWithIdDTO1)) + when(partitioningServiceMock.createPartitioning(partitioningSubmitV2DTO2)) + .thenReturn(ZIO.fail(GeneralServiceError("boom!"))) + when(partitioningServiceMock.createPartitioning(partitioningSubmitV2DTO3)) + 
.thenReturn(ZIO.fail(ConflictServiceError("Partitioning already present"))) + when(partitioningServiceMock.getPartitioningMeasures(partitioningDTO1)) .thenReturn(ZIO.succeed(Seq(measureDTO1, measureDTO2))) @@ -85,6 +92,27 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { ) } ), + suite("CreatePartitioningSuite")( + test("Returns expected PartitioningWithIdDTO") { + for { + result <- PartitioningController.postPartitioning(partitioningSubmitV2DTO1) + expectedData = SingleSuccessResponse(partitioningWithIdDTO1, uuid1) + actualData = result._1.copy(requestId = uuid1) + expectedUri = s"/api/v2/partitionings/${partitioningWithIdDTO1.id}" + actualUri = result._2 + } yield assertTrue(actualData == expectedData && actualUri == expectedUri) + }, + test("Returns expected InternalServerErrorResponse") { + assertZIO(PartitioningController.postPartitioning(partitioningSubmitV2DTO2).exit)( + failsWithA[InternalServerErrorResponse] + ) + }, + test("Returns expected ConflictErrorResponse") { + assertZIO(PartitioningController.postPartitioning(partitioningSubmitV2DTO3).exit)( + failsWithA[ConflictErrorResponse] + ) + } + ), suite("PatchAdditionalDataSuite")( test("Returns expected AdditionalDataSubmitDTO") { for { diff --git a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIntegrationTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIntegrationTests.scala new file mode 100644 index 000000000..d1ff73067 --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreatePartitioningIntegrationTests.scala @@ -0,0 +1,45 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.server.api.database.runs.functions + +import za.co.absa.atum.server.ConfigProviderTest +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.api.{TestData, TestTransactorProvider} +import zio._ +import zio.interop.catz.asyncInstance +import zio.test._ + +object CreatePartitioningIntegrationTests extends ConfigProviderTest with TestData { + + override def spec: Spec[Unit with TestEnvironment with Scope, Any] = { + + suite("CreatePartitioningSuite")( + test("Returns expected Right") { + for { + createPartitioning <- ZIO.service[CreatePartitioning] + result <- createPartitioning(partitioningSubmitV2DTO1) + } yield assertTrue(result.isRight) + } + ).provide( + CreatePartitioning.layer, + PostgresDatabaseProvider.layer, + TestTransactorProvider.layerWithRollback + ) + + } + +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/CreatePartitioningEndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/PostPartitioningEndpointUnitTests.scala similarity index 59% rename from server/src/test/scala/za/co/absa/atum/server/api/http/CreatePartitioningEndpointUnitTests.scala rename to server/src/test/scala/za/co/absa/atum/server/api/http/PostPartitioningEndpointUnitTests.scala index d56e0206d..660642cb6 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/http/CreatePartitioningEndpointUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/PostPartitioningEndpointUnitTests.scala @@ -17,36 +17,42 @@ package 
za.co.absa.atum.server.api.http import org.mockito.Mockito.{mock, when} +import sttp.client3.circe._ import sttp.client3.testing.SttpBackendStub import sttp.client3.{UriContext, basicRequest} -import sttp.client3.circe._ import sttp.model.StatusCode import sttp.tapir.server.stub.TapirStubInterpreter import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} -import za.co.absa.atum.model.dto.AtumContextDTO +import za.co.absa.atum.model.dto.{PartitioningSubmitDTO, PartitioningSubmitV2DTO, PartitioningWithIdDTO} import za.co.absa.atum.server.api.TestData import za.co.absa.atum.server.api.controller.PartitioningController -import za.co.absa.atum.server.model.{GeneralErrorResponse, InternalServerErrorResponse} import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse +import za.co.absa.atum.server.model.{ConflictErrorResponse, InternalServerErrorResponse} import zio._ import zio.test.Assertion.equalTo import zio.test._ -object CreatePartitioningEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { +object PostPartitioningEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { + + private val partitioningControllerMock = mock(classOf[PartitioningController]) - private val createPartitioningEndpointMock = mock(classOf[PartitioningController]) + when(partitioningControllerMock.postPartitioning(partitioningSubmitV2DTO1)) + .thenReturn(ZIO.succeed((SingleSuccessResponse(partitioningWithIdDTO1, uuid1), "location"))) - when(createPartitioningEndpointMock.createPartitioningIfNotExistsV2(partitioningSubmitDTO1)) - .thenReturn(ZIO.succeed(SingleSuccessResponse(createAtumContextDTO(partitioningSubmitDTO1), uuid1))) - when(createPartitioningEndpointMock.createPartitioningIfNotExistsV2(partitioningSubmitDTO2)) - .thenReturn(ZIO.fail(GeneralErrorResponse("error"))) - when(createPartitioningEndpointMock.createPartitioningIfNotExistsV2(partitioningSubmitDTO3)) + when(partitioningControllerMock.postPartitioning(partitioningSubmitV2DTO2)) + 
.thenReturn(ZIO.fail(ConflictErrorResponse("Partitioning already exists"))) + + when(partitioningControllerMock.postPartitioning(partitioningSubmitV2DTO3)) .thenReturn(ZIO.fail(InternalServerErrorResponse("error"))) - private val createPartitioningEndpointMockLayer = ZLayer.succeed(createPartitioningEndpointMock) + private val createPartitioningEndpointMockLayer = ZLayer.succeed(partitioningControllerMock) private val createPartitioningServerEndpoint = - createPartitioningEndpointV2.zServerLogic(PartitioningController.createPartitioningIfNotExistsV2) + postPartitioningEndpointV2.zServerLogic({ + partitioningSubmitDTO: PartitioningSubmitV2DTO => + PartitioningController.postPartitioning(partitioningSubmitDTO) + } + ) def spec: Spec[TestEnvironment with Scope, Any] = { val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[PartitioningController])) @@ -55,34 +61,35 @@ object CreatePartitioningEndpointUnitTests extends ZIOSpecDefault with Endpoints .backend() val request = basicRequest - .post(uri"https://test.com/api/v2/create-partitioning") - .response(asJson[SingleSuccessResponse[AtumContextDTO]]) + .post(uri"https://test.com/api/v2/partitionings") + .response(asJson[SingleSuccessResponse[PartitioningWithIdDTO]]) suite("CreatePartitioningEndpointSuite")( - test("Returns expected AtumContextDTO") { + test("Returns expected PartitioningWithIdDTO and location header") { val response = request - .body(partitioningSubmitDTO1) + .body(partitioningSubmitV2DTO1) .send(backendStub) val body = response.map(_.body) val statusCode = response.map(_.code) + val header = response.map(_.header("Location")) - assertZIO(body <&> statusCode)( - equalTo(Right(SingleSuccessResponse(createAtumContextDTO(partitioningSubmitDTO1), uuid1)), StatusCode.Ok) + assertZIO(body <&> statusCode <&> header)( + equalTo(Right(SingleSuccessResponse(partitioningWithIdDTO1, uuid1)), StatusCode.Created, Some("location")) ) }, test("Returns expected BadRequest") { val response = 
request - .body(partitioningSubmitDTO2) + .body(partitioningSubmitV2DTO2) .send(backendStub) val statusCode = response.map(_.code) - assertZIO(statusCode)(equalTo(StatusCode.BadRequest)) + assertZIO(statusCode)(equalTo(StatusCode.Conflict)) }, test("Returns expected InternalServerError") { val response = request - .body(partitioningSubmitDTO3) + .body(partitioningSubmitV2DTO3) .send(backendStub) val statusCode = response.map(_.code) diff --git a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala index 4e2bec8e1..8f3f3dc49 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala @@ -17,13 +17,13 @@ package za.co.absa.atum.server.api.repository import org.mockito.Mockito.{mock, when} -import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataItemDTO} +import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataItemDTO, PartitioningWithIdDTO} import za.co.absa.atum.server.api.TestData import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs import za.co.absa.atum.server.api.database.runs.functions._ import za.co.absa.atum.server.api.exception.DatabaseError import za.co.absa.atum.server.api.exception.DatabaseError._ -import za.co.absa.db.fadb.exceptions.{DataNotFoundException, ErrorInDataException} +import za.co.absa.db.fadb.exceptions.{DataConflictException, DataNotFoundException, ErrorInDataException} import za.co.absa.db.fadb.status.{FunctionStatus, Row} import zio._ import zio.interop.catz.asyncInstance @@ -45,11 +45,32 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { private val createPartitioningIfNotExistsMockLayer = 
ZLayer.succeed(createPartitioningIfNotExistsMock) + // Create Partitioning V2 Mocks + private val createPartitioningMock = mock(classOf[CreatePartitioning]) + + when(createPartitioningMock.apply(partitioningSubmitV2DTO1)) + .thenReturn(ZIO.right(Row(FunctionStatus(11, "success"), partitioningWithIdDTO1.id))) + when(createPartitioningMock.apply(partitioningSubmitV2DTO2)) + .thenReturn(ZIO.left(DataConflictException(FunctionStatus(31, "Partitioning already present")))) + when(createPartitioningMock.apply(partitioningSubmitV2DTO3)) + .thenReturn(ZIO.fail(new Exception("boom!"))) + + private val createPartitioningMockLayer = ZLayer.succeed(createPartitioningMock) + // Create Additional Data Mocks private val createOrUpdateAdditionalDataMock = mock(classOf[CreateOrUpdateAdditionalData]) when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(1L, additionalDataPatchDTO1))) - .thenReturn(ZIO.right(Seq(Row(FunctionStatus(11, "Additional data have been updated, added or both"), Option.empty[AdditionalDataItemFromDB])))) + .thenReturn( + ZIO.right( + Seq( + Row( + FunctionStatus(11, "Additional data have been updated, added or both"), + Option.empty[AdditionalDataItemFromDB] + ) + ) + ) + ) when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(0L, additionalDataPatchDTO1))) .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(2L, additionalDataPatchDTO1))) @@ -117,7 +138,8 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { private val getPartitioningMeasuresV2Mock = mock(classOf[GetPartitioningMeasuresById]) when(getPartitioningMeasuresV2Mock.apply(1L)).thenReturn( - ZIO.right(Seq(Row(FunctionStatus(0, "success"), measureFromDB1), Row(FunctionStatus(0, 
"success"), measureFromDB2))) + ) when(getPartitioningMeasuresV2Mock.apply(2L)) .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) when(getPartitioningMeasuresV2Mock.apply(3L)) @@ -126,7 +148,6 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { private val getPartitioningMeasuresV2MockLayer = ZLayer.succeed(getPartitioningMeasuresV2Mock) - override def spec: Spec[TestEnvironment with Scope, Any] = { suite("PartitioningRepositorySuite")( @@ -153,6 +174,29 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { ) } ), + suite("CreatePartitioningSuite")( + test("Returns expected Right with PartitioningWithIdDTO") { + for { + result <- PartitioningRepository.createPartitioning(partitioningSubmitV2DTO1) + } yield assertTrue(result.isInstanceOf[PartitioningWithIdDTO]) + }, + test("Returns expected Left with DataConflictException") { + for { + result <- PartitioningRepository.createPartitioning(partitioningSubmitV2DTO2).exit + } yield assertTrue( + result == Exit.fail( + ConflictDatabaseError( + "Exception caused by operation: 'createPartitioning': (31) Partitioning already present" + ) + ) + ) + }, + test("Returns expected GeneralDatabaseError") { + assertZIO(PartitioningRepository.createPartitioning(partitioningSubmitV2DTO3).exit)( + failsWithA[GeneralDatabaseError] + ) + } + ), suite("CreateOrUpdateAdditionalDataSuite")( test("Returns expected Right with Unit") { for { @@ -245,9 +289,11 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { test("Returns expected DataNotFoundException") { for { result <- PartitioningRepository.getPartitioning(9999L).exit - } yield assertTrue(result == Exit.fail(NotFoundDatabaseError( - "Exception caused by operation: 'getPartitioningById': (41) Partitioning not found")) + } yield assertTrue( + result == Exit.fail( + NotFoundDatabaseError("Exception caused by operation: 'getPartitioningById': (41) Partitioning not 
found") ) + ) }, test("Returns expected GeneralDatabaseError") { assertZIO(PartitioningRepository.getPartitioning(8888L).exit)( @@ -280,6 +326,7 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { ).provide( PartitioningRepositoryImpl.layer, createPartitioningIfNotExistsMockLayer, + createPartitioningMockLayer, getPartitioningMeasuresMockLayer, getPartitioningAdditionalDataMockLayer, createOrUpdateAdditionalDataMockLayer, diff --git a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala index b7db40174..ffbe160f7 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala @@ -17,6 +17,7 @@ package za.co.absa.atum.server.api.service import org.mockito.Mockito.{mock, when} +import za.co.absa.atum.model.dto.PartitioningWithIdDTO import za.co.absa.atum.server.api.TestData import za.co.absa.atum.server.api.exception.DatabaseError._ import za.co.absa.atum.server.api.exception.ServiceError @@ -36,6 +37,13 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { when(partitioningRepositoryMock.createPartitioningIfNotExists(partitioningSubmitDTO3)) .thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) + when(partitioningRepositoryMock.createPartitioning(partitioningSubmitV2DTO1)) + .thenReturn(ZIO.succeed(PartitioningWithIdDTO(1L, Seq.empty, "author"))) + when(partitioningRepositoryMock.createPartitioning(partitioningSubmitV2DTO2)) + .thenReturn(ZIO.fail(ConflictDatabaseError("Partitioning already exists"))) + when(partitioningRepositoryMock.createPartitioning(partitioningSubmitV2DTO3)) + .thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) + when(partitioningRepositoryMock.createOrUpdateAdditionalData(1L, additionalDataPatchDTO1)) 
.thenReturn(ZIO.succeed(additionalDataDTO1)) when(partitioningRepositoryMock.createOrUpdateAdditionalData(0L, additionalDataPatchDTO1)) @@ -100,6 +108,27 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { ) } ), + suite("CreatePartitioningSuite")( + test("Returns expected Right with PartitioningWithIdDTO") { + for { + result <- PartitioningService.createPartitioning(partitioningSubmitV2DTO1) + } yield assertTrue(result == PartitioningWithIdDTO(1L, Seq.empty, "author")) + }, + test("Returns expected ConflictServiceError") { + for { + result <- PartitioningService.createPartitioning(partitioningSubmitV2DTO2).exit + } yield assertTrue( + result == Exit.fail( + ConflictServiceError("Failed to perform 'createPartitioning': Partitioning already exists") + ) + ) + }, + test("Returns expected GeneralServiceError") { + assertZIO(PartitioningService.createPartitioning(partitioningSubmitV2DTO3).exit)( + failsWithA[GeneralServiceError] + ) + } + ), suite("PatchAdditionalDataSuite")( test("Returns expected Right with Unit") { for {