diff --git a/database/src/main/postgres/runs/V1.5.16__create_or_update_additional_data.sql b/database/src/main/postgres/runs/V1.5.16__create_or_update_additional_data.sql index 8f48ae1d5..97e018e71 100644 --- a/database/src/main/postgres/runs/V1.5.16__create_or_update_additional_data.sql +++ b/database/src/main/postgres/runs/V1.5.16__create_or_update_additional_data.sql @@ -14,13 +14,15 @@ */ CREATE OR REPLACE FUNCTION runs.create_or_update_additional_data( - IN i_partitioning JSONB, - IN i_additional_data HSTORE, - IN i_by_user TEXT, - OUT status INTEGER, - OUT status_text TEXT, - OUT id_additional_data BIGINT -) RETURNS record AS + IN i_partitioning_id BIGINT, + IN i_additional_data HSTORE, + IN i_by_user TEXT, + OUT status INTEGER, + OUT status_text TEXT, + OUT o_ad_name TEXT, + OUT o_ad_value TEXT, + OUT o_ad_author TEXT +) RETURNS SETOF record AS $$ ------------------------------------------------------------------------------- -- @@ -28,66 +30,65 @@ $$ -- Adds the additional data for the input partitioning. If additional data of a given name already -- exists for such partitioning, the value is updated and the old value is moved to the -- additional data history table. +-- The function returns all actual additional data of the partitioning. 
-- -- Parameters: --- i_partitioning - partitioning to add the additional data for +-- i_partitioning_id - id of partitioning to add the additional data for -- i_additional_data - sets of key/value pairs representing name and values of the additional data -- i_by_user - user behind the change (an author of AD records if there will be something to upsert) -- -- Returns: -- status - Status code -- status_text - Status text --- id_additional_data - id of the data added +-- o_ad_name - Name of the additional data +-- o_ad_value - Value of the additional data +-- o_ad_author - Author of the additional data -- -- Status codes: --- 11 - Additional data have been added --- 12 - Additional data have been upserted +-- 11 - Additional data have been updated, added or both -- 14 - No changes in additional data (this is when they already existed) -- 41 - Partitioning not found -- ------------------------------------------------------------------------------- DECLARE - _fk_partitioning BIGINT; - _records_updated BOOLEAN; + _records_updated BOOLEAN; BEGIN - _fk_partitioning := runs._get_id_partitioning(i_partitioning, true); - - IF _fk_partitioning IS NULL THEN + PERFORM 1 FROM runs.partitionings WHERE id_partitioning = i_partitioning_id; + IF NOT FOUND THEN status := 41; status_text := 'Partitioning not found'; + RETURN NEXT; RETURN; END IF; -- 1. (backup) get records that already exist but values differ, -- then insert them into AD history table and -- then update the actual AD table with new values - _records_updated := runs._update_existing_additional_data(_fk_partitioning, i_additional_data, i_by_user); + _records_updated := runs._update_existing_additional_data(i_partitioning_id, i_additional_data, i_by_user); -- 2. 
(insert) get records that do not not exist yet and insert it into ad table -- (their original rows were previously saved in step 1) INSERT INTO runs.additional_data (fk_partitioning, ad_name, ad_value, created_by) - SELECT _fk_partitioning, ad_input.key, ad_input.value, i_by_user + SELECT i_partitioning_id, ad_input.key, ad_input.value, i_by_user FROM each(i_additional_data) AS ad_input ON CONFLICT (fk_partitioning, ad_name) DO NOTHING; - IF _records_updated THEN - status := 12; - status_text := 'Additional data have been upserted'; + -- 3. return the updated additional data (all, not only updated/added records) + IF NOT _records_updated AND NOT FOUND THEN + RETURN QUERY + SELECT 14, 'No changes in additional data', GPAD.ad_name, GPAD.ad_value, GPAD.ad_author + FROM runs.get_partitioning_additional_data(i_partitioning_id) AS GPAD; + RETURN; ELSE - IF found THEN - status := 11; - status_text := 'Additional data have been added'; - ELSE - status := 14; - status_text := 'No changes in additional data'; - END IF; + RETURN QUERY + SELECT 11, 'Additional data have been updated, added or both', GPAD.ad_name, GPAD.ad_value, GPAD.ad_author + FROM runs.get_partitioning_additional_data(i_partitioning_id) AS GPAD; + RETURN; END IF; - - RETURN; END; $$ LANGUAGE plpgsql VOLATILE SECURITY DEFINER; -ALTER FUNCTION runs.create_or_update_additional_data(JSONB, HSTORE, TEXT) OWNER TO atum_owner; -GRANT EXECUTE ON FUNCTION runs.create_or_update_additional_data(JSONB, HSTORE, TEXT) TO atum_user; +ALTER FUNCTION runs.create_or_update_additional_data(BIGINT, HSTORE, TEXT) OWNER TO atum_owner; +GRANT EXECUTE ON FUNCTION runs.create_or_update_additional_data(BIGINT, HSTORE, TEXT) TO atum_user; diff --git a/database/src/test/scala/za/co/absa/atum/database/runs/CreateOrUpdateAdditionalDataIntegrationTests.scala b/database/src/test/scala/za/co/absa/atum/database/runs/CreateOrUpdateAdditionalDataIntegrationTests.scala index 679450fb2..32d8d679c 100644 --- 
a/database/src/test/scala/za/co/absa/atum/database/runs/CreateOrUpdateAdditionalDataIntegrationTests.scala +++ b/database/src/test/scala/za/co/absa/atum/database/runs/CreateOrUpdateAdditionalDataIntegrationTests.scala @@ -20,7 +20,7 @@ import za.co.absa.balta.DBTestSuite import za.co.absa.balta.classes.JsonBString import za.co.absa.balta.classes.setter.CustomDBType -class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ +class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite { private val fncCreateOrUpdateAdditionalData = "runs.create_or_update_additional_data" @@ -70,15 +70,35 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ ) function(fncCreateOrUpdateAdditionalData) - .setParam("i_partitioning", partitioning) + .setParam("i_partitioning_id", fkPartitioning) .setParam("i_additional_data", inputADToUpsert) .setParam("i_by_user", "MikeRusty") .execute { queryResult => assert(queryResult.hasNext) val row = queryResult.next() - assert(row.getInt("status").contains(12)) - assert(row.getString("status_text").contains("Additional data have been upserted")) + assert(row.getInt("status").contains(11)) + assert(row.getString("status_text").contains("Additional data have been updated, added or both")) + assert(row.getString("o_ad_name").contains("PrimaryOwner")) + assert(row.getString("o_ad_value").contains("TechnicalManagerA")) + assert(row.getString("o_ad_author").contains("SuperTool")) + + assert(queryResult.hasNext) + val row2 = queryResult.next() + + assert(row2.getInt("status").contains(11)) + assert(row2.getString("status_text").contains("Additional data have been updated, added or both")) + assert(row2.getString("o_ad_name").contains("SecondaryOwner")) + assert(row2.getString("o_ad_value").contains("AnalystNew")) + assert(row2.getString("o_ad_author").contains("MikeRusty")) + + assert(queryResult.hasNext) + val row3 = queryResult.next() + assert(row3.getInt("status").contains(11)) + 
assert(row3.getString("status_text").contains("Additional data have been updated, added or both")) + assert(row3.getString("o_ad_name").contains("IsDatasetInDatalake")) + assert(row3.getString("o_ad_value").contains("true")) + assert(row3.getString("o_ad_author").contains("MikeRusty")) assert(!queryResult.hasNext) } @@ -134,7 +154,7 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ ) function(fncCreateOrUpdateAdditionalData) - .setParam("i_partitioning", partitioning) + .setParam("i_partitioning_id", fkPartitioning) .setParam("i_additional_data", inputADToUpsert) .setParam("i_by_user", "MikeRusty") .execute { queryResult => @@ -142,9 +162,7 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ val row = queryResult.next() assert(row.getInt("status").contains(11)) - assert(row.getString("status_text").contains("Additional data have been added")) - - assert(!queryResult.hasNext) + assert(row.getString("status_text").contains("Additional data have been updated, added or both")) } assert(table("runs.additional_data").count() == 5) @@ -199,7 +217,7 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ ) function(fncCreateOrUpdateAdditionalData) - .setParam("i_partitioning", partitioning) + .setParam("i_partitioning_id", fkPartitioning) .setParam("i_additional_data", inputADToUpsert) .setParam("i_by_user", "MikeRusty") .execute { queryResult => @@ -208,8 +226,6 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ assert(row.getInt("status").contains(14)) assert(row.getString("status_text").contains("No changes in additional data")) - - assert(!queryResult.hasNext) } assert(table("runs.additional_data").count(add("fk_partitioning", fkPartitioning)) == 2) @@ -228,7 +244,7 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite{ ) function(fncCreateOrUpdateAdditionalData) - .setParam("i_partitioning", partitioning) + .setParam("i_partitioning_id", 0L) 
.setParam("i_additional_data", inputADToInsert) .setParam("i_by_user", "MikeRusty") .execute { queryResult => diff --git a/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchDTO.scala b/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchDTO.scala index 0a113ff48..f6b424cda 100644 --- a/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchDTO.scala +++ b/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchDTO.scala @@ -20,7 +20,8 @@ import io.circe.generic.semiauto.{deriveDecoder, deriveEncoder} import io.circe.{Decoder, Encoder} case class AdditionalDataPatchDTO( - data: Map[String, AdditionalDataItemDTO] + byUser: String, + data: Map[String, String] ) object AdditionalDataPatchDTO { diff --git a/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchItemDTO.scala b/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchItemDTO.scala new file mode 100644 index 000000000..b8743945b --- /dev/null +++ b/model/src/main/scala/za/co/absa/atum/model/dto/AdditionalDataPatchItemDTO.scala @@ -0,0 +1,30 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package za.co.absa.atum.model.dto + +import io.circe.generic.semiauto.{deriveDecoder, deriveEncoder} +import io.circe.{Decoder, Encoder} + +case class AdditionalDataPatchItemDTO( + value: String, + author: String +) + +object AdditionalDataPatchItemDTO { + implicit val encoderAdditionalDataPatchItem: Encoder[AdditionalDataPatchItemDTO] = deriveEncoder + implicit val decoderAdditionalDataPatchItem: Decoder[AdditionalDataPatchItemDTO] = deriveDecoder +} diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala index 5b8f3bd2b..5e88951ae 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/BaseController.scala @@ -19,8 +19,7 @@ package za.co.absa.atum.server.api.controller import za.co.absa.atum.server.api.exception.ServiceError import za.co.absa.atum.server.api.exception.ServiceError._ import za.co.absa.atum.server.api.http.ApiPaths -import za.co.absa.atum.server.model.{ConflictErrorResponse, ErrorResponse, InternalServerErrorResponse, NotFoundErrorResponse} -import za.co.absa.atum.server.model.SuccessResponse.{MultiSuccessResponse, SingleSuccessResponse} +import za.co.absa.atum.server.model.SuccessResponse._ import za.co.absa.atum.server.model._ import zio._ diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala index 7060d9dbc..2d27efa3b 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala @@ -36,9 +36,10 @@ trait PartitioningController { partitioningId: Long ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataDTO]] - def createOrUpdateAdditionalDataV2( 
- additionalData: AdditionalDataSubmitDTO - ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataSubmitDTO]] + def patchPartitioningAdditionalDataV2( + partitioningId: Long, + additionalDataPatchDTO: AdditionalDataPatchDTO + ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataDTO]] def getPartitioningCheckpointsV2( checkpointQueryDTO: CheckpointQueryDTO diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala index d3defc721..7b08e3bef 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala @@ -55,17 +55,6 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) mapToSingleSuccessResponse(createPartitioningIfNotExistsV1(partitioningSubmitDTO)) } - override def createOrUpdateAdditionalDataV2( - additionalData: AdditionalDataSubmitDTO - ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataSubmitDTO]] = { - mapToSingleSuccessResponse( - serviceCall[Unit, AdditionalDataSubmitDTO]( - partitioningService.createOrUpdateAdditionalData(additionalData), - _ => additionalData - ) - ) - } - override def getPartitioningCheckpointsV2( checkpointQueryDTO: CheckpointQueryDTO ): IO[ErrorResponse, MultiSuccessResponse[CheckpointDTO]] = { @@ -94,6 +83,17 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) ) ) } + + override def patchPartitioningAdditionalDataV2( + partitioningId: Long, + additionalDataPatchDTO: AdditionalDataPatchDTO + ): IO[ErrorResponse, SingleSuccessResponse[AdditionalDataDTO]] = { + mapToSingleSuccessResponse( + serviceCall[AdditionalDataDTO, AdditionalDataDTO]( + partitioningService.patchAdditionalData(partitioningId, additionalDataPatchDTO) + ) + ) + } } object PartitioningControllerImpl { diff --git 
a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalData.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalData.scala index ca8281a55..bafddf3c5 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalData.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalData.scala @@ -17,31 +17,42 @@ package za.co.absa.atum.server.api.database.runs.functions import doobie.implicits.toSqlInterpolator -import za.co.absa.atum.model.dto.AdditionalDataSubmitDTO import za.co.absa.atum.server.api.database.PostgresDatabaseProvider import za.co.absa.atum.server.api.database.runs.Runs -import za.co.absa.atum.server.model.PartitioningForDB +import za.co.absa.atum.server.model.AdditionalDataItemFromDB import za.co.absa.db.fadb.DBSchema -import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieSingleResultFunctionWithStatus +import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieMultipleResultFunctionWithAggStatus import za.co.absa.db.fadb.doobie.DoobieEngine import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling import zio._ import io.circe.syntax._ - import doobie.postgres.implicits._ -import za.co.absa.db.fadb.doobie.postgres.circe.implicits.jsonbPut +import za.co.absa.atum.model.dto.AdditionalDataPatchDTO +import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs +import za.co.absa.db.fadb.status.aggregation.implementations.ByFirstRowStatusAggregator class CreateOrUpdateAdditionalData(implicit schema: DBSchema, dbEngine: DoobieEngine[Task]) - extends DoobieSingleResultFunctionWithStatus[AdditionalDataSubmitDTO, Unit, Task](values => + extends DoobieMultipleResultFunctionWithAggStatus[CreateOrUpdateAdditionalDataArgs, Option[ + AdditionalDataItemFromDB + ], Task](args => Seq( - 
fr"${PartitioningForDB.fromSeqPartitionDTO(values.partitioning).asJson}", - fr"${values.additionalData.map { case (k, v) => (k, v.orNull) }}", - fr"${values.author}" + fr"${args.partitioningId}", + fr"${args.additionalData.data}", + fr"${args.additionalData.byUser}" ) ) with StandardStatusHandling + with ByFirstRowStatusAggregator { + + override def fieldsToSelect: Seq[String] = super.fieldsToSelect ++ Seq("o_ad_name", "o_ad_value", "o_ad_author") +} object CreateOrUpdateAdditionalData { + case class CreateOrUpdateAdditionalDataArgs( + partitioningId: Long, + additionalData: AdditionalDataPatchDTO + ) + val layer: URLayer[PostgresDatabaseProvider, CreateOrUpdateAdditionalData] = ZLayer { for { dbProvider <- ZIO.service[PostgresDatabaseProvider] diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala index 5ad4e2def..daa0a684f 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala @@ -80,13 +80,16 @@ trait Endpoints extends BaseEndpoints { .errorOutVariantPrepend(notFoundErrorOneOfVariant) } - protected val createOrUpdateAdditionalDataEndpointV2 - : PublicEndpoint[AdditionalDataSubmitDTO, ErrorResponse, SingleSuccessResponse[AdditionalDataSubmitDTO], Any] = { - apiV2.post - .in(CreateOrUpdateAdditionalData) - .in(jsonBody[AdditionalDataSubmitDTO]) + protected val patchPartitioningAdditionalDataEndpointV2 + : PublicEndpoint[(Long, AdditionalDataPatchDTO), ErrorResponse, SingleSuccessResponse[ + AdditionalDataDTO + ], Any] = { + apiV2.patch + .in(V2Paths.Partitionings / path[Long]("partitioningId") / V2Paths.AdditionalData) + .in(jsonBody[AdditionalDataPatchDTO]) .out(statusCode(StatusCode.Ok)) - .out(jsonBody[SingleSuccessResponse[AdditionalDataSubmitDTO]]) + .out(jsonBody[SingleSuccessResponse[AdditionalDataDTO]]) + .errorOutVariantPrepend(notFoundErrorOneOfVariant) 
} protected val getPartitioningCheckpointEndpointV2 diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala index 8fbace613..1177e36d1 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala @@ -25,7 +25,7 @@ import sttp.tapir.server.http4s.ztapir.ZHttp4sServerInterpreter import sttp.tapir.server.interceptor.metrics.MetricsRequestInterceptor import sttp.tapir.swagger.bundle.SwaggerInterpreter import sttp.tapir.ztapir._ -import za.co.absa.atum.model.dto.{CheckpointDTO, CheckpointV2DTO} +import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO, CheckpointDTO, CheckpointV2DTO} import za.co.absa.atum.server.Constants.{SwaggerApiName, SwaggerApiVersion} import za.co.absa.atum.server.api.controller.{CheckpointController, FlowController, PartitioningController} import za.co.absa.atum.server.api.http.ApiPaths.V2Paths @@ -62,9 +62,15 @@ trait Routes extends Endpoints with ServerOptions { getPartitioningAdditionalDataEndpointV2, PartitioningController.getPartitioningAdditionalDataV2 ), - createServerEndpoint( - createOrUpdateAdditionalDataEndpointV2, - PartitioningController.createOrUpdateAdditionalDataV2 + createServerEndpoint[ + (Long, AdditionalDataPatchDTO), + ErrorResponse, + SingleSuccessResponse[AdditionalDataDTO] + ]( + patchPartitioningAdditionalDataEndpointV2, + { case (partitioningId: Long, additionalDataPatchDTO: AdditionalDataPatchDTO) => + PartitioningController.patchPartitioningAdditionalDataV2(partitioningId, additionalDataPatchDTO) + } ), createServerEndpoint[ (Long, UUID), @@ -93,7 +99,7 @@ trait Routes extends Endpoints with ServerOptions { postCheckpointEndpointV2, createPartitioningEndpointV1, createPartitioningEndpointV2, - createOrUpdateAdditionalDataEndpointV2, + patchPartitioningAdditionalDataEndpointV2, getPartitioningCheckpointsEndpointV2, 
getPartitioningCheckpointEndpointV2, getFlowCheckpointsEndpointV2 diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala index ae9a5ee79..1e8cc84d7 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala @@ -34,7 +34,10 @@ trait PartitioningRepository { partitioningId: Long ): IO[DatabaseError, AdditionalDataDTO] - def createOrUpdateAdditionalData(additionalData: AdditionalDataSubmitDTO): IO[DatabaseError, Unit] + def createOrUpdateAdditionalData( + partitioningId: Long, + additionalData: AdditionalDataPatchDTO + ): IO[DatabaseError, AdditionalDataDTO] def getPartitioningCheckpoints(checkpointQueryDTO: CheckpointQueryDTO): IO[DatabaseError, Seq[CheckpointFromDB]] diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala index 03eaec076..63788ea1a 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala @@ -17,9 +17,10 @@ package za.co.absa.atum.server.api.repository import za.co.absa.atum.model.dto._ -import za.co.absa.atum.server.model.{AdditionalDataFromDB, AdditionalDataItemFromDB, CheckpointFromDB, MeasureFromDB, PartitioningFromDB} +import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs import za.co.absa.atum.server.api.database.runs.functions._ import za.co.absa.atum.server.api.exception.DatabaseError +import za.co.absa.atum.server.model.{AdditionalDataFromDB, AdditionalDataItemFromDB, CheckpointFromDB, MeasureFromDB, 
PartitioningFromDB} import zio._ import zio.interop.catz.asyncInstance import za.co.absa.atum.server.api.exception.DatabaseError.GeneralDatabaseError @@ -42,8 +43,14 @@ class PartitioningRepositoryImpl( ) } - override def createOrUpdateAdditionalData(additionalData: AdditionalDataSubmitDTO): IO[DatabaseError, Unit] = { - dbSingleResultCallWithStatus(createOrUpdateAdditionalDataFn(additionalData), "createOrUpdateAdditionalData") + override def createOrUpdateAdditionalData( + partitioningId: Long, + additionalData: AdditionalDataPatchDTO + ): IO[DatabaseError, AdditionalDataDTO] = { + dbMultipleResultCallWithAggregatedStatus( + createOrUpdateAdditionalDataFn(CreateOrUpdateAdditionalDataArgs(partitioningId, additionalData)), + "createOrUpdateAdditionalData" + ).map(AdditionalDataItemFromDB.additionalDataFromDBItems) } override def getPartitioningMeasures(partitioning: PartitioningDTO): IO[DatabaseError, Seq[MeasureDTO]] = { @@ -75,10 +82,7 @@ class PartitioningRepositoryImpl( dbMultipleResultCallWithAggregatedStatus( getPartitioningAdditionalDataV2Fn(partitioningId), "getPartitioningAdditionalData" - ).map(_.collect { case Some(AdditionalDataItemFromDB(adName, adValue, author)) => - adName -> Some(AdditionalDataItemDTO(adValue, author)) - }.toMap) - .map(AdditionalDataDTO(_)) + ).map(AdditionalDataItemFromDB.additionalDataFromDBItems) } override def getPartitioning(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] = { diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala index db547e451..ab699997f 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala @@ -31,7 +31,10 @@ trait PartitioningService { def getPartitioningAdditionalDataV2(partitioningId: Long): IO[ServiceError, AdditionalDataDTO] - def 
createOrUpdateAdditionalData(additionalData: AdditionalDataSubmitDTO): IO[ServiceError, Unit] + def patchAdditionalData( + partitioningId: Long, + additionalData: AdditionalDataPatchDTO + ): IO[ServiceError, AdditionalDataDTO] def getPartitioningCheckpoints(checkpointQueryDTO: CheckpointQueryDTO): IO[ServiceError, Seq[CheckpointDTO]] diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala index e68f720e7..1925568b7 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala @@ -34,13 +34,6 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) ) } - override def createOrUpdateAdditionalData(additionalData: AdditionalDataSubmitDTO): IO[ServiceError, Unit] = { - repositoryCall( - partitioningRepository.createOrUpdateAdditionalData(additionalData), - "createOrUpdateAdditionalData" - ) - } - override def getPartitioningMeasures(partitioning: PartitioningDTO): IO[ServiceError, Seq[MeasureDTO]] = { repositoryCall( partitioningRepository.getPartitioningMeasures(partitioning), @@ -81,6 +74,16 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) ) } + override def patchAdditionalData( + partitioningId: Long, + additionalData: AdditionalDataPatchDTO + ): IO[ServiceError, AdditionalDataDTO] = { + repositoryCall( + partitioningRepository.createOrUpdateAdditionalData(partitioningId, additionalData), + "createOrUpdateAdditionalData" + ) + } + override def getPartitioning(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] = { repositoryCall(partitioningRepository.getPartitioning(partitioningId), "getPartitioning") } diff --git a/server/src/main/scala/za/co/absa/atum/server/model/AdditionalDataItemFromDB.scala 
b/server/src/main/scala/za/co/absa/atum/server/model/AdditionalDataItemFromDB.scala index 2f86ea78c..3413dddcc 100644 --- a/server/src/main/scala/za/co/absa/atum/server/model/AdditionalDataItemFromDB.scala +++ b/server/src/main/scala/za/co/absa/atum/server/model/AdditionalDataItemFromDB.scala @@ -16,8 +16,25 @@ package za.co.absa.atum.server.model -case class AdditionalDataItemFromDB ( +import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataItemDTO} + +case class AdditionalDataItemFromDB( adName: String, adValue: Option[String], - author: String, + author: String ) + +object AdditionalDataItemFromDB { + def additionalDataFromDBItems(dbItems: Seq[Option[AdditionalDataItemFromDB]]): AdditionalDataDTO = { + AdditionalDataDTO( + dbItems.flatten + .map(item => + item.adValue match { + case Some(value) => item.adName -> Some(AdditionalDataItemDTO(Option(value), item.author)) + case None => item.adName -> None + } + ) + .toMap + ) + } +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala index 94d0a394a..d6c3995ad 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/TestData.scala @@ -128,6 +128,14 @@ trait TestData { protected val additionalDataDTO3: AdditionalDataDTO = AdditionalDataDTO(Map.empty) + protected val additionalDataPatchDTO1: AdditionalDataPatchDTO = AdditionalDataPatchDTO( + byUser = "author", + data = Map( + "key1" -> "value1", + "key3" -> "value3" + ) + ) + val mainValue: TypedValue = TypedValue( value = "123", valueType = ResultValueType.LongValue diff --git a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala index 86cc066b1..3baa18639 100644 --- 
a/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/controller/PartitioningControllerUnitTests.scala @@ -41,9 +41,11 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { when(partitioningServiceMock.getPartitioningAdditionalData(partitioningDTO1)) .thenReturn(ZIO.succeed(Map.empty)) - when(partitioningServiceMock.createOrUpdateAdditionalData(additionalDataSubmitDTO1)) - .thenReturn(ZIO.unit) - when(partitioningServiceMock.createOrUpdateAdditionalData(additionalDataSubmitDTO2)) + when(partitioningServiceMock.patchAdditionalData(1L, additionalDataPatchDTO1)) + .thenReturn(ZIO.succeed(additionalDataDTO1)) + when(partitioningServiceMock.patchAdditionalData(0L, additionalDataPatchDTO1)) + .thenReturn(ZIO.fail(NotFoundServiceError("Partitioning not found"))) + when(partitioningServiceMock.patchAdditionalData(2L, additionalDataPatchDTO1)) .thenReturn(ZIO.fail(GeneralServiceError("boom!"))) when(partitioningServiceMock.getPartitioningCheckpoints(checkpointQueryDTO1)) @@ -83,16 +85,21 @@ object PartitioningControllerUnitTests extends ZIOSpecDefault with TestData { ) } ), - suite("CreateOrUpdateAdditionalDataSuite")( + suite("PatchAdditionalDataSuite")( test("Returns expected AdditionalDataSubmitDTO") { for { - result <- PartitioningController.createOrUpdateAdditionalDataV2(additionalDataSubmitDTO1) - expected = SingleSuccessResponse(additionalDataSubmitDTO1, uuid1) + result <- PartitioningController.patchPartitioningAdditionalDataV2(1L, additionalDataPatchDTO1) + expected = SingleSuccessResponse(additionalDataDTO1, uuid1) actual = result.copy(requestId = uuid1) } yield assertTrue(actual == expected) }, + test("Returns expected NotFoundErrorResponse") { + assertZIO(PartitioningController.patchPartitioningAdditionalDataV2(0L, additionalDataPatchDTO1).exit)( + failsWithA[NotFoundErrorResponse] + ) + }, test("Returns expected 
InternalServerErrorResponse") { - assertZIO(PartitioningController.createOrUpdateAdditionalDataV2(additionalDataSubmitDTO2).exit)( + assertZIO(PartitioningController.patchPartitioningAdditionalDataV2(2L, additionalDataPatchDTO1).exit)( failsWithA[InternalServerErrorResponse] ) } diff --git a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalDataIntegrationTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalDataIntegrationTests.scala index 94bb6564c..dc948ed54 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalDataIntegrationTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/CreateOrUpdateAdditionalDataIntegrationTests.scala @@ -16,33 +16,25 @@ package za.co.absa.atum.server.api.database.runs.functions -import za.co.absa.atum.model.dto.{AdditionalDataSubmitDTO, PartitionDTO} import za.co.absa.atum.server.ConfigProviderTest -import za.co.absa.atum.server.api.TestTransactorProvider import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs +import za.co.absa.atum.server.api.{TestData, TestTransactorProvider} import za.co.absa.db.fadb.exceptions.DataNotFoundException import za.co.absa.db.fadb.status.FunctionStatus import zio._ import zio.interop.catz.asyncInstance import zio.test._ -object CreateOrUpdateAdditionalDataIntegrationTests extends ConfigProviderTest { +object CreateOrUpdateAdditionalDataIntegrationTests extends ConfigProviderTest with TestData { override def spec: Spec[TestEnvironment with Scope, Any] = { suite("CreateOrUpdateAdditionalDataIntegrationSuite")( - test("Returns expected Right with Unit") { - val additionalDataSubmitDTO = AdditionalDataSubmitDTO( - partitioning = Seq(PartitionDTO("key1", "val1"), PartitionDTO("key2", 
"val2")), - additionalData = Map[String, Option[String]]( - "ownership" -> Some("total"), - "role" -> Some("primary") - ), - author = "testAuthor" - ) + test("Returns expected DataNotFoundException") { for { createOrUpdateAdditionalData <- ZIO.service[CreateOrUpdateAdditionalData] - result <- createOrUpdateAdditionalData(additionalDataSubmitDTO) + result <- createOrUpdateAdditionalData(CreateOrUpdateAdditionalDataArgs(1L, additionalDataPatchDTO1)) } yield assertTrue(result == Left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) } ).provide( diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningAdditionalDataV2EndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningAdditionalDataV2EndpointUnitTests.scala index 3c500f787..f945b5c0b 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningAdditionalDataV2EndpointUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningAdditionalDataV2EndpointUnitTests.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package za.co.absa.atum.server.api.http import org.mockito.Mockito.{mock, when} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/PatchAdditionalDataEndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/PatchAdditionalDataEndpointUnitTests.scala new file mode 100644 index 000000000..b6842e8a6 --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/PatchAdditionalDataEndpointUnitTests.scala @@ -0,0 +1,105 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package za.co.absa.atum.server.api.http + +import io.circe +import sttp.client3.circe._ +import org.mockito.Mockito.{mock, when} +import sttp.client3.circe.asJson +import sttp.client3.testing.SttpBackendStub +import sttp.client3.{Identity, RequestT, ResponseException, UriContext, basicRequest} +import sttp.model.StatusCode +import sttp.tapir.server.stub.TapirStubInterpreter +import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} +import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO} +import za.co.absa.atum.server.api.TestData +import za.co.absa.atum.server.api.controller.PartitioningController +import za.co.absa.atum.server.model.SuccessResponse.SingleSuccessResponse +import za.co.absa.atum.server.model.{InternalServerErrorResponse, NotFoundErrorResponse} +import zio.test.Assertion.equalTo +import zio.test.{Spec, TestEnvironment, ZIOSpecDefault, assertZIO} +import zio.{Scope, ZIO, ZLayer} + +object PatchAdditionalDataEndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { + + private val partitioningControllerMock: PartitioningController = mock(classOf[PartitioningController]) + + when(partitioningControllerMock.patchPartitioningAdditionalDataV2(1L, additionalDataPatchDTO1)) + .thenReturn(ZIO.succeed(SingleSuccessResponse(additionalDataDTO1, uuid1))) + when(partitioningControllerMock.patchPartitioningAdditionalDataV2(0L, additionalDataPatchDTO1)) + .thenReturn(ZIO.fail(NotFoundErrorResponse("error"))) + when(partitioningControllerMock.patchPartitioningAdditionalDataV2(2L, additionalDataPatchDTO1)) + .thenReturn(ZIO.fail(InternalServerErrorResponse("error"))) + + private val partitioningControllerMockLayer = ZLayer.succeed(partitioningControllerMock) + + private val patchAdditionalDataEndpointLogic = patchPartitioningAdditionalDataEndpointV2 + .zServerLogic({ case (partitioningId: Long, additionalDataPatchDTO: AdditionalDataPatchDTO) => + PartitioningController.patchPartitioningAdditionalDataV2(partitioningId, 
additionalDataPatchDTO) + }) + + override def spec: Spec[TestEnvironment with Scope, Any] = { + val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[PartitioningController])) + .whenServerEndpoint(patchAdditionalDataEndpointLogic) + .thenRunLogic() + .backend() + + suite("PatchAdditionalDataEndpointUnitTests")( + test("Returns expected AdditionalDataDTO") { + val response = patchRequestForId(1L) + .body(additionalDataPatchDTO1) + .send(backendStub) + + val body = response.map(_.body) + val statusCode = response.map(_.code) + + assertZIO(body <&> statusCode)( + equalTo(Right(SingleSuccessResponse(additionalDataDTO1, uuid1)), StatusCode.Ok) + ) + }, + test("Returns NotFoundErrorResponse") { + val response = patchRequestForId(0L) + .body(additionalDataPatchDTO1) + .send(backendStub) + + val statusCode = response.map(_.code) + + assertZIO(statusCode)(equalTo(StatusCode.NotFound)) + }, + test("Returns InternalServerErrorResponse") { + val response = patchRequestForId(2L) + .body(additionalDataPatchDTO1) + .send(backendStub) + + val statusCode = response.map(_.code) + + assertZIO(statusCode)(equalTo(StatusCode.InternalServerError)) + } + ) + + }.provide(partitioningControllerMockLayer) + + private def patchRequestForId(id: Long): RequestT[Identity, Either[ + ResponseException[String, circe.Error], + SingleSuccessResponse[AdditionalDataDTO] + ], Any] = { + basicRequest + .patch(uri"https://test.com/api/v2/partitionings/$id/additional-data") + .response(asJson[SingleSuccessResponse[AdditionalDataDTO]]) + } + +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala index 5f50ea5ec..520a77792 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala +++ 
b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala @@ -19,6 +19,7 @@ package za.co.absa.atum.server.api.repository import org.mockito.Mockito.{mock, when} import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataItemDTO} import za.co.absa.atum.server.api.TestData +import za.co.absa.atum.server.api.database.runs.functions.CreateOrUpdateAdditionalData.CreateOrUpdateAdditionalDataArgs import za.co.absa.atum.server.api.database.runs.functions._ import za.co.absa.atum.server.api.exception.DatabaseError import za.co.absa.atum.server.api.exception.DatabaseError._ @@ -47,11 +48,11 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { // Create Additional Data Mocks private val createOrUpdateAdditionalDataMock = mock(classOf[CreateOrUpdateAdditionalData]) - when(createOrUpdateAdditionalDataMock.apply(additionalDataSubmitDTO1)) - .thenReturn(ZIO.right(Row(FunctionStatus(0, "success"), ()))) - when(createOrUpdateAdditionalDataMock.apply(additionalDataSubmitDTO2)) - .thenReturn(ZIO.left(ErrorInDataException(FunctionStatus(50, "error in AD data")))) - when(createOrUpdateAdditionalDataMock.apply(additionalDataSubmitDTO3)) + when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(1L, additionalDataPatchDTO1))) + .thenReturn(ZIO.right(Seq(Row(FunctionStatus(11, "Additional data have been updated, added or both"), Option.empty[AdditionalDataItemFromDB])))) + when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(0L, additionalDataPatchDTO1))) + .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) + when(createOrUpdateAdditionalDataMock.apply(CreateOrUpdateAdditionalDataArgs(2L, additionalDataPatchDTO1))) .thenReturn(ZIO.fail(new Exception("boom!"))) private val createOrUpdateAdditionalDataMockLayer = ZLayer.succeed(createOrUpdateAdditionalDataMock) @@ -142,23 +143,23 @@ object PartitioningRepositoryUnitTests extends 
ZIOSpecDefault with TestData { suite("CreateOrUpdateAdditionalDataSuite")( test("Returns expected Right with Unit") { for { - result <- PartitioningRepository.createOrUpdateAdditionalData(additionalDataSubmitDTO1) - } yield assertTrue(result == ()) + result <- PartitioningRepository.createOrUpdateAdditionalData(1L, additionalDataPatchDTO1) + } yield assertTrue(result.isInstanceOf[AdditionalDataDTO]) }, test("Returns expected Left with StatusException") { for { - result <- PartitioningRepository.createOrUpdateAdditionalData(additionalDataSubmitDTO2).exit + result <- PartitioningRepository.createOrUpdateAdditionalData(0L, additionalDataPatchDTO1).exit } yield assertTrue( result == Exit.fail( - GeneralDatabaseError( - "Exception caused by operation: 'createOrUpdateAdditionalData': (50) error in AD data" + NotFoundDatabaseError( + "Exception caused by operation: 'createOrUpdateAdditionalData': (41) Partitioning not found" ) ) ) }, test("Returns expected DatabaseError") { - assertZIO(PartitioningRepository.createOrUpdateAdditionalData(additionalDataSubmitDTO3).exit)( - failsWithA[DatabaseError] + assertZIO(PartitioningRepository.createOrUpdateAdditionalData(2L, additionalDataPatchDTO1).exit)( + failsWithA[GeneralDatabaseError] ) } ), diff --git a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala index 6da92baaa..c297ad18a 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala @@ -36,10 +36,11 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { when(partitioningRepositoryMock.createPartitioningIfNotExists(partitioningSubmitDTO3)) .thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) - 
when(partitioningRepositoryMock.createOrUpdateAdditionalData(additionalDataSubmitDTO1)).thenReturn(ZIO.unit) - when(partitioningRepositoryMock.createOrUpdateAdditionalData(additionalDataSubmitDTO2)) - .thenReturn(ZIO.fail(GeneralDatabaseError("error in AD data"))) - when(partitioningRepositoryMock.createOrUpdateAdditionalData(additionalDataSubmitDTO3)) + when(partitioningRepositoryMock.createOrUpdateAdditionalData(1L, additionalDataPatchDTO1)) + .thenReturn(ZIO.succeed(additionalDataDTO1)) + when(partitioningRepositoryMock.createOrUpdateAdditionalData(0L, additionalDataPatchDTO1)) + .thenReturn(ZIO.fail(NotFoundDatabaseError("Partitioning not found"))) + when(partitioningRepositoryMock.createOrUpdateAdditionalData(2L, additionalDataPatchDTO1)) .thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) when(partitioningRepositoryMock.getPartitioningMeasures(partitioningDTO1)) @@ -92,23 +93,23 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { ) } ), - suite("CreateOrUpdateAdditionalDataSuite")( + suite("PatchAdditionalDataSuite")( test("Returns expected Right with Unit") { for { - result <- PartitioningService.createOrUpdateAdditionalData(additionalDataSubmitDTO1) - } yield assertTrue(result == ()) + result <- PartitioningService.patchAdditionalData(1L, additionalDataPatchDTO1) + } yield assertTrue(result == additionalDataDTO1) }, - test("Returns expected Left with StatusException") { + test("Returns expected NotFoundServiceError") { for { - result <- PartitioningService.createOrUpdateAdditionalData(additionalDataSubmitDTO2).exit + result <- PartitioningService.patchAdditionalData(0L, additionalDataPatchDTO1).exit } yield assertTrue( result == Exit.fail( - GeneralServiceError("Failed to perform 'createOrUpdateAdditionalData': error in AD data") + NotFoundServiceError("Failed to perform 'createOrUpdateAdditionalData': Partitioning not found") ) ) }, - test("Returns expected ServiceError") { - 
assertZIO(PartitioningService.createOrUpdateAdditionalData(additionalDataSubmitDTO3).exit)( + test("Returns expected GeneralServiceError") { + assertZIO(PartitioningService.patchAdditionalData(2L, additionalDataPatchDTO1).exit)( failsWithA[GeneralServiceError] ) }